From 1ea5f90a30f24d7c5b4ad9b9cda4bbbcc6aabd35 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Mon, 2 Feb 2026 22:50:11 -0500 Subject: [PATCH 1/9] vector stores with lls 0.4.3 Signed-off-by: Anxhela Coba --- lightspeed-stack.yaml | 10 +- run.yaml | 223 +++++--- src/app/endpoints/query.py | 41 +- src/app/endpoints/query_v2.py | 270 ++++++++- src/app/endpoints/shields.py | 2 + src/app/endpoints/streaming_query.py | 534 ++++++++++++++++++ src/app/endpoints/streaming_query_v2.py | 2 +- src/app/main.py | 10 + src/constants.py | 2 +- src/models/requests.py | 9 +- tests/unit/app/endpoints/test_query_v2.py | 10 + .../app/endpoints/test_streaming_query.py | 17 + 12 files changed, 999 insertions(+), 131 deletions(-) diff --git a/lightspeed-stack.yaml b/lightspeed-stack.yaml index ba29f85fa..356394c7e 100644 --- a/lightspeed-stack.yaml +++ b/lightspeed-stack.yaml @@ -10,12 +10,12 @@ service: llama_stack: # Uses a remote llama-stack service # The instance would have already been started with a llama-stack-run.yaml file - use_as_library_client: false + # use_as_library_client: false # Alternative for "as library use" - # use_as_library_client: true - # library_client_config_path: - url: http://llama-stack:8321 - api_key: xyzzy + use_as_library_client: true + library_client_config_path: run.yaml + # url: http://llama-stack:8321 + # api_key: xyzzy user_data_collection: feedback_enabled: true feedback_storage: "/tmp/data/feedback" diff --git a/run.yaml b/run.yaml index 3680f2b32..5ed401dcd 100644 --- a/run.yaml +++ b/run.yaml @@ -15,124 +15,159 @@ apis: benchmarks: [] datasets: [] image_name: starter -# external_providers_dir: /opt/app-root/src/.llama/providers.d +external_providers_dir: ${env.EXTERNAL_PROVIDERS_DIR} -providers: - inference: - - provider_id: openai # This ID is a reference to 'providers.inference' - provider_type: remote::openai - config: - api_key: ${env.OPENAI_API_KEY} - allowed_models: ["${env.E2E_OPENAI_MODEL:=gpt-4o-mini}"] - - config: {} - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - files: - - config: - metadata_store: - table_name: files_metadata - backend: sql_default - storage_dir: ~/.llama/storage/files - provider_id: meta-reference-files - provider_type: inline::localfs - safety: - - config: - excluded_categories: [] - provider_id: llama-guard - provider_type: inline::llama-guard - scoring: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - config: {} - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: '********' - tool_runtime: - - config: {} # Enable the RAG tool - provider_id: rag-runtime - provider_type: inline::rag-runtime - vector_io: - - config: # Define the storage backend for RAG - persistence: - namespace: vector_io::faiss - backend: kv_default - provider_id: faiss - provider_type: inline::faiss - agents: - - config: - persistence: - agent_state: - namespace: agents_state - backend: kv_default - responses: - table_name: agents_responses - backend: sql_default - provider_id: meta-reference - provider_type: inline::meta-reference - batches: - - config: - kvstore: - namespace: batches_store - backend: kv_default - provider_id: reference - provider_type: inline::reference - datasetio: - - config: - kvstore: - namespace: huggingface_datasetio - backend: kv_default - provider_id: huggingface - provider_type: remote::huggingface - - config: - kvstore: - namespace: localfs_datasetio - backend: kv_default - 
provider_id: localfs - provider_type: inline::localfs - eval: - - config: - kvstore: - namespace: eval_store - backend: kv_default - provider_id: meta-reference - provider_type: inline::meta-reference -scoring_fns: [] -server: - port: 8321 storage: backends: - kv_default: # Define the storage backend type for RAG, in this case registry and RAG are unified i.e. information on registered resources (e.g. models, vector_stores) are saved together with the RAG chunks + kv_default: type: kv_sqlite db_path: ${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db} sql_default: type: sql_sqlite db_path: ${env.SQL_STORE_PATH:=~/.llama/storage/sql_store.db} + stores: metadata: namespace: registry backend: kv_default + inference: table_name: inference_store backend: sql_default max_write_queue_size: 10000 num_writers: 4 + conversations: table_name: openai_conversations backend: sql_default + prompts: namespace: prompts backend: kv_default + +metadata_store: + type: sqlite + db_path: ~/.llama/storage/registry.db + +inference_store: + type: sqlite + db_path: ~/.llama/storage/inference-store.db + +conversations_store: + type: sqlite + db_path: ~/.llama/storage/conversations.db + +providers: + inference: + - provider_id: openai + provider_type: remote::openai + config: + api_key: ${env.OPENAI_API_KEY} + allowed_models: + - gpt-4o-mini + + - provider_id: sentence-transformers + provider_type: inline::sentence-transformers + config: + allowed_models: + - ${env.EMBEDDING_MODEL_DIR} + + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ~/.llama/storage/files + metadata_store: + table_name: files_metadata + backend: sql_default + + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: + excluded_categories: [] + + scoring: + - provider_id: basic + provider_type: inline::basic + config: {} + + - provider_id: llm-as-judge + provider_type: inline::llm-as-judge + config: {} + + tool_runtime: + - provider_id: rag-runtime + provider_type: inline::rag-runtime + config: {} + + vector_io: + - provider_id: solr-vector + provider_type: remote::solr_vector_io + config: + solr_url: http://localhost:8983/solr + collection_name: portal-rag + vector_field: chunk_vector + content_field: chunk + embedding_dimension: 384 + embedding_model: ${env.EMBEDDING_MODEL_DIR} + persistence: + namespace: portal-rag + backend: kv_default + + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence: + agent_state: + namespace: agents_state + backend: kv_default + responses: + table_name: agents_responses + backend: sql_default + + batches: + - provider_id: reference + provider_type: inline::reference + config: + kvstore: + namespace: batches_store + backend: kv_default + + datasetio: + - provider_id: huggingface + provider_type: remote::huggingface + config: + kvstore: + namespace: huggingface_datasetio + backend: kv_default + + - provider_id: localfs + provider_type: inline::localfs + config: + kvstore: + namespace: localfs_datasetio + backend: kv_default + registered_resources: - models: [] + models: + - model_id: granite-embedding-30m + model_type: embedding + provider_id: sentence-transformers + provider_model_id: ${env.EMBEDDING_MODEL_DIR} + metadata: + embedding_dimension: 384 + shields: - shield_id: llama-guard provider_id: llama-guard provider_shield_id: openai/gpt-4o-mini - vector_stores: [] + vector_stores: + - vector_store_id: portal-rag + provider_id: solr-vector + embedding_model: 
sentence-transformers/${env.EMBEDDING_MODEL_DIR} + embedding_dimension: 384 datasets: [] scoring_fns: [] benchmarks: [] @@ -140,9 +175,9 @@ registered_resources: - toolgroup_id: builtin::rag # Register the RAG tool provider_id: rag-runtime vector_stores: - default_provider_id: faiss - default_embedding_model: # Define the default embedding model for RAG - provider_id: sentence-transformers - model_id: nomic-ai/nomic-embed-text-v1.5 + vector_store_id: portal-rag + provider_id: solr-vector + embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR} + embedding_dimension: 384 safety: default_shield_id: llama-guard diff --git a/src/app/endpoints/query.py b/src/app/endpoints/query.py index eddf30f86..209c6f2e4 100644 --- a/src/app/endpoints/query.py +++ b/src/app/endpoints/query.py @@ -55,6 +55,11 @@ router = APIRouter(tags=["query"]) +# When OFFLINE is False, use reference_url for chunk source +# When OFFLINE is True, use parent_id for chunk source +# TODO: move this setting to a higher level configuration +OFFLINE = True + query_response: dict[int | str, dict[str, Any]] = { 200: QueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( @@ -386,9 +391,9 @@ async def query_endpoint_handler_base( # pylint: disable=R0914 response = QueryResponse( conversation_id=conversation_id, response=summary.llm_response, - tool_calls=summary.tool_calls, - tool_results=summary.tool_results, - rag_chunks=summary.rag_chunks, + rag_chunks=rag_chunks_dict, + tool_calls=summary.tool_calls if summary.tool_calls else [], + tool_results=summary.tool_results if summary.tool_results else [], referenced_documents=referenced_documents, truncated=False, # TODO: implement truncation detection input_tokens=token_usage.input_tokens, @@ -577,3 +582,33 @@ def validate_attachments_metadata(attachments: list[Attachment]) -> None: response="Invalid attribute value", cause=message ) raise HTTPException(**response.model_dump()) + + +# def get_rag_toolgroups( +# vector_db_ids: list[str], +# ) -> Optional[list[Toolgroup]]: +# """ +# Return a list of RAG Tool groups if the given vector DB list is not empty. + +# Generate a list containing a RAG knowledge search toolgroup if +# vector database IDs are provided. + +# Parameters: +# vector_db_ids (list[str]): List of vector database identifiers to include in the toolgroup. + +# Returns: +# Optional[list[Toolgroup]]: A list with a single RAG toolgroup if +# vector_db_ids is non-empty; otherwise, None. 
+#     """
+#     return (
+#         [
+#             ToolgroupAgentToolGroupWithArgs(
+#                 name="builtin::rag/file_search",
+#                 args={
+#                     "vector_db_ids": vector_db_ids,
+#                 },
+#             )
+#         ]
+#         if vector_db_ids
+#         else None
+#     )
diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py
index ecc39b071..3348be07a 100644
--- a/src/app/endpoints/query_v2.py
+++ b/src/app/endpoints/query_v2.py
@@ -4,9 +4,12 @@

 import json
 import logging
+import traceback
 from typing import Annotated, Any, Optional, cast
+from urllib.parse import urljoin

 from fastapi import APIRouter, Depends, Request
+from llama_stack_client import APIConnectionError, APIStatusError
 from llama_stack_api.openai_responses import (
     OpenAIResponseMCPApprovalRequest,
     OpenAIResponseMCPApprovalResponse,
@@ -364,9 +367,14 @@ async def retrieve_response(  # pylint: disable=too-many-locals,too-many-branche
     if query_request.attachments:
         validate_attachments_metadata(query_request.attachments)

-    # Prepare tools for responses API
+    # Prepare tools for responses API - skip RAG tools since we're doing direct vector query
     toolgroups = await prepare_tools_for_responses_api(
-        client, query_request, token, configuration, mcp_headers
+        client,
+        query_request,
+        token,
+        configuration,
+        mcp_headers=mcp_headers,
+        skip_rag_tools=True,
     )

     # Prepare input for Responses API
@@ -420,6 +428,165 @@ async def retrieve_response(  # pylint: disable=too-many-locals,too-many-branche
         TokenCounter(),
     )

+    # Extract RAG chunks from vector DB query response BEFORE calling responses API
+    rag_chunks = []
+    doc_ids_from_chunks = []
+    retrieved_chunks = []
+    retrieved_scores = []
+
+    # When offline is False, use reference_url for chunk source
+    # When offline is True, use parent_id for chunk source
+    # TODO: move this setting to a higher level configuration
+    offline = True
+
+    try:
+        # Get vector stores for direct querying
+        if query_request.vector_store_ids:
+            vector_store_ids = query_request.vector_store_ids
+            logger.info(
+                "Using specified vector_store_ids for direct query: %s",
+                vector_store_ids,
+            )
+        else:
+            vector_store_ids = [
+                vector_store.id
+                for vector_store in (await client.vector_stores.list()).data
+            ]
+            logger.info(
+                "Using all available vector_store_ids for direct query: %s",
+                vector_store_ids,
+            )
+
+        if vector_store_ids:
+            vector_store_id = vector_store_ids[0]  # Use first available vector store
+
+            params = {"k": 5, "score_threshold": 0.0}
+            logger.info("Initial params: %s", params)
+            logger.info("query_request.solr: %s", query_request.solr)
+            if query_request.solr:
+                # Pass the entire solr dict under the 'solr' key
+                params["solr"] = query_request.solr
+                logger.info("Final params with solr filters: %s", params)
+            else:
+                logger.info("No solr filters provided")
+            logger.info("Final params being sent to vector_io.query: %s", params)
+
+            query_response = await client.vector_io.query(
+                vector_store_id=vector_store_id, query=query_request.query, params=params
+            )
+
+            logger.info("The query response total payload: %s", query_response)
+
+            if query_response.chunks:
+                retrieved_chunks = query_response.chunks
+                retrieved_scores = (
+                    query_response.scores if hasattr(query_response, "scores") else []
+                )
+
+                # Extract doc_ids from chunks for referenced_documents
+                metadata_doc_ids = set()
+
+                for chunk in query_response.chunks:
+                    logger.info("Extract doc ids from chunk: %s", chunk)
+
+                    # 1) dict metadata
+                    md = getattr(chunk, "metadata", None) or {}
+                    doc_id = md.get("doc_id") or md.get("document_id")
+                    title = md.get("title")

+                    # 2) typed chunk_metadata populated by the provider
+                    if not doc_id:
+                        cm = getattr(chunk, "chunk_metadata", None)
+                        if cm is not None:
+                            # cm might be a pydantic model or a dict depending on caller
+                            if isinstance(cm, dict):
+                                doc_id = cm.get("doc_id") or cm.get("document_id")
+                                title = title or cm.get("title")
+                                reference_url = cm.get("reference_url")
+                            else:
+                                doc_id = getattr(cm, "doc_id", None) or getattr(cm, "document_id", None)
+                                title = title or getattr(cm, "title", None)
+                                reference_url = getattr(cm, "reference_url", None)
+                        else:
+                            reference_url = None
+                    else:
+                        reference_url = md.get("reference_url")
+
+                    if not doc_id and not reference_url:
+                        continue
+
+                    # Build URL based on offline flag
+                    if offline:
+                        # Use parent/doc path
+                        reference_doc = doc_id
+                        doc_url = "https://mimir.corp.redhat.com" + reference_doc
+                    else:
+                        # Use reference_url if online
+                        reference_doc = reference_url or doc_id
+                        doc_url = reference_doc if reference_doc.startswith("http") else ("https://mimir.corp.redhat.com" + reference_doc)
+
+                    if reference_doc and reference_doc not in metadata_doc_ids:
+                        metadata_doc_ids.add(reference_doc)
+                        doc_ids_from_chunks.append(
+                            ReferencedDocument(
+                                doc_title=title,
+                                doc_url=doc_url,
+                            )
+                        )
+
+                logger.info("Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks))
+
    except (
+        APIConnectionError,
+        APIStatusError,
+        AttributeError,
+        KeyError,
+        ValueError,
+    ) as e:
+        logger.warning("Failed to query vector database for chunks: %s", e)
+        logger.debug("Vector DB query error details: %s", traceback.format_exc())
+        # Continue without RAG chunks
+
+    # Convert retrieved chunks to RAGChunk format
+    for i, chunk in enumerate(retrieved_chunks):
+        # Extract source from chunk metadata based on offline flag
+        source = None
+        if chunk.metadata:
+            if offline:
+                parent_id = chunk.metadata.get("parent_id")
+                if parent_id:
+                    source = urljoin("https://mimir.corp.redhat.com", parent_id)
+            else:
+                source = chunk.metadata.get("reference_url")
+
+        # Get score from retrieved_scores list if available
+        score = retrieved_scores[i] if i < len(retrieved_scores) else None
+
+        rag_chunks.append(
+            RAGChunk(
+                content=chunk.content,
+                source=source,
+                score=score,
+            )
+        )
+
+    logger.info("Retrieved %d chunks from vector DB", len(rag_chunks))
+
+    # Format RAG context for injection into user message
+    rag_context = ""
+    if rag_chunks:
+        context_chunks = []
+        for chunk in rag_chunks[:5]:  # Limit to top 5 chunks
+            chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}"
+            context_chunks.append(chunk_text)
+        rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks)
+        logger.info("Injecting %d RAG chunks into user message", len(context_chunks))
+
+    # Inject RAG context into input text
+    if rag_context:
+        input_text = input_text + rag_context
+
     # Create OpenAI response using responses API
     create_kwargs: dict[str, Any] = {
         "input": input_text,
@@ -444,18 +611,29 @@ async def retrieve_response(  # pylint: disable=too-many-locals,too-many-branche
     llm_response = ""
     tool_calls: list[ToolCallSummary] = []
     tool_results: list[ToolResultSummary] = []
-    rag_chunks: list[RAGChunk] = []
+    response_api_rag_chunks: list[RAGChunk] = []

     for output_item in response.output:
         message_text = extract_text_from_response_output_item(output_item)
         if message_text:
             llm_response += message_text

-        tool_call, tool_result = _build_tool_call_summary(output_item, rag_chunks)
+        tool_call, tool_result = _build_tool_call_summary(
+            output_item, response_api_rag_chunks
+        )
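+        # Record tool-call and tool-result summaries; file_search results are
+        # accumulated into response_api_rag_chunks by the summary builder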
if tool_call: tool_calls.append(tool_call) if tool_result: tool_results.append(tool_result) + # Merge RAG chunks from direct vector query with those from responses API + all_rag_chunks = rag_chunks + response_api_rag_chunks + logger.info( + "Combined RAG chunks: %d from direct query + %d from responses API = %d total", + len(rag_chunks), + len(response_api_rag_chunks), + len(all_rag_chunks), + ) + logger.info( "Response processing complete - Tool calls: %d, Response length: %d chars", len(tool_calls), @@ -466,11 +644,21 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche llm_response=llm_response, tool_calls=tool_calls, tool_results=tool_results, - rag_chunks=rag_chunks, + rag_chunks=all_rag_chunks, ) # Extract referenced documents and token usage from Responses API response - referenced_documents = parse_referenced_documents_from_responses_api(response) + # Merge with documents from direct vector query + response_referenced_documents = parse_referenced_documents_from_responses_api( + response + ) + all_referenced_documents = doc_ids_from_chunks + response_referenced_documents + logger.info( + "Combined referenced documents: %d from direct query + %d from responses API = %d total", + len(doc_ids_from_chunks), + len(response_referenced_documents), + len(all_referenced_documents), + ) model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id token_usage = extract_token_usage_from_responses_api( response, model_label, provider_id, system_prompt @@ -485,7 +673,7 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche return ( summary, normalize_conversation_id(conversation_id), - referenced_documents, + all_referenced_documents, token_usage, ) @@ -687,12 +875,15 @@ def _increment_llm_call_metric(provider: str, model: str) -> None: logger.warning("Failed to update LLM call metric: %s", e) -def get_rag_tools(vector_store_ids: list[str]) -> Optional[list[dict[str, Any]]]: +def get_rag_tools( + vector_store_ids: list[str], solr_params: Optional[dict[str, Any]] = None +) -> Optional[list[dict[str, Any]]]: """ Convert vector store IDs to tools format for Responses API. Args: vector_store_ids: List of vector store identifiers + solr_params: Optional Solr filtering parameters Returns: Optional[list[dict[str, Any]]]: List containing file_search tool configuration, @@ -701,13 +892,16 @@ def get_rag_tools(vector_store_ids: list[str]) -> Optional[list[dict[str, Any]]] if not vector_store_ids: return None - return [ - { - "type": "file_search", - "vector_store_ids": vector_store_ids, - "max_num_results": 10, - } - ] + tool_config = { + "type": "file_search", + "vector_store_ids": vector_store_ids, + "max_num_results": 10, + } + + if solr_params: + tool_config["solr"] = solr_params + + return [tool_config] def get_mcp_tools( @@ -808,7 +1002,9 @@ async def prepare_tools_for_responses_api( query_request: QueryRequest, token: str, config: AppConfig, + *, mcp_headers: Optional[dict[str, dict[str, str]]] = None, + skip_rag_tools: bool = False, ) -> Optional[list[dict[str, Any]]]: """ Prepare tools for Responses API including RAG and MCP tools. 
@@ -822,6 +1018,7 @@ async def prepare_tools_for_responses_api(
         token: Authentication token for MCP tools
         config: Configuration object containing MCP server settings
         mcp_headers: Per-request headers for MCP servers
+        skip_rag_tools: If True, skip adding RAG tools (used when doing direct vector querying)

     Returns:
         Optional[list[dict[str, Any]]]: List of tool configurations for the
@@ -831,18 +1028,39 @@
         return None

     toolgroups = []
-    # Get vector stores for RAG tools - use specified ones or fetch all
-    if query_request.vector_store_ids:
-        vector_store_ids = query_request.vector_store_ids
-    else:
-        vector_store_ids = [
-            vector_store.id for vector_store in (await client.vector_stores.list()).data
-        ]

-    # Add RAG tools if vector stores are available
-    rag_tools = get_rag_tools(vector_store_ids)
-    if rag_tools:
-        toolgroups.extend(rag_tools)
+    # Add RAG tools if not skipped
+    if not skip_rag_tools:
+        # Get vector stores for RAG tools - use specified ones or fetch all
+        if query_request.vector_store_ids:
+            vector_store_ids = query_request.vector_store_ids
+            logger.info("Using specified vector_store_ids: %s", vector_store_ids)
+        else:
+            vector_store_ids = [
+                vector_store.id
+                for vector_store in (await client.vector_stores.list()).data
+            ]
+            logger.info("Using all available vector_store_ids: %s", vector_store_ids)
+
+        # Add RAG tools if vector stores are available
+        if vector_store_ids:
+            # logger.info("query_request.solr: %s", query_request.solr)
+            rag_tools = get_rag_tools(vector_store_ids)
+            if rag_tools:
+                logger.info("RAG tools are: %s", rag_tools)
+                toolgroups.extend(rag_tools)
+            # if query_request.solr:
+            #     logger.info(
+            #         "RAG tools configured with Solr filters: %s", query_request.solr
+            #     )
+            # else:
+            #     logger.info("RAG tools configured without Solr filters")
+            else:
+                logger.info("No RAG tools configured")
+        else:
+            logger.info("No vector stores available for RAG tools")
+    else:
+        logger.info("Skipping RAG tools - using direct vector querying instead")

     # Add MCP server tools
     mcp_tools = get_mcp_tools(config.mcp_servers, token, mcp_headers)
diff --git a/src/app/endpoints/shields.py b/src/app/endpoints/shields.py
index 5dd8b8b6c..790c2d0b3 100644
--- a/src/app/endpoints/shields.py
+++ b/src/app/endpoints/shields.py
@@ -70,6 +70,8 @@ async def shields_endpoint_handler(
     try:
         # try to get Llama Stack client
         client = AsyncLlamaStackClientHolder().get_client()
+        # await client.shields.delete(identifier="llama-guard-shielf")
+        # exit(1)
         # retrieve shields
         shields = await client.shields.list()
         s = [dict(s) for s in shields]
diff --git a/src/app/endpoints/streaming_query.py b/src/app/endpoints/streaming_query.py
index afd7293a3..cbe7a5663 100644
--- a/src/app/endpoints/streaming_query.py
+++ b/src/app/endpoints/streaming_query.py
@@ -4,6 +4,8 @@
 import json
 import logging
 import re
+
+# import traceback
 import uuid
 from collections.abc import Callable
 from datetime import UTC, datetime
@@ -13,6 +15,9 @@
     Optional,
 )

+# from urllib.parse import urljoin
+
+
 from fastapi import APIRouter, Request
 from fastapi.responses import StreamingResponse
 from llama_stack_client import (
@@ -43,6 +48,7 @@
     NotFoundResponse,
     PromptTooLongResponse,
     QuotaExceededResponse,
+    # RAGChunk,
     ServiceUnavailableResponse,
     StreamingQueryResponse,
     UnauthorizedResponse,
@@ -59,6 +65,10 @@
 logger = logging.getLogger("app.endpoints.handlers")
 router = APIRouter(tags=["streaming_query"])

+# # When OFFLINE is False, use reference_url for chunk source
+# # When OFFLINE is True, use
parent_id for chunk source +# # TODO: move this setting to a higher level configuration +# OFFLINE = True streaming_query_responses: dict[int | str, dict[str, Any]] = { 200: StreamingQueryResponse.openapi_response(), @@ -132,6 +142,7 @@ def stream_end_event( available_quotas: dict[str, int], referenced_documents: list[ReferencedDocument], media_type: str = MEDIA_TYPE_JSON, + # vector_io_referenced_docs: list[ReferencedDocument] | None = None, ) -> str: """ Yield the end of the data stream. @@ -164,10 +175,35 @@ def stream_end_event( # Use mode="json" to ensure AnyUrl is serialized to string (not just model_dump()) referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents] + # referenced_docs_dict = [ + # { + # "doc_url": v.get("docs_url"), + # "doc_title": v.get("title"), + # } + # for v in metadata_map.values() + # if "docs_url" in v and "title" in v + # ] + + # # Add vector_io referenced documents + # if vector_io_referenced_docs: + # for doc in vector_io_referenced_docs: + # referenced_docs_dict.append( + # { + # "doc_url": doc.doc_url, + # "doc_title": doc.doc_title, + # } + # ) + + # # Convert RAG chunks to dict format + # rag_chunks_dict = [] + # if summary.rag_chunks: + # rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] + return format_stream_data( { "event": "end", "data": { + # "rag_chunks": rag_chunks_dict, "referenced_documents": referenced_docs_dict, "truncated": None, # TODO(jboos): implement truncated "input_tokens": token_usage.input_tokens, @@ -559,6 +595,130 @@ def _handle_heartbeat_event( ) +# def create_agent_response_generator( # pylint: disable=too-many-locals +# context: ResponseGeneratorContext, +# ) -> Any: +# """ +# Create a response generator function for Agent API streaming. + +# This factory function returns an async generator that processes streaming +# responses from the Agent API and yields Server-Sent Events (SSE). + +# Args: +# context: Context object containing all necessary parameters for response generation + +# Returns: +# An async generator function that yields SSE-formatted strings +# """ + +# async def response_generator( +# turn_response: AsyncIterator[AgentTurnResponseStreamChunk], +# ) -> AsyncIterator[str]: +# """ +# Generate SSE formatted streaming response. + +# Asynchronously generates a stream of Server-Sent Events +# (SSE) representing incremental responses from a +# language model turn. + +# Yields start, token, tool call, turn completion, and +# end events as SSE-formatted strings. Collects the +# complete response for transcript storage if enabled. 
+# """ +# chunk_id = 0 +# summary = TurnSummary( +# llm_response="No response from the model", +# tool_calls=[], +# tool_results=[], +# rag_chunks=[], +# ) + +# # Determine media type for response formatting +# media_type = context.query_request.media_type or MEDIA_TYPE_JSON + +# # Send start event at the beginning of the stream +# yield stream_start_event(context.conversation_id) + +# latest_turn: Optional[Any] = None + +# async for chunk in turn_response: +# if chunk.event is None: +# continue +# p = chunk.event.payload +# if p.event_type == "turn_complete": +# summary.llm_response = content_to_str(p.turn.output_message.content) +# latest_turn = p.turn +# system_prompt = get_system_prompt(context.query_request, configuration) +# try: +# update_llm_token_count_from_turn( +# p.turn, context.model_id, context.provider_id, system_prompt +# ) +# except Exception: # pylint: disable=broad-except +# logger.exception("Failed to update token usage metrics") +# elif p.event_type == "step_complete": +# if p.step_details.step_type == "tool_execution": +# summary.append_tool_calls_from_llama(p.step_details) + +# for event in stream_build_event( +# chunk, +# chunk_id, +# context.metadata_map, +# media_type, +# context.conversation_id, +# ): +# chunk_id += 1 +# yield event + +# # Extract token usage from the turn +# token_usage = ( +# extract_token_usage_from_turn(latest_turn) +# if latest_turn is not None +# else TokenCounter() +# ) +# referenced_documents = ( +# parse_referenced_documents(latest_turn) if latest_turn is not None else [] +# ) + +# # Add RAG chunks to summary if available from vector_io query +# if hasattr(context, "vector_io_rag_chunks") and context.vector_io_rag_chunks: +# summary.rag_chunks = context.vector_io_rag_chunks + +# available_quotas = get_available_quotas( +# configuration.quota_limiters, context.user_id +# ) +# yield stream_end_event( +# context.metadata_map, +# summary, +# token_usage, +# available_quotas, +# referenced_documents, +# media_type, +# ) + +# # Perform cleanup tasks (database and cache operations) +# await cleanup_after_streaming( +# user_id=context.user_id, +# conversation_id=context.conversation_id, +# model_id=context.model_id, +# provider_id=context.provider_id, +# llama_stack_model_id=context.llama_stack_model_id, +# query_request=context.query_request, +# summary=summary, +# metadata_map=context.metadata_map, +# started_at=context.started_at, +# client=context.client, +# config=configuration, +# skip_userid_check=context.skip_userid_check, +# get_topic_summary_func=get_topic_summary, +# is_transcripts_enabled_func=is_transcripts_enabled, +# store_transcript_func=store_transcript, +# persist_user_conversation_details_func=persist_user_conversation_details, +# rag_chunks=create_rag_chunks_dict(summary), +# ) + +# return response_generator + + async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments request: Request, query_request: QueryRequest, @@ -662,6 +822,12 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc token, mcp_headers=mcp_headers, ) + + # # Query vector_io for RAG chunks and referenced documents + # vector_io_rag_chunks, vector_io_referenced_docs = ( + # await query_vector_io_for_chunks(client, query_request) + # ) + metadata_map: dict[str, dict[str, Any]] = {} # Create context object for response generator @@ -678,6 +844,12 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc 
metadata_map=metadata_map, ) + # Add vector_io data to context if available + # if hasattr(context, "vector_io_rag_chunks"): + # context.vector_io_rag_chunks = vector_io_rag_chunks + # if hasattr(context, "vector_io_referenced_docs"): + # context.vector_io_referenced_docs = vector_io_referenced_docs + # Create the response generator using the provided factory function response_generator = create_response_generator_func(context) @@ -724,3 +896,365 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc status_code=error_response.status_code, media_type=query_request.media_type or MEDIA_TYPE_JSON, ) + + +# async def query_vector_io_for_chunks( +# client: AsyncLlamaStackClientHolder, +# query_request: QueryRequest, +# ) -> tuple[list[RAGChunk], list[ReferencedDocument]]: +# """ +# Query vector_io database for RAG chunks and referenced documents. + +# Args: +# client: AsyncLlamaStackClient for vector database access +# query_request: The user's query request containing query text and Solr filters + +# Returns: +# tuple: A tuple containing RAG chunks and referenced documents +# """ +# rag_chunks = [] +# doc_ids_from_chunks = [] + +# try: +# # Use the first available vector database if any exist +# try: +# # Try vector_stores first (new API) +# vector_stores = await client.vector_stores.list() +# vector_db_ids = [vs.id for vs in vector_stores.data] +# except AttributeError: +# # Fallback to vector_dbs (old API) +# vector_dbs = await client.vector_dbs.list() +# vector_db_ids = [vdb.identifier for vdb in vector_dbs] + +# if vector_db_ids: +# vector_db_id = vector_db_ids[0] # Use first available vector DB + +# params = {"k": 5, "score_threshold": 0.0} +# logger.info("Initial params: %s", params) +# logger.info("query_request.solr: %s", query_request.solr) +# if query_request.solr: +# # Pass the entire solr dict under the 'solr' key +# params["solr"] = query_request.solr +# logger.info("Final params with solr filters: %s", params) +# else: +# logger.info("No solr filters provided") +# logger.info("Final params being sent to vector_io.query: %s", params) + +# query_response = await client.vector_io.query( +# vector_db_id=vector_db_id, query=query_request.query, params=params +# ) + +# logger.info("The query response total payload: %s", query_response) + +# if query_response.chunks: +# rag_chunks = [ +# RAGChunk( +# content=str(chunk.content), # Convert to string if needed +# source=getattr(chunk, "doc_id", None) +# or getattr(chunk, "source", None), +# score=getattr(chunk, "score", None), +# ) +# for chunk in query_response.chunks[:5] # Limit to top 5 chunks +# ] +# logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) + +# # Extract doc_ids from chunks for referenced_documents +# metadata_doc_ids = set() +# for chunk in query_response.chunks: +# metadata = getattr(chunk, "metadata", None) +# if metadata and "doc_id" in metadata: +# reference_doc = metadata["doc_id"] +# logger.info(reference_doc) +# if reference_doc and reference_doc not in metadata_doc_ids: +# metadata_doc_ids.add(reference_doc) +# doc_ids_from_chunks.append( +# ReferencedDocument( +# doc_title=metadata.get("title", None), +# doc_url="https://mimir.corp.redhat.com" +# + reference_doc, +# ) +# ) + +# logger.info( +# "Extracted %d unique document IDs from chunks", +# len(doc_ids_from_chunks), +# ) + +# # Convert retrieved chunks to RAGChunk format with proper source handling +# final_rag_chunks = [] +# for chunk in query_response.chunks[:5]: +# # Extract source from chunk metadata based on 
OFFLINE flag +# source = None +# if chunk.metadata: +# if OFFLINE: +# parent_id = chunk.metadata.get("parent_id") +# if parent_id: +# source = urljoin( +# "https://mimir.corp.redhat.com", parent_id +# ) +# else: +# source = chunk.metadata.get("reference_url") + +# # Get score from chunk if available +# score = getattr(chunk, "score", None) + +# final_rag_chunks.append( +# RAGChunk( +# content=chunk.content, +# source=source, +# score=score, +# ) +# ) + +# return final_rag_chunks, doc_ids_from_chunks + +# except Exception as e: # pylint: disable=broad-except +# logger.warning("Failed to query vector database for chunks: %s", e) +# logger.debug("Vector DB query error details: %s", traceback.format_exc()) +# # Continue without RAG chunks + +# return rag_chunks, doc_ids_from_chunks + + +# @router.post( +# "/streaming_query", +# response_class=StreamingResponse, +# responses=streaming_query_responses, +# ) +# @authorize(Action.STREAMING_QUERY) +# async def streaming_query_endpoint_handler( # pylint: disable=too-many-locals,too-many-statements +# request: Request, +# query_request: QueryRequest, +# auth: Annotated[AuthTuple, Depends(get_auth_dependency())], +# mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), +# ) -> StreamingResponse: +# """ +# Handle request to the /streaming_query endpoint using Agent API. + +# Returns a streaming response using Server-Sent Events (SSE) format with +# content type text/event-stream. + +# Returns: +# StreamingResponse: An HTTP streaming response yielding +# SSE-formatted events for the query lifecycle with content type +# text/event-stream. + +# Raises: +# HTTPException: +# - 401: Unauthorized - Missing or invalid credentials +# - 403: Forbidden - Insufficient permissions or model override not allowed +# - 404: Not Found - Conversation, model, or provider not found +# - 422: Unprocessable Entity - Request validation failed +# - 429: Too Many Requests - Quota limit exceeded +# - 500: Internal Server Error - Configuration not loaded or other server errors +# - 503: Service Unavailable - Unable to connect to Llama Stack backend +# """ +# return await streaming_query_endpoint_handler_base( +# request=request, +# query_request=query_request, +# auth=auth, +# mcp_headers=mcp_headers, +# retrieve_response_func=retrieve_response, +# create_response_generator_func=create_agent_response_generator, +# ) + + +# async def retrieve_response( +# client: AsyncLlamaStackClient, +# model_id: str, +# query_request: QueryRequest, +# token: str, +# mcp_headers: Optional[dict[str, dict[str, str]]] = None, +# ) -> tuple[AsyncIterator[AgentTurnResponseStreamChunk], str]: +# """ +# Retrieve response from LLMs and agents. + +# Asynchronously retrieves a streaming response and conversation +# ID from the Llama Stack agent for a given user query. + +# This function configures input/output shields, system prompt, +# and tool usage based on the request and environment. It +# prepares the agent with appropriate headers and toolgroups, +# validates attachments if present, and initiates a streaming +# turn with the user's query and any provided documents. + +# Parameters: +# model_id (str): Identifier of the model to use for the query. +# query_request (QueryRequest): The user's query and associated metadata. +# token (str): Authentication token for downstream services. +# mcp_headers (dict[str, dict[str, str]], optional): +# Multi-cluster proxy headers for tool integrations. 
+ +# Returns: +# tuple: A tuple containing the streaming response object +# and the conversation ID. +# """ +# available_input_shields = [ +# shield.identifier +# for shield in filter(is_input_shield, await client.shields.list()) +# ] +# available_output_shields = [ +# shield.identifier +# for shield in filter(is_output_shield, await client.shields.list()) +# ] +# if not available_input_shields and not available_output_shields: +# logger.info("No available shields. Disabling safety") +# else: +# logger.info( +# "Available input shields: %s, output shields: %s", +# available_input_shields, +# available_output_shields, +# ) +# # use system prompt from request or default one +# system_prompt = get_system_prompt(query_request, configuration) +# logger.debug("Using system prompt: %s", system_prompt) + +# # TODO(lucasagomes): redact attachments content before sending to LLM +# # if attachments are provided, validate them +# if query_request.attachments: +# validate_attachments_metadata(query_request.attachments) + +# agent, conversation_id, session_id = await get_agent( +# client, +# model_id, +# system_prompt, +# available_input_shields, +# available_output_shields, +# query_request.conversation_id, +# query_request.no_tools or False, +# ) + +# logger.debug("Conversation ID: %s, session ID: %s", conversation_id, session_id) +# # bypass tools and MCP servers if no_tools is True +# if query_request.no_tools: +# mcp_headers = {} +# agent.extra_headers = {} +# toolgroups = None +# else: +# # preserve compatibility when mcp_headers is not provided +# if mcp_headers is None: +# mcp_headers = {} + +# mcp_headers = handle_mcp_headers_with_toolgroups(mcp_headers, configuration) + +# if not mcp_headers and token: +# for mcp_server in configuration.mcp_servers: +# mcp_headers[mcp_server.url] = { +# "Authorization": f"Bearer {token}", +# } + +# agent.extra_headers = { +# "X-LlamaStack-Provider-Data": json.dumps( +# { +# "mcp_headers": mcp_headers, +# } +# ), +# } + +# # Use specified vector stores or fetch all available ones +# if query_request.vector_store_ids: +# vector_db_ids = query_request.vector_store_ids +# else: +# vector_db_ids = [ +# vector_store.id +# for vector_store in (await client.vector_stores.list()).data +# ] +# toolgroups = (get_rag_toolgroups(vector_db_ids) or []) + [ +# mcp_server.name for mcp_server in configuration.mcp_servers +# ] +# # Convert empty list to None for consistency with existing behavior +# if not toolgroups: +# toolgroups = None + +# # TODO: LCORE-881 - Remove if Llama Stack starts to support these mime types +# # documents: list[Document] = [ +# # ( +# # {"content": doc["content"], "mime_type": "text/plain"} +# # if doc["mime_type"].lower() in ("application/json", "application/xml") +# # else doc +# # ) +# # for doc in query_request.get_documents() +# # ] + +# # Get RAG chunks before sending to LLM (reuse logic from query_vector_io_for_chunks) +# rag_chunks = [] +# try: +# if vector_db_ids: +# vector_db_id = vector_db_ids[0] # Use first available vector DB + +# params = {"k": 5, "score_threshold": 0.0} +# logger.info("Initial params: %s", params) +# logger.info("query_request.solr: %s", query_request.solr) +# if query_request.solr: +# # Pass the entire solr dict under the 'solr' key +# params["solr"] = query_request.solr +# logger.info("Final params with solr filters: %s", params) +# else: +# logger.info("No solr filters provided") +# logger.info("Final params being sent to vector_io.query: %s", params) + +# query_response = await client.vector_io.query( +# 
vector_db_id=vector_db_id, query=query_request.query, params=params +# ) + +# logger.info("The query response total payload: %s", query_response) + +# if query_response.chunks: +# # Convert retrieved chunks to RAGChunk format with proper source handling +# for chunk in query_response.chunks[:5]: +# # Extract source from chunk metadata based on OFFLINE flag +# source = None +# if chunk.metadata: +# if OFFLINE: +# parent_id = chunk.metadata.get("parent_id") +# if parent_id: +# source = urljoin( +# "https://mimir.corp.redhat.com", parent_id +# ) +# else: +# source = chunk.metadata.get("reference_url") + +# # Get score from chunk if available +# score = getattr(chunk, "score", None) + +# rag_chunks.append( +# RAGChunk( +# content=chunk.content, +# source=source, +# score=score, +# ) +# ) + +# logger.info( +# "Retrieved %d chunks from vector DB for streaming", len(rag_chunks) +# ) + +# except Exception as e: +# logger.warning("Failed to query vector database for chunks: %s", e) +# logger.debug("Vector DB query error details: %s", traceback.format_exc()) + +# # Format RAG context for injection into user message +# rag_context = "" +# if rag_chunks: +# context_chunks = [] +# for chunk in rag_chunks[:5]: # Limit to top 5 chunks +# chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}" +# context_chunks.append(chunk_text) +# rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) +# logger.info( +# "Injecting %d RAG chunks into streaming user message", len(context_chunks) +# ) + +# # Inject RAG context into user message +# user_content = query_request.query + rag_context + +# response = await agent.create_turn( +# messages=[UserMessage(role="user", content=user_content).model_dump()], +# session_id=session_id, +# # documents=documents, +# stream=True, +# toolgroups=toolgroups, +# ) +# response = cast(AsyncIterator[AgentTurnResponseStreamChunk], response) + +# return response, conversation_id diff --git a/src/app/endpoints/streaming_query_v2.py b/src/app/endpoints/streaming_query_v2.py index e1c02ca4a..4c9f4125c 100644 --- a/src/app/endpoints/streaming_query_v2.py +++ b/src/app/endpoints/streaming_query_v2.py @@ -417,7 +417,7 @@ async def retrieve_response( # pylint: disable=too-many-locals # Prepare tools for responses API toolgroups = await prepare_tools_for_responses_api( - client, query_request, token, configuration, mcp_headers + client, query_request, token, configuration, mcp_headers=mcp_headers ) # Prepare input for Responses API diff --git a/src/app/main.py b/src/app/main.py index 74a6b86a1..7dd242f93 100644 --- a/src/app/main.py +++ b/src/app/main.py @@ -22,6 +22,10 @@ from utils.common import register_mcp_servers_async from utils.llama_stack_version import check_llama_stack_version +import faulthandler +import signal +faulthandler.register(signal.SIGUSR1) + logger = get_logger(__name__) logger.info("Initializing app") @@ -55,6 +59,12 @@ async def lifespan(_app: FastAPI) -> AsyncIterator[None]: # check if the Llama Stack version is supported by the service await check_llama_stack_version(client) + # try: + # await client.vector_stores.delete(vector_store_id="portal-rag") + # logger.info("Successfully deregistered vector store: portal-rag") + # except Exception as e: + # logger.warning("Failed to deregister vector store 'portal-rag': %s", e) + logger.info("Registering MCP servers") await register_mcp_servers_async(logger, configuration.configuration) get_logger("app.endpoints.handlers") diff --git a/src/constants.py b/src/constants.py index 
681759cd4..e9f4e211b 100644 --- a/src/constants.py +++ b/src/constants.py @@ -127,7 +127,7 @@ MCP_AUTH_CLIENT = "client" # default RAG tool value -DEFAULT_RAG_TOOL = "knowledge_search" +DEFAULT_RAG_TOOL = "file_search" # Media type constants for streaming responses MEDIA_TYPE_JSON = "application/json" diff --git a/src/models/requests.py b/src/models/requests.py index 18e5b4b61..3ac4ede66 100644 --- a/src/models/requests.py +++ b/src/models/requests.py @@ -1,7 +1,7 @@ """Models for REST API requests.""" +from typing import Optional, Self, Any from enum import Enum -from typing import Optional, Self from pydantic import BaseModel, Field, field_validator, model_validator @@ -166,6 +166,13 @@ class QueryRequest(BaseModel): examples=["ocp_docs", "knowledge_base", "vector_db_1"], ) + solr: Optional[dict[str, Any]] = Field( + None, + description="Solr-specific query parameters including filter queries", + examples=[ + {"fq": ["product:*openshift*", "product_version:*4.16*"]}, + ], + ) # provides examples for /docs endpoint model_config = { "extra": "forbid", diff --git a/tests/unit/app/endpoints/test_query_v2.py b/tests/unit/app/endpoints/test_query_v2.py index 37468ad91..10ae325fa 100644 --- a/tests/unit/app/endpoints/test_query_v2.py +++ b/tests/unit/app/endpoints/test_query_v2.py @@ -53,6 +53,16 @@ def test_get_rag_tools() -> None: assert tools[0]["type"] == "file_search" assert tools[0]["vector_store_ids"] == ["db1", "db2"] assert tools[0]["max_num_results"] == 10 + assert "solr" not in tools[0] + + # Test with Solr parameters + solr_params = {"fq": ["product:*openshift*", "product_version:*4.16*"]} + tools_with_solr = get_rag_tools(["db1", "db2"], solr_params) + assert isinstance(tools_with_solr, list) + assert tools_with_solr[0]["type"] == "file_search" + assert tools_with_solr[0]["vector_store_ids"] == ["db1", "db2"] + assert tools_with_solr[0]["max_num_results"] == 10 + assert tools_with_solr[0]["solr"] == solr_params def test_get_mcp_tools_with_and_without_token() -> None: diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index a892aff5d..01d900f89 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -22,6 +22,7 @@ from models.requests import QueryRequest from models.responses import ReferencedDocument from utils.token_counter import TokenCounter +from utils.types import TurnSummary # Note: content_delta module doesn't exist in llama-stack-client 0.3.x # These are mock classes for backward compatibility with Agent API tests @@ -433,6 +434,9 @@ def test_stream_end_event_json(self) -> None: } # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + mock_summary = TurnSummary( + llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] + ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( @@ -444,6 +448,7 @@ def test_stream_end_event_json(self) -> None: ] result = stream_end_event( metadata_map, + mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -473,6 +478,9 @@ def test_stream_end_event_text(self) -> None: } # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + mock_summary = TurnSummary( + llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] + ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( @@ -484,6 +492,7 @@ def test_stream_end_event_text(self) -> None: 
] result = stream_end_event( metadata_map, + mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -502,10 +511,14 @@ def test_stream_end_event_text_no_docs(self) -> None: metadata_map: dict = {} # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + mock_summary = TurnSummary( + llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] + ) available_quotas: dict[str, int] = {} referenced_documents: list[ReferencedDocument] = [] result = stream_end_event( metadata_map, + mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -630,6 +643,9 @@ def test_ols_end_event_structure(self) -> None: } # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + mock_summary = TurnSummary( + llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] + ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( @@ -638,6 +654,7 @@ def test_ols_end_event_structure(self) -> None: ] end_event = stream_end_event( metadata_map, + mock_summary, mock_token_usage, available_quotas, referenced_documents, From 2428b486301b577fcc3cb248f09ecee10617ba95 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Tue, 3 Feb 2026 14:53:20 -0500 Subject: [PATCH 2/9] add hybrid param Signed-off-by: Anxhela Coba --- src/app/endpoints/query_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py index 3348be07a..da5972fc1 100644 --- a/src/app/endpoints/query_v2.py +++ b/src/app/endpoints/query_v2.py @@ -460,7 +460,7 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche if vector_store_ids: vector_store_id = vector_store_ids[0] # Use first available vector store - params = {"k": 5, "score_threshold": 0.0} + params = {"k": 5, "score_threshold": 0.0, "mode": "hybrid"} logger.info("Initial params: %s", params) logger.info("query_request.solr: %s", query_request.solr) if query_request.solr: From 19bc8ca38d6afa8c6250241941a8b1b4b5483a30 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Tue, 3 Feb 2026 18:32:39 -0500 Subject: [PATCH 3/9] streaming query Signed-off-by: Anxhela Coba --- src/app/endpoints/query_v2.py | 19 +- src/app/endpoints/streaming_query_v2.py | 203 +++++++++++++++++- tests/unit/app/endpoints/test_query_v2.py | 18 +- .../app/endpoints/test_streaming_query.py | 4 - .../app/endpoints/test_streaming_query_v2.py | 12 +- 5 files changed, 233 insertions(+), 23 deletions(-) diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py index da5972fc1..40b6ea7ae 100644 --- a/src/app/endpoints/query_v2.py +++ b/src/app/endpoints/query_v2.py @@ -472,7 +472,9 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche logger.info("Final params being sent to vector_io.query: %s", params) query_response = await client.vector_io.query( - vector_store_id=vector_store_id, query=query_request.query, params=params + vector_store_id=vector_store_id, + query=query_request.query, + params=params, ) logger.info("The query response total payload: %s", query_response) @@ -504,7 +506,9 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche title = title or cm.get("title") reference_url = cm.get("reference_url") else: - doc_id = getattr(cm, "doc_id", None) or getattr(cm, "document_id", None) + doc_id = getattr(cm, "doc_id", None) or getattr( + cm, "document_id", None + ) title = title or getattr(cm, "title", 
None) reference_url = getattr(cm, "reference_url", None) else: @@ -523,7 +527,11 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche else: # Use reference_url if online reference_doc = reference_url or doc_id - doc_url = reference_doc if reference_doc.startswith("http") else ("https://mimir.corp.redhat.com" + reference_doc) + doc_url = ( + reference_doc + if reference_doc.startswith("http") + else ("https://mimir.corp.redhat.com" + reference_doc) + ) if reference_doc and reference_doc not in metadata_doc_ids: metadata_doc_ids.add(reference_doc) @@ -534,8 +542,9 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche ) ) - logger.info("Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks)) - + logger.info( + "Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks) + ) except ( APIConnectionError, diff --git a/src/app/endpoints/streaming_query_v2.py b/src/app/endpoints/streaming_query_v2.py index 4c9f4125c..de96cdf0a 100644 --- a/src/app/endpoints/streaming_query_v2.py +++ b/src/app/endpoints/streaming_query_v2.py @@ -1,7 +1,9 @@ """Streaming query handler using Responses API (v2).""" import logging +import traceback from typing import Annotated, Any, AsyncIterator, Optional, cast +from urllib.parse import urljoin from fastapi import APIRouter, Depends, Request from fastapi.responses import StreamingResponse @@ -14,7 +16,7 @@ OpenAIResponseObjectStreamResponseOutputTextDelta, OpenAIResponseObjectStreamResponseOutputTextDone, ) -from llama_stack_client import AsyncLlamaStackClient +from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient from app.endpoints.query import ( is_transcripts_enabled, @@ -51,6 +53,7 @@ InternalServerErrorResponse, NotFoundResponse, QuotaExceededResponse, + ReferencedDocument, ServiceUnavailableResponse, StreamingQueryResponse, UnauthorizedResponse, @@ -97,6 +100,7 @@ def create_responses_response_generator( # pylint: disable=too-many-locals,too-many-statements context: ResponseGeneratorContext, + doc_ids_from_chunks: Optional[list[ReferencedDocument]] = None, ) -> Any: """ Create a response generator function for Responses API streaming. 
@@ -106,6 +110,7 @@ def create_responses_response_generator( # pylint: disable=too-many-locals,too- Args: context: Context object containing all necessary parameters for response generation + doc_ids_from_chunks: Referenced documents extracted from vector DB chunks Returns: An async generator function that yields SSE-formatted strings @@ -294,9 +299,13 @@ async def response_generator( # pylint: disable=too-many-branches,too-many-stat model_id=context.model_id, provider_id=context.provider_id, ) - referenced_documents = parse_referenced_documents_from_responses_api( + response_referenced_documents = parse_referenced_documents_from_responses_api( cast(OpenAIResponseObject, latest_response_object) ) + # Combine doc_ids_from_chunks with response_referenced_documents + all_referenced_documents = ( + doc_ids_from_chunks or [] + ) + response_referenced_documents available_quotas = get_available_quotas( configuration.quota_limiters, context.user_id ) @@ -304,7 +313,7 @@ async def response_generator( # pylint: disable=too-many-branches,too-many-stat context.metadata_map, token_usage, available_quotas, - referenced_documents, + all_referenced_documents, media_type, ) @@ -382,7 +391,7 @@ async def retrieve_response( # pylint: disable=too-many-locals query_request: QueryRequest, token: str, mcp_headers: Optional[dict[str, dict[str, str]]] = None, -) -> tuple[AsyncIterator[OpenAIResponseObjectStream], str]: +) -> tuple[AsyncIterator[OpenAIResponseObjectStream], str, list[ReferencedDocument]]: """ Retrieve response from LLMs and agents. @@ -403,8 +412,8 @@ async def retrieve_response( # pylint: disable=too-many-locals Multi-cluster proxy headers for tool integrations. Returns: - tuple: A tuple containing the streaming response object - and the conversation ID. + tuple: A tuple containing the streaming response object, + the conversation ID, and the list of referenced documents from vector DB chunks. 
""" # use system prompt from request or default one system_prompt = get_system_prompt(query_request, configuration) @@ -415,11 +424,180 @@ async def retrieve_response( # pylint: disable=too-many-locals if query_request.attachments: validate_attachments_metadata(query_request.attachments) - # Prepare tools for responses API + # Prepare tools for responses API - skip RAG tools since we're doing direct vector query toolgroups = await prepare_tools_for_responses_api( - client, query_request, token, configuration, mcp_headers=mcp_headers + client, + query_request, + token, + configuration, + mcp_headers=mcp_headers, + skip_rag_tools=True, ) + # Extract RAG chunks from vector DB query response BEFORE calling responses API + rag_chunks = [] + doc_ids_from_chunks = [] + retrieved_chunks = [] + retrieved_scores = [] + + # When offline is False, use reference_url for chunk source + # When offline is True, use parent_id for chunk source + # TODO: move this setting to a higher level configuration + offline = True + + try: + # Get vector stores for direct querying + if query_request.vector_store_ids: + vector_store_ids = query_request.vector_store_ids + logger.info( + "Using specified vector_store_ids for direct query: %s", + vector_store_ids, + ) + else: + vector_store_ids = [ + vector_store.id + for vector_store in (await client.vector_stores.list()).data + ] + logger.info( + "Using all available vector_store_ids for direct query: %s", + vector_store_ids, + ) + + if vector_store_ids: + vector_store_id = vector_store_ids[0] # Use first available vector store + + params = {"k": 5, "score_threshold": 0.0, "mode": "hybrid"} + logger.info("Initial params: %s", params) + logger.info("query_request.solr: %s", query_request.solr) + if query_request.solr: + # Pass the entire solr dict under the 'solr' key + params["solr"] = query_request.solr + logger.info("Final params with solr filters: %s", params) + else: + logger.info("No solr filters provided") + logger.info("Final params being sent to vector_io.query: %s", params) + + query_response = await client.vector_io.query( + vector_store_id=vector_store_id, + query=query_request.query, + params=params, + ) + + logger.info("The query response total payload: %s", query_response) + + if query_response.chunks: + retrieved_chunks = query_response.chunks + retrieved_scores = ( + query_response.scores if hasattr(query_response, "scores") else [] + ) + + # Extract doc_ids from chunks for referenced_documents + metadata_doc_ids = set() + + for chunk in query_response.chunks: + logger.info("Extract doc ids from chunk: %s", chunk) + + # 1) dict metadata + md = getattr(chunk, "metadata", None) or {} + doc_id = md.get("doc_id") or md.get("document_id") + title = md.get("title") + + # 2) typed chunk_metadata + if not doc_id: + cm = getattr(chunk, "chunk_metadata", None) + if cm is not None: + # cm might be a pydantic model or a dict depending on caller + if isinstance(cm, dict): + doc_id = cm.get("doc_id") or cm.get("document_id") + title = title or cm.get("title") + reference_url = cm.get("reference_url") + else: + doc_id = getattr(cm, "doc_id", None) or getattr( + cm, "document_id", None + ) + title = title or getattr(cm, "title", None) + reference_url = getattr(cm, "reference_url", None) + else: + reference_url = None + else: + reference_url = md.get("reference_url") + + if not doc_id and not reference_url: + continue + + # Build URL based on offline flag + if offline: + # Use parent/doc path + reference_doc = doc_id + doc_url = "https://mimir.corp.redhat.com" + 
reference_doc + else: + # Use reference_url if online + reference_doc = reference_url or doc_id + doc_url = ( + reference_doc + if reference_doc.startswith("http") + else ("https://mimir.corp.redhat.com" + reference_doc) + ) + + if reference_doc and reference_doc not in metadata_doc_ids: + metadata_doc_ids.add(reference_doc) + doc_ids_from_chunks.append( + ReferencedDocument( + doc_title=title, + doc_url=doc_url, + ) + ) + + logger.info( + "Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks) + ) + + except ( + APIConnectionError, + APIStatusError, + AttributeError, + KeyError, + ValueError, + ) as e: + logger.warning("Failed to query vector database for chunks: %s", e) + logger.debug("Vector DB query error details: %s", traceback.format_exc()) + # Continue without RAG chunks + + # Convert retrieved chunks to RAGChunk format + for i, chunk in enumerate(retrieved_chunks): + # Extract source from chunk metadata based on offline flag + source = None + if chunk.metadata: + if offline: + parent_id = chunk.metadata.get("parent_id") + if parent_id: + source = urljoin("https://mimir.corp.redhat.com", parent_id) + else: + source = chunk.metadata.get("reference_url") + + # Get score from retrieved_scores list if available + score = retrieved_scores[i] if i < len(retrieved_scores) else None + + rag_chunks.append( + RAGChunk( + content=chunk.content, + source=source, + score=score, + ) + ) + + logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) + + # Format RAG context for injection into user message + rag_context = "" + if rag_chunks: + context_chunks = [] + for chunk in rag_chunks[:5]: # Limit to top 5 chunks + chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}" + context_chunks.append(chunk_text) + rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) + logger.info("Injecting %d RAG chunks into user message", len(context_chunks)) + # Prepare input for Responses API # Convert attachments to text and concatenate with query input_text = query_request.query @@ -430,6 +608,9 @@ async def retrieve_response( # pylint: disable=too-many-locals f"{attachment.content}" ) + # Add RAG context to input text + input_text += rag_context + # Handle conversation ID for Responses API # Create conversation upfront if not provided conversation_id = query_request.conversation_id @@ -475,4 +656,8 @@ async def retrieve_response( # pylint: disable=too-many-locals response = await client.responses.create(**create_params) response_stream = cast(AsyncIterator[OpenAIResponseObjectStream], response) - return response_stream, normalize_conversation_id(conversation_id) + return ( + response_stream, + normalize_conversation_id(conversation_id), + doc_ids_from_chunks, + ) diff --git a/tests/unit/app/endpoints/test_query_v2.py b/tests/unit/app/endpoints/test_query_v2.py index 10ae325fa..c412a2574 100644 --- a/tests/unit/app/endpoints/test_query_v2.py +++ b/tests/unit/app/endpoints/test_query_v2.py @@ -289,8 +289,20 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=to # Mock shields.list and models.list for run_shield_moderation mock_client.shields.list = mocker.AsyncMock(return_value=[]) mock_client.models.list = mocker.AsyncMock(return_value=[]) + + # Mock vector_io.query for direct vector querying + mock_query_response = mocker.Mock() + mock_query_response.chunks = [] + mock_query_response.scores = [] + mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) 
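The vector_io.query mock above stands in for the direct vector query this patch adds to retrieve_response. A minimal sketch of that call path, assuming an AsyncLlamaStackClient `client` and a QueryRequest `query_request` as used in the hunks above (a sketch, not a verbatim excerpt):

    from llama_stack_client import AsyncLlamaStackClient

    from models.requests import QueryRequest


    async def direct_vector_query(
        client: AsyncLlamaStackClient,
        query_request: QueryRequest,
        vector_store_id: str,
    ) -> tuple[list, list]:
        """Query one vector store directly, mirroring the patched retrieve_response."""
        params: dict = {"k": 5, "score_threshold": 0.0, "mode": "hybrid"}
        if query_request.solr:
            # Solr filters are forwarded verbatim under the 'solr' key
            params["solr"] = query_request.solr
        response = await client.vector_io.query(
            vector_store_id=vector_store_id,  # the patch uses the first available store
            query=query_request.query,
            params=params,
        )
        # scores may be absent depending on the provider, hence the fallback
        return response.chunks or [], getattr(response, "scores", [])

As in the patch, only the first available vector store is queried; the remaining stores are ignored.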
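The referenced-document extraction in the same hunk boils down to one URL rule per chunk. A sketch, with a None guard added on the offline branch that the patch itself does not have (the Mimir base URL is hard-coded at this point in the series and extracted to the MIMIR_DOC_URL constant in a later patch):

    def build_doc_url(
        doc_id: str | None,
        reference_url: str | None,
        offline: bool = True,
    ) -> str | None:
        """Pick the chunk's document URL the way the patch does."""
        base = "https://mimir.corp.redhat.com"  # later extracted as MIMIR_DOC_URL
        if offline:
            # offline mode keys the URL off the parent/doc path
            return base + doc_id if doc_id else None
        # online mode prefers reference_url, falling back to doc_id
        ref = reference_url or doc_id
        if ref is None:
            return None
        return ref if ref.startswith("http") else base + ref

Duplicates are collapsed through the metadata_doc_ids set before a ReferencedDocument is appended, so each document is referenced at most once.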
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + + # Mock shield moderation + mock_moderation_result = mocker.Mock() + mock_moderation_result.blocked = False + mocker.patch("app.endpoints.query_v2.run_shield_moderation", return_value=mock_moderation_result) + mock_cfg = mocker.Mock() mock_cfg.mcp_servers = [ ModelContextProtocolServer( @@ -314,11 +326,9 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=to kwargs = mock_client.responses.create.call_args.kwargs tools = kwargs["tools"] assert isinstance(tools, list) - # Expect one file_search and one mcp tool + # Expect only MCP tools since RAG tools are skipped when doing direct vector querying tool_types = {t.get("type") for t in tools} - assert tool_types == {"file_search", "mcp"} - file_search = next(t for t in tools if t["type"] == "file_search") - assert file_search["vector_store_ids"] == ["dbA"] + assert tool_types == {"mcp"} mcp_tool = next(t for t in tools if t["type"] == "mcp") assert mcp_tool["server_label"] == "fs" assert mcp_tool["headers"] == {"Authorization": "Bearer mytoken"} diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index 01d900f89..b75f33f8d 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -448,7 +448,6 @@ def test_stream_end_event_json(self) -> None: ] result = stream_end_event( metadata_map, - mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -492,7 +491,6 @@ def test_stream_end_event_text(self) -> None: ] result = stream_end_event( metadata_map, - mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -518,7 +516,6 @@ def test_stream_end_event_text_no_docs(self) -> None: referenced_documents: list[ReferencedDocument] = [] result = stream_end_event( metadata_map, - mock_summary, mock_token_usage, available_quotas, referenced_documents, @@ -654,7 +651,6 @@ def test_ols_end_event_structure(self) -> None: ] end_event = stream_end_event( metadata_map, - mock_summary, mock_token_usage, available_quotas, referenced_documents, diff --git a/tests/unit/app/endpoints/test_streaming_query_v2.py b/tests/unit/app/endpoints/test_streaming_query_v2.py index d4740786e..69cde6e9e 100644 --- a/tests/unit/app/endpoints/test_streaming_query_v2.py +++ b/tests/unit/app/endpoints/test_streaming_query_v2.py @@ -53,6 +53,10 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( # Mock shields.list and models.list for run_shield_moderation mock_client.shields.list = mocker.AsyncMock(return_value=[]) mock_client.models.list = mocker.AsyncMock(return_value=[]) + # Mock vector_io.query for direct vector querying + mock_query_response = mocker.Mock() + mock_query_response.chunks = [] + mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) mocker.patch( "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" @@ -77,7 +81,9 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( tools = kwargs["tools"] assert isinstance(tools, list) types = {t.get("type") for t in tools} - assert types == {"file_search", "mcp"} + # Since we're now skipping RAG tools and doing direct vector querying, + # we should only see MCP tools, not file_search tools + assert types == {"mcp"} @pytest.mark.asyncio @@ -95,6 +101,10 @@ async def test_retrieve_response_no_tools_passes_none(mocker: MockerFixture) -> # Mock shields.list and models.list for run_shield_moderation 
mock_client.shields.list = mocker.AsyncMock(return_value=[]) mock_client.models.list = mocker.AsyncMock(return_value=[]) + # Mock vector_io.query for direct vector querying + mock_query_response = mocker.Mock() + mock_query_response.chunks = [] + mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) mocker.patch( "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" From fdd402865fe09dbc5a57c238f47da83e694beb7a Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Wed, 4 Feb 2026 13:38:54 -0500 Subject: [PATCH 4/9] lint Signed-off-by: Anxhela Coba --- src/app/main.py | 1 + src/utils/endpoints.py | 2 +- tests/unit/app/endpoints/test_query_v2.py | 11 +++++++---- tests/unit/app/endpoints/test_streaming_query.py | 12 ------------ 4 files changed, 9 insertions(+), 17 deletions(-) diff --git a/src/app/main.py b/src/app/main.py index 7dd242f93..f011ee22a 100644 --- a/src/app/main.py +++ b/src/app/main.py @@ -24,6 +24,7 @@ import faulthandler import signal + faulthandler.register(signal.SIGUSR1) logger = get_logger(__name__) diff --git a/src/utils/endpoints.py b/src/utils/endpoints.py index b0b49917d..016cf95fc 100644 --- a/src/utils/endpoints.py +++ b/src/utils/endpoints.py @@ -592,7 +592,7 @@ def _process_rag_chunks_for_documents( for chunk in rag_chunks: src = chunk.source - if not src or src == constants.DEFAULT_RAG_TOOL: + if not src or src == constants.DEFAULT_RAG_TOOL or src.endswith("_search"): continue if src.startswith("http"): diff --git a/tests/unit/app/endpoints/test_query_v2.py b/tests/unit/app/endpoints/test_query_v2.py index c412a2574..6fd6f3720 100644 --- a/tests/unit/app/endpoints/test_query_v2.py +++ b/tests/unit/app/endpoints/test_query_v2.py @@ -289,7 +289,7 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=to # Mock shields.list and models.list for run_shield_moderation mock_client.shields.list = mocker.AsyncMock(return_value=[]) mock_client.models.list = mocker.AsyncMock(return_value=[]) - + # Mock vector_io.query for direct vector querying mock_query_response = mocker.Mock() mock_query_response.chunks = [] @@ -297,12 +297,15 @@ async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=to mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - + # Mock shield moderation mock_moderation_result = mocker.Mock() mock_moderation_result.blocked = False - mocker.patch("app.endpoints.query_v2.run_shield_moderation", return_value=mock_moderation_result) - + mocker.patch( + "app.endpoints.query_v2.run_shield_moderation", + return_value=mock_moderation_result, + ) + mock_cfg = mocker.Mock() mock_cfg.mcp_servers = [ ModelContextProtocolServer( diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index b75f33f8d..f058db17c 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -434,9 +434,6 @@ def test_stream_end_event_json(self) -> None: } # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - mock_summary = TurnSummary( - llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] - ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( @@ -477,9 +474,6 @@ def test_stream_end_event_text(self) -> None: } # Create mock objects for the test mock_token_usage = 
TokenCounter(input_tokens=100, output_tokens=50) - mock_summary = TurnSummary( - llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] - ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( @@ -509,9 +503,6 @@ def test_stream_end_event_text_no_docs(self) -> None: metadata_map: dict = {} # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - mock_summary = TurnSummary( - llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] - ) available_quotas: dict[str, int] = {} referenced_documents: list[ReferencedDocument] = [] result = stream_end_event( @@ -640,9 +631,6 @@ def test_ols_end_event_structure(self) -> None: } # Create mock objects for the test mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - mock_summary = TurnSummary( - llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] - ) available_quotas: dict[str, int] = {} referenced_documents = [ ReferencedDocument( From 31ffb01c7b5638179580ee8c9ab472590a981cb6 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Wed, 4 Feb 2026 15:29:26 -0500 Subject: [PATCH 5/9] clean up Signed-off-by: Anxhela Coba --- src/app/endpoints/query.py | 30 -- src/app/endpoints/query_v2.py | 8 +- src/app/endpoints/streaming_query.py | 535 ------------------------ src/app/endpoints/streaming_query_v2.py | 8 +- src/constants.py | 3 + 5 files changed, 11 insertions(+), 573 deletions(-) diff --git a/src/app/endpoints/query.py b/src/app/endpoints/query.py index 209c6f2e4..eb28a0018 100644 --- a/src/app/endpoints/query.py +++ b/src/app/endpoints/query.py @@ -582,33 +582,3 @@ def validate_attachments_metadata(attachments: list[Attachment]) -> None: response="Invalid attribute value", cause=message ) raise HTTPException(**response.model_dump()) - - -# def get_rag_toolgroups( -# vector_db_ids: list[str], -# ) -> Optional[list[Toolgroup]]: -# """ -# Return a list of RAG Tool groups if the given vector DB list is not empty. - -# Generate a list containing a RAG knowledge search toolgroup if -# vector database IDs are provided. - -# Parameters: -# vector_db_ids (list[str]): List of vector database identifiers to include in the toolgroup. - -# Returns: -# Optional[list[Toolgroup]]: A list with a single RAG toolgroup if -# vector_db_ids is non-empty; otherwise, None. 
-# """ -# return ( -# [ -# ToolgroupAgentToolGroupWithArgs( -# name="builtin::rag/file_search", -# args={ -# "vector_db_ids": vector_db_ids, -# }, -# ) -# ] -# if vector_db_ids -# else None -# ) diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py index 40b6ea7ae..3da330451 100644 --- a/src/app/endpoints/query_v2.py +++ b/src/app/endpoints/query_v2.py @@ -33,7 +33,7 @@ from authentication.interface import AuthTuple from authorization.middleware import authorize from configuration import AppConfig, configuration -from constants import DEFAULT_RAG_TOOL +from constants import DEFAULT_RAG_TOOL, MIMIR_DOC_URL from models.config import Action, ModelContextProtocolServer from models.requests import QueryRequest from models.responses import ( @@ -523,14 +523,14 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche if offline: # Use parent/doc path reference_doc = doc_id - doc_url = "https://mimir.corp.redhat.com" + reference_doc + doc_url = MIMIR_DOC_URL + reference_doc else: # Use reference_url if online reference_doc = reference_url or doc_id doc_url = ( reference_doc if reference_doc.startswith("http") - else ("https://mimir.corp.redhat.com" + reference_doc) + else (MIMIR_DOC_URL + reference_doc) ) if reference_doc and reference_doc not in metadata_doc_ids: @@ -565,7 +565,7 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche if offline: parent_id = chunk.metadata.get("parent_id") if parent_id: - source = urljoin("https://mimir.corp.redhat.com", parent_id) + source = urljoin(MIMIR_DOC_URL, parent_id) else: source = chunk.metadata.get("reference_url") diff --git a/src/app/endpoints/streaming_query.py b/src/app/endpoints/streaming_query.py index cbe7a5663..d596c30f6 100644 --- a/src/app/endpoints/streaming_query.py +++ b/src/app/endpoints/streaming_query.py @@ -4,8 +4,6 @@ import json import logging import re - -# import traceback import uuid from collections.abc import Callable from datetime import UTC, datetime @@ -15,9 +13,6 @@ Optional, ) -# from urllib.parse import urljoin - - from fastapi import APIRouter, Request from fastapi.responses import StreamingResponse from llama_stack_client import ( @@ -48,7 +43,6 @@ NotFoundResponse, PromptTooLongResponse, QuotaExceededResponse, - # RAGChunk, ServiceUnavailableResponse, StreamingQueryResponse, UnauthorizedResponse, @@ -65,11 +59,6 @@ logger = logging.getLogger("app.endpoints.handlers") router = APIRouter(tags=["streaming_query"]) -# # When OFFLINE is False, use reference_url for chunk source -# # When OFFLINE is True, use parent_id for chunk source -# # TODO: move this setting to a higher level configuration -# OFFLINE = True - streaming_query_responses: dict[int | str, dict[str, Any]] = { 200: StreamingQueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( @@ -142,7 +131,6 @@ def stream_end_event( available_quotas: dict[str, int], referenced_documents: list[ReferencedDocument], media_type: str = MEDIA_TYPE_JSON, - # vector_io_referenced_docs: list[ReferencedDocument] | None = None, ) -> str: """ Yield the end of the data stream. 
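With the TurnSummary argument gone, stream_end_event call sites reduce to the shape the updated tests above exercise. A sketch with placeholder values (the argument objects are assumed to have been built during streaming):

    end_event = stream_end_event(
        metadata_map,          # dict[str, dict[str, Any]] accumulated while streaming
        token_usage,           # TokenCounter(input_tokens=..., output_tokens=...)
        available_quotas,      # dict[str, int] from the configured quota limiters
        referenced_documents,  # list[ReferencedDocument]
        MEDIA_TYPE_JSON,
    )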
@@ -175,35 +163,10 @@ def stream_end_event( # Use mode="json" to ensure AnyUrl is serialized to string (not just model_dump()) referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents] - # referenced_docs_dict = [ - # { - # "doc_url": v.get("docs_url"), - # "doc_title": v.get("title"), - # } - # for v in metadata_map.values() - # if "docs_url" in v and "title" in v - # ] - - # # Add vector_io referenced documents - # if vector_io_referenced_docs: - # for doc in vector_io_referenced_docs: - # referenced_docs_dict.append( - # { - # "doc_url": doc.doc_url, - # "doc_title": doc.doc_title, - # } - # ) - - # # Convert RAG chunks to dict format - # rag_chunks_dict = [] - # if summary.rag_chunks: - # rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] - return format_stream_data( { "event": "end", "data": { - # "rag_chunks": rag_chunks_dict, "referenced_documents": referenced_docs_dict, "truncated": None, # TODO(jboos): implement truncated "input_tokens": token_usage.input_tokens, @@ -594,131 +557,6 @@ def _handle_heartbeat_event( media_type=media_type, ) - -# def create_agent_response_generator( # pylint: disable=too-many-locals -# context: ResponseGeneratorContext, -# ) -> Any: -# """ -# Create a response generator function for Agent API streaming. - -# This factory function returns an async generator that processes streaming -# responses from the Agent API and yields Server-Sent Events (SSE). - -# Args: -# context: Context object containing all necessary parameters for response generation - -# Returns: -# An async generator function that yields SSE-formatted strings -# """ - -# async def response_generator( -# turn_response: AsyncIterator[AgentTurnResponseStreamChunk], -# ) -> AsyncIterator[str]: -# """ -# Generate SSE formatted streaming response. - -# Asynchronously generates a stream of Server-Sent Events -# (SSE) representing incremental responses from a -# language model turn. - -# Yields start, token, tool call, turn completion, and -# end events as SSE-formatted strings. Collects the -# complete response for transcript storage if enabled. 
-# """ -# chunk_id = 0 -# summary = TurnSummary( -# llm_response="No response from the model", -# tool_calls=[], -# tool_results=[], -# rag_chunks=[], -# ) - -# # Determine media type for response formatting -# media_type = context.query_request.media_type or MEDIA_TYPE_JSON - -# # Send start event at the beginning of the stream -# yield stream_start_event(context.conversation_id) - -# latest_turn: Optional[Any] = None - -# async for chunk in turn_response: -# if chunk.event is None: -# continue -# p = chunk.event.payload -# if p.event_type == "turn_complete": -# summary.llm_response = content_to_str(p.turn.output_message.content) -# latest_turn = p.turn -# system_prompt = get_system_prompt(context.query_request, configuration) -# try: -# update_llm_token_count_from_turn( -# p.turn, context.model_id, context.provider_id, system_prompt -# ) -# except Exception: # pylint: disable=broad-except -# logger.exception("Failed to update token usage metrics") -# elif p.event_type == "step_complete": -# if p.step_details.step_type == "tool_execution": -# summary.append_tool_calls_from_llama(p.step_details) - -# for event in stream_build_event( -# chunk, -# chunk_id, -# context.metadata_map, -# media_type, -# context.conversation_id, -# ): -# chunk_id += 1 -# yield event - -# # Extract token usage from the turn -# token_usage = ( -# extract_token_usage_from_turn(latest_turn) -# if latest_turn is not None -# else TokenCounter() -# ) -# referenced_documents = ( -# parse_referenced_documents(latest_turn) if latest_turn is not None else [] -# ) - -# # Add RAG chunks to summary if available from vector_io query -# if hasattr(context, "vector_io_rag_chunks") and context.vector_io_rag_chunks: -# summary.rag_chunks = context.vector_io_rag_chunks - -# available_quotas = get_available_quotas( -# configuration.quota_limiters, context.user_id -# ) -# yield stream_end_event( -# context.metadata_map, -# summary, -# token_usage, -# available_quotas, -# referenced_documents, -# media_type, -# ) - -# # Perform cleanup tasks (database and cache operations) -# await cleanup_after_streaming( -# user_id=context.user_id, -# conversation_id=context.conversation_id, -# model_id=context.model_id, -# provider_id=context.provider_id, -# llama_stack_model_id=context.llama_stack_model_id, -# query_request=context.query_request, -# summary=summary, -# metadata_map=context.metadata_map, -# started_at=context.started_at, -# client=context.client, -# config=configuration, -# skip_userid_check=context.skip_userid_check, -# get_topic_summary_func=get_topic_summary, -# is_transcripts_enabled_func=is_transcripts_enabled, -# store_transcript_func=store_transcript, -# persist_user_conversation_details_func=persist_user_conversation_details, -# rag_chunks=create_rag_chunks_dict(summary), -# ) - -# return response_generator - - async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments request: Request, query_request: QueryRequest, @@ -823,11 +661,6 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc mcp_headers=mcp_headers, ) - # # Query vector_io for RAG chunks and referenced documents - # vector_io_rag_chunks, vector_io_referenced_docs = ( - # await query_vector_io_for_chunks(client, query_request) - # ) - metadata_map: dict[str, dict[str, Any]] = {} # Create context object for response generator @@ -844,12 +677,6 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc 
metadata_map=metadata_map, ) - # Add vector_io data to context if available - # if hasattr(context, "vector_io_rag_chunks"): - # context.vector_io_rag_chunks = vector_io_rag_chunks - # if hasattr(context, "vector_io_referenced_docs"): - # context.vector_io_referenced_docs = vector_io_referenced_docs - # Create the response generator using the provided factory function response_generator = create_response_generator_func(context) @@ -896,365 +723,3 @@ async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-loc status_code=error_response.status_code, media_type=query_request.media_type or MEDIA_TYPE_JSON, ) - - -# async def query_vector_io_for_chunks( -# client: AsyncLlamaStackClientHolder, -# query_request: QueryRequest, -# ) -> tuple[list[RAGChunk], list[ReferencedDocument]]: -# """ -# Query vector_io database for RAG chunks and referenced documents. - -# Args: -# client: AsyncLlamaStackClient for vector database access -# query_request: The user's query request containing query text and Solr filters - -# Returns: -# tuple: A tuple containing RAG chunks and referenced documents -# """ -# rag_chunks = [] -# doc_ids_from_chunks = [] - -# try: -# # Use the first available vector database if any exist -# try: -# # Try vector_stores first (new API) -# vector_stores = await client.vector_stores.list() -# vector_db_ids = [vs.id for vs in vector_stores.data] -# except AttributeError: -# # Fallback to vector_dbs (old API) -# vector_dbs = await client.vector_dbs.list() -# vector_db_ids = [vdb.identifier for vdb in vector_dbs] - -# if vector_db_ids: -# vector_db_id = vector_db_ids[0] # Use first available vector DB - -# params = {"k": 5, "score_threshold": 0.0} -# logger.info("Initial params: %s", params) -# logger.info("query_request.solr: %s", query_request.solr) -# if query_request.solr: -# # Pass the entire solr dict under the 'solr' key -# params["solr"] = query_request.solr -# logger.info("Final params with solr filters: %s", params) -# else: -# logger.info("No solr filters provided") -# logger.info("Final params being sent to vector_io.query: %s", params) - -# query_response = await client.vector_io.query( -# vector_db_id=vector_db_id, query=query_request.query, params=params -# ) - -# logger.info("The query response total payload: %s", query_response) - -# if query_response.chunks: -# rag_chunks = [ -# RAGChunk( -# content=str(chunk.content), # Convert to string if needed -# source=getattr(chunk, "doc_id", None) -# or getattr(chunk, "source", None), -# score=getattr(chunk, "score", None), -# ) -# for chunk in query_response.chunks[:5] # Limit to top 5 chunks -# ] -# logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) - -# # Extract doc_ids from chunks for referenced_documents -# metadata_doc_ids = set() -# for chunk in query_response.chunks: -# metadata = getattr(chunk, "metadata", None) -# if metadata and "doc_id" in metadata: -# reference_doc = metadata["doc_id"] -# logger.info(reference_doc) -# if reference_doc and reference_doc not in metadata_doc_ids: -# metadata_doc_ids.add(reference_doc) -# doc_ids_from_chunks.append( -# ReferencedDocument( -# doc_title=metadata.get("title", None), -# doc_url="https://mimir.corp.redhat.com" -# + reference_doc, -# ) -# ) - -# logger.info( -# "Extracted %d unique document IDs from chunks", -# len(doc_ids_from_chunks), -# ) - -# # Convert retrieved chunks to RAGChunk format with proper source handling -# final_rag_chunks = [] -# for chunk in query_response.chunks[:5]: -# # Extract source from chunk metadata based on 
OFFLINE flag -# source = None -# if chunk.metadata: -# if OFFLINE: -# parent_id = chunk.metadata.get("parent_id") -# if parent_id: -# source = urljoin( -# "https://mimir.corp.redhat.com", parent_id -# ) -# else: -# source = chunk.metadata.get("reference_url") - -# # Get score from chunk if available -# score = getattr(chunk, "score", None) - -# final_rag_chunks.append( -# RAGChunk( -# content=chunk.content, -# source=source, -# score=score, -# ) -# ) - -# return final_rag_chunks, doc_ids_from_chunks - -# except Exception as e: # pylint: disable=broad-except -# logger.warning("Failed to query vector database for chunks: %s", e) -# logger.debug("Vector DB query error details: %s", traceback.format_exc()) -# # Continue without RAG chunks - -# return rag_chunks, doc_ids_from_chunks - - -# @router.post( -# "/streaming_query", -# response_class=StreamingResponse, -# responses=streaming_query_responses, -# ) -# @authorize(Action.STREAMING_QUERY) -# async def streaming_query_endpoint_handler( # pylint: disable=too-many-locals,too-many-statements -# request: Request, -# query_request: QueryRequest, -# auth: Annotated[AuthTuple, Depends(get_auth_dependency())], -# mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), -# ) -> StreamingResponse: -# """ -# Handle request to the /streaming_query endpoint using Agent API. - -# Returns a streaming response using Server-Sent Events (SSE) format with -# content type text/event-stream. - -# Returns: -# StreamingResponse: An HTTP streaming response yielding -# SSE-formatted events for the query lifecycle with content type -# text/event-stream. - -# Raises: -# HTTPException: -# - 401: Unauthorized - Missing or invalid credentials -# - 403: Forbidden - Insufficient permissions or model override not allowed -# - 404: Not Found - Conversation, model, or provider not found -# - 422: Unprocessable Entity - Request validation failed -# - 429: Too Many Requests - Quota limit exceeded -# - 500: Internal Server Error - Configuration not loaded or other server errors -# - 503: Service Unavailable - Unable to connect to Llama Stack backend -# """ -# return await streaming_query_endpoint_handler_base( -# request=request, -# query_request=query_request, -# auth=auth, -# mcp_headers=mcp_headers, -# retrieve_response_func=retrieve_response, -# create_response_generator_func=create_agent_response_generator, -# ) - - -# async def retrieve_response( -# client: AsyncLlamaStackClient, -# model_id: str, -# query_request: QueryRequest, -# token: str, -# mcp_headers: Optional[dict[str, dict[str, str]]] = None, -# ) -> tuple[AsyncIterator[AgentTurnResponseStreamChunk], str]: -# """ -# Retrieve response from LLMs and agents. - -# Asynchronously retrieves a streaming response and conversation -# ID from the Llama Stack agent for a given user query. - -# This function configures input/output shields, system prompt, -# and tool usage based on the request and environment. It -# prepares the agent with appropriate headers and toolgroups, -# validates attachments if present, and initiates a streaming -# turn with the user's query and any provided documents. - -# Parameters: -# model_id (str): Identifier of the model to use for the query. -# query_request (QueryRequest): The user's query and associated metadata. -# token (str): Authentication token for downstream services. -# mcp_headers (dict[str, dict[str, str]], optional): -# Multi-cluster proxy headers for tool integrations. 
- -# Returns: -# tuple: A tuple containing the streaming response object -# and the conversation ID. -# """ -# available_input_shields = [ -# shield.identifier -# for shield in filter(is_input_shield, await client.shields.list()) -# ] -# available_output_shields = [ -# shield.identifier -# for shield in filter(is_output_shield, await client.shields.list()) -# ] -# if not available_input_shields and not available_output_shields: -# logger.info("No available shields. Disabling safety") -# else: -# logger.info( -# "Available input shields: %s, output shields: %s", -# available_input_shields, -# available_output_shields, -# ) -# # use system prompt from request or default one -# system_prompt = get_system_prompt(query_request, configuration) -# logger.debug("Using system prompt: %s", system_prompt) - -# # TODO(lucasagomes): redact attachments content before sending to LLM -# # if attachments are provided, validate them -# if query_request.attachments: -# validate_attachments_metadata(query_request.attachments) - -# agent, conversation_id, session_id = await get_agent( -# client, -# model_id, -# system_prompt, -# available_input_shields, -# available_output_shields, -# query_request.conversation_id, -# query_request.no_tools or False, -# ) - -# logger.debug("Conversation ID: %s, session ID: %s", conversation_id, session_id) -# # bypass tools and MCP servers if no_tools is True -# if query_request.no_tools: -# mcp_headers = {} -# agent.extra_headers = {} -# toolgroups = None -# else: -# # preserve compatibility when mcp_headers is not provided -# if mcp_headers is None: -# mcp_headers = {} - -# mcp_headers = handle_mcp_headers_with_toolgroups(mcp_headers, configuration) - -# if not mcp_headers and token: -# for mcp_server in configuration.mcp_servers: -# mcp_headers[mcp_server.url] = { -# "Authorization": f"Bearer {token}", -# } - -# agent.extra_headers = { -# "X-LlamaStack-Provider-Data": json.dumps( -# { -# "mcp_headers": mcp_headers, -# } -# ), -# } - -# # Use specified vector stores or fetch all available ones -# if query_request.vector_store_ids: -# vector_db_ids = query_request.vector_store_ids -# else: -# vector_db_ids = [ -# vector_store.id -# for vector_store in (await client.vector_stores.list()).data -# ] -# toolgroups = (get_rag_toolgroups(vector_db_ids) or []) + [ -# mcp_server.name for mcp_server in configuration.mcp_servers -# ] -# # Convert empty list to None for consistency with existing behavior -# if not toolgroups: -# toolgroups = None - -# # TODO: LCORE-881 - Remove if Llama Stack starts to support these mime types -# # documents: list[Document] = [ -# # ( -# # {"content": doc["content"], "mime_type": "text/plain"} -# # if doc["mime_type"].lower() in ("application/json", "application/xml") -# # else doc -# # ) -# # for doc in query_request.get_documents() -# # ] - -# # Get RAG chunks before sending to LLM (reuse logic from query_vector_io_for_chunks) -# rag_chunks = [] -# try: -# if vector_db_ids: -# vector_db_id = vector_db_ids[0] # Use first available vector DB - -# params = {"k": 5, "score_threshold": 0.0} -# logger.info("Initial params: %s", params) -# logger.info("query_request.solr: %s", query_request.solr) -# if query_request.solr: -# # Pass the entire solr dict under the 'solr' key -# params["solr"] = query_request.solr -# logger.info("Final params with solr filters: %s", params) -# else: -# logger.info("No solr filters provided") -# logger.info("Final params being sent to vector_io.query: %s", params) - -# query_response = await client.vector_io.query( -# 
vector_db_id=vector_db_id, query=query_request.query, params=params -# ) - -# logger.info("The query response total payload: %s", query_response) - -# if query_response.chunks: -# # Convert retrieved chunks to RAGChunk format with proper source handling -# for chunk in query_response.chunks[:5]: -# # Extract source from chunk metadata based on OFFLINE flag -# source = None -# if chunk.metadata: -# if OFFLINE: -# parent_id = chunk.metadata.get("parent_id") -# if parent_id: -# source = urljoin( -# "https://mimir.corp.redhat.com", parent_id -# ) -# else: -# source = chunk.metadata.get("reference_url") - -# # Get score from chunk if available -# score = getattr(chunk, "score", None) - -# rag_chunks.append( -# RAGChunk( -# content=chunk.content, -# source=source, -# score=score, -# ) -# ) - -# logger.info( -# "Retrieved %d chunks from vector DB for streaming", len(rag_chunks) -# ) - -# except Exception as e: -# logger.warning("Failed to query vector database for chunks: %s", e) -# logger.debug("Vector DB query error details: %s", traceback.format_exc()) - -# # Format RAG context for injection into user message -# rag_context = "" -# if rag_chunks: -# context_chunks = [] -# for chunk in rag_chunks[:5]: # Limit to top 5 chunks -# chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}" -# context_chunks.append(chunk_text) -# rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) -# logger.info( -# "Injecting %d RAG chunks into streaming user message", len(context_chunks) -# ) - -# # Inject RAG context into user message -# user_content = query_request.query + rag_context - -# response = await agent.create_turn( -# messages=[UserMessage(role="user", content=user_content).model_dump()], -# session_id=session_id, -# # documents=documents, -# stream=True, -# toolgroups=toolgroups, -# ) -# response = cast(AsyncIterator[AgentTurnResponseStreamChunk], response) - -# return response, conversation_id diff --git a/src/app/endpoints/streaming_query_v2.py b/src/app/endpoints/streaming_query_v2.py index de96cdf0a..1655d954f 100644 --- a/src/app/endpoints/streaming_query_v2.py +++ b/src/app/endpoints/streaming_query_v2.py @@ -44,7 +44,7 @@ from authentication.interface import AuthTuple from authorization.middleware import authorize from configuration import configuration -from constants import MEDIA_TYPE_JSON +from constants import MEDIA_TYPE_JSON, MIMIR_DOC_URL from models.config import Action from models.context import ResponseGeneratorContext from models.requests import QueryRequest @@ -529,14 +529,14 @@ async def retrieve_response( # pylint: disable=too-many-locals if offline: # Use parent/doc path reference_doc = doc_id - doc_url = "https://mimir.corp.redhat.com" + reference_doc + doc_url = MIMIR_DOC_URL + reference_doc else: # Use reference_url if online reference_doc = reference_url or doc_id doc_url = ( reference_doc if reference_doc.startswith("http") - else ("https://mimir.corp.redhat.com" + reference_doc) + else (MIMIR_DOC_URL + reference_doc) ) if reference_doc and reference_doc not in metadata_doc_ids: @@ -571,7 +571,7 @@ async def retrieve_response( # pylint: disable=too-many-locals if offline: parent_id = chunk.metadata.get("parent_id") if parent_id: - source = urljoin("https://mimir.corp.redhat.com", parent_id) + source = urljoin(MIMIR_DOC_URL, parent_id) else: source = chunk.metadata.get("reference_url") diff --git a/src/constants.py b/src/constants.py index e9f4e211b..e7ce5d279 100644 --- a/src/constants.py +++ b/src/constants.py @@ -161,3 +161,6 @@ # quota 
limiters constants USER_QUOTA_LIMITER = "user_limiter" CLUSTER_QUOTA_LIMITER = "cluster_limiter" + +# SOLR OKP RAG +MIMIR_DOC_URL = "https://mimir.corp.redhat.com" From c5cfefb6efa449e2f46705a9d1abec9c2af441a8 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Wed, 4 Feb 2026 16:39:51 -0500 Subject: [PATCH 6/9] lint Signed-off-by: Anxhela Coba --- src/app/endpoints/query_v2.py | 7 ------- src/app/endpoints/streaming_query.py | 1 + tests/unit/app/endpoints/test_streaming_query.py | 1 - 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py index 3da330451..3e34b07d2 100644 --- a/src/app/endpoints/query_v2.py +++ b/src/app/endpoints/query_v2.py @@ -1053,17 +1053,10 @@ async def prepare_tools_for_responses_api( # Add RAG tools if vector stores are available if vector_store_ids: - # logger.info("query_request.solr: %s", query_request.solr) rag_tools = get_rag_tools(vector_store_ids) if rag_tools: logger.info("rag_tool are: %s", rag_tools) toolgroups.extend(rag_tools) - # if query_request.solr: - # logger.info( - # "RAG tools configured with Solr filters: %s", query_request.solr - # ) - # else: - # logger.info("RAG tools configured without Solr filters") else: logger.info("No RAG tools configured") else: diff --git a/src/app/endpoints/streaming_query.py b/src/app/endpoints/streaming_query.py index d596c30f6..2b12f14c3 100644 --- a/src/app/endpoints/streaming_query.py +++ b/src/app/endpoints/streaming_query.py @@ -557,6 +557,7 @@ def _handle_heartbeat_event( media_type=media_type, ) + async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments request: Request, query_request: QueryRequest, diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index f058db17c..a892aff5d 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -22,7 +22,6 @@ from models.requests import QueryRequest from models.responses import ReferencedDocument from utils.token_counter import TokenCounter -from utils.types import TurnSummary # Note: content_delta module doesn't exist in llama-stack-client 0.3.x # These are mock classes for backward compatibility with Agent API tests From d834cffff21d59b48cf2a6af0c982660dc2ffe4f Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Tue, 10 Feb 2026 22:21:46 -0500 Subject: [PATCH 7/9] pr feedback incorporated Signed-off-by: Anxhela Coba --- lightspeed-stack.yaml | 14 +- run.yaml | 239 ++++++++++----------- src/app/endpoints/query.py | 5 - src/app/endpoints/query_v2.py | 170 +-------------- src/app/endpoints/shields.py | 2 - src/app/endpoints/streaming_query_v2.py | 173 +-------------- src/configuration.py | 8 + src/constants.py | 5 + src/models/config.py | 21 ++ src/utils/vector_search.py | 267 ++++++++++++++++++++++++ 10 files changed, 437 insertions(+), 467 deletions(-) create mode 100644 src/utils/vector_search.py diff --git a/lightspeed-stack.yaml b/lightspeed-stack.yaml index 356394c7e..66326b9ff 100644 --- a/lightspeed-stack.yaml +++ b/lightspeed-stack.yaml @@ -10,12 +10,12 @@ service: llama_stack: # Uses a remote llama-stack service # The instance would have already been started with a llama-stack-run.yaml file - # use_as_library_client: false + use_as_library_client: false # Alternative for "as library use" - use_as_library_client: true - library_client_config_path: run.yaml - # url: 
http://llama-stack:8321 - # api_key: xyzzy + # use_as_library_client: true + # library_client_config_path: + url: http://llama-stack:8321 + api_key: xyzzy user_data_collection: feedback_enabled: true feedback_storage: "/tmp/data/feedback" @@ -30,3 +30,7 @@ conversation_cache: authentication: module: "noop" + + +solr: + offline: True \ No newline at end of file diff --git a/run.yaml b/run.yaml index 5ed401dcd..e1446f1a0 100644 --- a/run.yaml +++ b/run.yaml @@ -17,157 +17,142 @@ datasets: [] image_name: starter external_providers_dir: ${env.EXTERNAL_PROVIDERS_DIR} +providers: + inference: + - provider_id: openai # This ID is a reference to 'providers.inference' + provider_type: remote::openai + config: + api_key: ${env.OPENAI_API_KEY} + allowed_models: ["${env.E2E_OPENAI_MODEL:=gpt-4o-mini}"] + - config: + allowed_models: + - ${env.EMBEDDING_MODEL_DIR} + provider_id: sentence-transformers + provider_type: inline::sentence-transformers + files: + - config: + metadata_store: + table_name: files_metadata + backend: sql_default + storage_dir: ~/.llama/storage/files + provider_id: meta-reference-files + provider_type: inline::localfs + safety: + - config: + excluded_categories: [] + provider_id: llama-guard + provider_type: inline::llama-guard + scoring: + - provider_id: basic + provider_type: inline::basic + config: {} + - provider_id: llm-as-judge + provider_type: inline::llm-as-judge + config: {} + - provider_id: braintrust + provider_type: inline::braintrust + config: + openai_api_key: '********' + tool_runtime: + - config: {} # Enable the RAG tool + provider_id: rag-runtime + provider_type: inline::rag-runtime + vector_io: + - provider_id: solr-vector + provider_type: remote::solr_vector_io + config: + solr_url: http://localhost:8983/solr + collection_name: portal-rag + vector_field: chunk_vector + content_field: chunk + embedding_dimension: 384 + embedding_model: ${env.EMBEDDING_MODEL_DIR} + persistence: + namespace: portal-rag + backend: kv_default + - config: # Define the storage backend for RAG + persistence: + namespace: vector_io::faiss + backend: kv_default + provider_id: faiss + provider_type: inline::faiss + agents: + - config: + persistence: + agent_state: + namespace: agents_state + backend: kv_default + responses: + table_name: agents_responses + backend: sql_default + provider_id: meta-reference + provider_type: inline::meta-reference + batches: + - config: + kvstore: + namespace: batches_store + backend: kv_default + provider_id: reference + provider_type: inline::reference + datasetio: + - config: + kvstore: + namespace: huggingface_datasetio + backend: kv_default + provider_id: huggingface + provider_type: remote::huggingface + - config: + kvstore: + namespace: localfs_datasetio + backend: kv_default + provider_id: localfs + provider_type: inline::localfs + eval: + - config: + kvstore: + namespace: eval_store + backend: kv_default + provider_id: meta-reference + provider_type: inline::meta-reference +scoring_fns: [] +server: + port: 8321 storage: backends: - kv_default: + kv_default: # Define the storage backend type for RAG, in this case registry and RAG are unified i.e. information on registered resources (e.g. 
models, vector_stores) are saved together with the RAG chunks type: kv_sqlite db_path: ${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db} sql_default: type: sql_sqlite db_path: ${env.SQL_STORE_PATH:=~/.llama/storage/sql_store.db} - stores: metadata: namespace: registry backend: kv_default - inference: table_name: inference_store backend: sql_default max_write_queue_size: 10000 num_writers: 4 - conversations: table_name: openai_conversations backend: sql_default - prompts: namespace: prompts backend: kv_default - -metadata_store: - type: sqlite - db_path: ~/.llama/storage/registry.db - -inference_store: - type: sqlite - db_path: ~/.llama/storage/inference-store.db - -conversations_store: - type: sqlite - db_path: ~/.llama/storage/conversations.db - -providers: - inference: - - provider_id: openai - provider_type: remote::openai - config: - api_key: ${env.OPENAI_API_KEY} - allowed_models: - - gpt-4o-mini - - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - config: - allowed_models: - - ${env.EMBEDDING_MODEL_DIR} - - files: - - provider_id: meta-reference-files - provider_type: inline::localfs - config: - storage_dir: ~/.llama/storage/files - metadata_store: - table_name: files_metadata - backend: sql_default - - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - - scoring: - - provider_id: basic - provider_type: inline::basic - config: {} - - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - config: {} - - tool_runtime: - - provider_id: rag-runtime - provider_type: inline::rag-runtime - config: {} - - vector_io: - - provider_id: solr-vector - provider_type: remote::solr_vector_io - config: - solr_url: http://localhost:8983/solr - collection_name: portal-rag - vector_field: chunk_vector - content_field: chunk - embedding_dimension: 384 - embedding_model: ${env.EMBEDDING_MODEL_DIR} - persistence: - namespace: portal-rag - backend: kv_default - - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence: - agent_state: - namespace: agents_state - backend: kv_default - responses: - table_name: agents_responses - backend: sql_default - - batches: - - provider_id: reference - provider_type: inline::reference - config: - kvstore: - namespace: batches_store - backend: kv_default - - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - namespace: huggingface_datasetio - backend: kv_default - - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - namespace: localfs_datasetio - backend: kv_default - registered_resources: models: - - model_id: granite-embedding-30m - model_type: embedding - provider_id: sentence-transformers - provider_model_id: ${env.EMBEDDING_MODEL_DIR} - metadata: - embedding_dimension: 384 - + - model_id: granite-embedding-30m + model_type: embedding + provider_id: sentence-transformers + provider_model_id: ${env.EMBEDDING_MODEL_DIR} + metadata: + embedding_dimension: 384 shields: - shield_id: llama-guard provider_id: llama-guard provider_shield_id: openai/gpt-4o-mini - vector_stores: - - vector_store_id: portal-rag - provider_id: solr-vector - embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR} - embedding_dimension: 384 + vector_stores: [] datasets: [] scoring_fns: [] benchmarks: [] @@ -175,9 +160,9 @@ registered_resources: - toolgroup_id: builtin::rag # Register the RAG tool provider_id: rag-runtime vector_stores: - 
vector_store_id: portal-rag - provider_id: solr-vector - embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR} - embedding_dimension: 384 + default_provider_id: faiss + default_embedding_model: # Define the default embedding model for RAG + provider_id: sentence-transformers + model_id: nomic-ai/nomic-embed-text-v1.5 safety: default_shield_id: llama-guard diff --git a/src/app/endpoints/query.py b/src/app/endpoints/query.py index eb28a0018..251b346d7 100644 --- a/src/app/endpoints/query.py +++ b/src/app/endpoints/query.py @@ -55,11 +55,6 @@ router = APIRouter(tags=["query"]) -# When OFFLINE is False, use reference_url for chunk source -# When OFFLINE is True, use parent_id for chunk source -# TODO: move this setting to a higher level configuration -OFFLINE = True - query_response: dict[int | str, dict[str, Any]] = { 200: QueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py index 3e34b07d2..8b829d7f5 100644 --- a/src/app/endpoints/query_v2.py +++ b/src/app/endpoints/query_v2.py @@ -4,12 +4,9 @@ import json import logging -import traceback from typing import Annotated, Any, Optional, cast -from urllib.parse import urljoin from fastapi import APIRouter, Depends, Request -from llama_stack_client import APIConnectionError, APIStatusError from llama_stack_api.openai_responses import ( OpenAIResponseMCPApprovalRequest, OpenAIResponseMCPApprovalResponse, @@ -33,7 +30,7 @@ from authentication.interface import AuthTuple from authorization.middleware import authorize from configuration import AppConfig, configuration -from constants import DEFAULT_RAG_TOOL, MIMIR_DOC_URL +from constants import DEFAULT_RAG_TOOL from models.config import Action, ModelContextProtocolServer from models.requests import QueryRequest from models.responses import ( @@ -62,6 +59,7 @@ from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id from utils.token_counter import TokenCounter from utils.types import RAGChunk, ToolCallSummary, ToolResultSummary, TurnSummary +from utils.vector_search import perform_vector_search, format_rag_context_for_injection logger = logging.getLogger("app.endpoints.handlers") router = APIRouter(tags=["query_v1"]) @@ -429,168 +427,12 @@ async def retrieve_response( # pylint: disable=too-many-locals,too-many-branche ) # Extract RAG chunks from vector DB query response BEFORE calling responses API - rag_chunks = [] - doc_ids_from_chunks = [] - retrieved_chunks = [] - retrieved_scores = [] - - # When offline is False, use reference_url for chunk source - # When offline is True, use parent_id for chunk source - # TODO: move this setting to a higher level configuration - offline = True - - try: - # Get vector stores for direct querying - if query_request.vector_store_ids: - vector_store_ids = query_request.vector_store_ids - logger.info( - "Using specified vector_store_ids for direct query: %s", - vector_store_ids, - ) - else: - vector_store_ids = [ - vector_store.id - for vector_store in (await client.vector_stores.list()).data - ] - logger.info( - "Using all available vector_store_ids for direct query: %s", - vector_store_ids, - ) - - if vector_store_ids: - vector_store_id = vector_store_ids[0] # Use first available vector store - - params = {"k": 5, "score_threshold": 0.0, "mode": "hybrid"} - logger.info("Initial params: %s", params) - logger.info("query_request.solr: %s", query_request.solr) - if query_request.solr: - # Pass the entire solr dict under the 'solr' key - 
params["solr"] = query_request.solr - logger.info("Final params with solr filters: %s", params) - else: - logger.info("No solr filters provided") - logger.info("Final params being sent to vector_io.query: %s", params) - - query_response = await client.vector_io.query( - vector_store_id=vector_store_id, - query=query_request.query, - params=params, - ) - - logger.info("The query response total payload: %s", query_response) - - if query_response.chunks: - retrieved_chunks = query_response.chunks - retrieved_scores = ( - query_response.scores if hasattr(query_response, "scores") else [] - ) - - # Extract doc_ids from chunks for referenced_documents - metadata_doc_ids = set() - - for chunk in query_response.chunks: - logger.info("Extract doc ids from chunk: %s", chunk) - - # 1) dict metadata (what your code expects today) - md = getattr(chunk, "metadata", None) or {} - doc_id = md.get("doc_id") or md.get("document_id") - title = md.get("title") - - # 2) typed chunk_metadata (what your provider/logs are actually populating) - if not doc_id: - cm = getattr(chunk, "chunk_metadata", None) - if cm is not None: - # cm might be a pydantic model or a dict depending on caller - if isinstance(cm, dict): - doc_id = cm.get("doc_id") or cm.get("document_id") - title = title or cm.get("title") - reference_url = cm.get("reference_url") - else: - doc_id = getattr(cm, "doc_id", None) or getattr( - cm, "document_id", None - ) - title = title or getattr(cm, "title", None) - reference_url = getattr(cm, "reference_url", None) - else: - reference_url = None - else: - reference_url = md.get("reference_url") - - if not doc_id and not reference_url: - continue - - # Build URL based on offline flag - if offline: - # Use parent/doc path - reference_doc = doc_id - doc_url = MIMIR_DOC_URL + reference_doc - else: - # Use reference_url if online - reference_doc = reference_url or doc_id - doc_url = ( - reference_doc - if reference_doc.startswith("http") - else (MIMIR_DOC_URL + reference_doc) - ) - - if reference_doc and reference_doc not in metadata_doc_ids: - metadata_doc_ids.add(reference_doc) - doc_ids_from_chunks.append( - ReferencedDocument( - doc_title=title, - doc_url=doc_url, - ) - ) - - logger.info( - "Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks) - ) - - except ( - APIConnectionError, - APIStatusError, - AttributeError, - KeyError, - ValueError, - ) as e: - logger.warning("Failed to query vector database for chunks: %s", e) - logger.debug("Vector DB query error details: %s", traceback.format_exc()) - # Continue without RAG chunks - - # Convert retrieved chunks to RAGChunk format - for i, chunk in enumerate(retrieved_chunks): - # Extract source from chunk metadata based on offline flag - source = None - if chunk.metadata: - if offline: - parent_id = chunk.metadata.get("parent_id") - if parent_id: - source = urljoin(MIMIR_DOC_URL, parent_id) - else: - source = chunk.metadata.get("reference_url") - - # Get score from retrieved_scores list if available - score = retrieved_scores[i] if i < len(retrieved_scores) else None - - rag_chunks.append( - RAGChunk( - content=chunk.content, - source=source, - score=score, - ) - ) - - logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) + _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( + client, query_request, configuration + ) # Format RAG context for injection into user message - rag_context = "" - if rag_chunks: - context_chunks = [] - for chunk in rag_chunks[:5]: # Limit to top 5 chunks - chunk_text = f"Source: 
{chunk.source or 'Unknown'}\n{chunk.content}" - context_chunks.append(chunk_text) - rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) - logger.info("Injecting %d RAG chunks into user message", len(context_chunks)) + rag_context = format_rag_context_for_injection(rag_chunks) # Inject RAG context into input text if rag_context: diff --git a/src/app/endpoints/shields.py b/src/app/endpoints/shields.py index 790c2d0b3..5dd8b8b6c 100644 --- a/src/app/endpoints/shields.py +++ b/src/app/endpoints/shields.py @@ -70,8 +70,6 @@ async def shields_endpoint_handler( try: # try to get Llama Stack client client = AsyncLlamaStackClientHolder().get_client() - # await client.shields.delete(identifier="llama-guard-shielf") - # exit(1) # retrieve shields shields = await client.shields.list() s = [dict(s) for s in shields] diff --git a/src/app/endpoints/streaming_query_v2.py b/src/app/endpoints/streaming_query_v2.py index 1655d954f..ee7465617 100644 --- a/src/app/endpoints/streaming_query_v2.py +++ b/src/app/endpoints/streaming_query_v2.py @@ -1,9 +1,7 @@ """Streaming query handler using Responses API (v2).""" import logging -import traceback from typing import Annotated, Any, AsyncIterator, Optional, cast -from urllib.parse import urljoin from fastapi import APIRouter, Depends, Request from fastapi.responses import StreamingResponse @@ -16,7 +14,7 @@ OpenAIResponseObjectStreamResponseOutputTextDelta, OpenAIResponseObjectStreamResponseOutputTextDone, ) -from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient +from llama_stack_client import AsyncLlamaStackClient from app.endpoints.query import ( is_transcripts_enabled, @@ -44,7 +42,9 @@ from authentication.interface import AuthTuple from authorization.middleware import authorize from configuration import configuration -from constants import MEDIA_TYPE_JSON, MIMIR_DOC_URL +from constants import ( + MEDIA_TYPE_JSON, +) from models.config import Action from models.context import ResponseGeneratorContext from models.requests import QueryRequest @@ -74,6 +74,7 @@ from utils.token_counter import TokenCounter from utils.transcripts import store_transcript from utils.types import RAGChunk, TurnSummary +from utils.vector_search import perform_vector_search, format_rag_context_for_injection logger = logging.getLogger("app.endpoints.handlers") router = APIRouter(tags=["streaming_query_v1"]) @@ -435,168 +436,12 @@ async def retrieve_response( # pylint: disable=too-many-locals ) # Extract RAG chunks from vector DB query response BEFORE calling responses API - rag_chunks = [] - doc_ids_from_chunks = [] - retrieved_chunks = [] - retrieved_scores = [] - - # When offline is False, use reference_url for chunk source - # When offline is True, use parent_id for chunk source - # TODO: move this setting to a higher level configuration - offline = True - - try: - # Get vector stores for direct querying - if query_request.vector_store_ids: - vector_store_ids = query_request.vector_store_ids - logger.info( - "Using specified vector_store_ids for direct query: %s", - vector_store_ids, - ) - else: - vector_store_ids = [ - vector_store.id - for vector_store in (await client.vector_stores.list()).data - ] - logger.info( - "Using all available vector_store_ids for direct query: %s", - vector_store_ids, - ) - - if vector_store_ids: - vector_store_id = vector_store_ids[0] # Use first available vector store - - params = {"k": 5, "score_threshold": 0.0, "mode": "hybrid"} - logger.info("Initial params: %s", params) - 
logger.info("query_request.solr: %s", query_request.solr) - if query_request.solr: - # Pass the entire solr dict under the 'solr' key - params["solr"] = query_request.solr - logger.info("Final params with solr filters: %s", params) - else: - logger.info("No solr filters provided") - logger.info("Final params being sent to vector_io.query: %s", params) - - query_response = await client.vector_io.query( - vector_store_id=vector_store_id, - query=query_request.query, - params=params, - ) - - logger.info("The query response total payload: %s", query_response) - - if query_response.chunks: - retrieved_chunks = query_response.chunks - retrieved_scores = ( - query_response.scores if hasattr(query_response, "scores") else [] - ) - - # Extract doc_ids from chunks for referenced_documents - metadata_doc_ids = set() - - for chunk in query_response.chunks: - logger.info("Extract doc ids from chunk: %s", chunk) - - # 1) dict metadata - md = getattr(chunk, "metadata", None) or {} - doc_id = md.get("doc_id") or md.get("document_id") - title = md.get("title") - - # 2) typed chunk_metadata - if not doc_id: - cm = getattr(chunk, "chunk_metadata", None) - if cm is not None: - # cm might be a pydantic model or a dict depending on caller - if isinstance(cm, dict): - doc_id = cm.get("doc_id") or cm.get("document_id") - title = title or cm.get("title") - reference_url = cm.get("reference_url") - else: - doc_id = getattr(cm, "doc_id", None) or getattr( - cm, "document_id", None - ) - title = title or getattr(cm, "title", None) - reference_url = getattr(cm, "reference_url", None) - else: - reference_url = None - else: - reference_url = md.get("reference_url") - - if not doc_id and not reference_url: - continue - - # Build URL based on offline flag - if offline: - # Use parent/doc path - reference_doc = doc_id - doc_url = MIMIR_DOC_URL + reference_doc - else: - # Use reference_url if online - reference_doc = reference_url or doc_id - doc_url = ( - reference_doc - if reference_doc.startswith("http") - else (MIMIR_DOC_URL + reference_doc) - ) - - if reference_doc and reference_doc not in metadata_doc_ids: - metadata_doc_ids.add(reference_doc) - doc_ids_from_chunks.append( - ReferencedDocument( - doc_title=title, - doc_url=doc_url, - ) - ) - - logger.info( - "Extracted %d unique document IDs from chunks", len(doc_ids_from_chunks) - ) - - except ( - APIConnectionError, - APIStatusError, - AttributeError, - KeyError, - ValueError, - ) as e: - logger.warning("Failed to query vector database for chunks: %s", e) - logger.debug("Vector DB query error details: %s", traceback.format_exc()) - # Continue without RAG chunks - - # Convert retrieved chunks to RAGChunk format - for i, chunk in enumerate(retrieved_chunks): - # Extract source from chunk metadata based on offline flag - source = None - if chunk.metadata: - if offline: - parent_id = chunk.metadata.get("parent_id") - if parent_id: - source = urljoin(MIMIR_DOC_URL, parent_id) - else: - source = chunk.metadata.get("reference_url") - - # Get score from retrieved_scores list if available - score = retrieved_scores[i] if i < len(retrieved_scores) else None - - rag_chunks.append( - RAGChunk( - content=chunk.content, - source=source, - score=score, - ) - ) - - logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) + _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( + client, query_request, configuration + ) # Format RAG context for injection into user message - rag_context = "" - if rag_chunks: - context_chunks = [] - for chunk in rag_chunks[:5]: 
# Limit to top 5 chunks - chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}" - context_chunks.append(chunk_text) - rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) - logger.info("Injecting %d RAG chunks into user message", len(context_chunks)) + rag_context = format_rag_context_for_injection(rag_chunks) # Prepare input for Responses API # Convert attachments to text and concatenate with query diff --git a/src/configuration.py b/src/configuration.py index 9a253ac7a..46e2d76d0 100644 --- a/src/configuration.py +++ b/src/configuration.py @@ -23,6 +23,7 @@ DatabaseConfiguration, ConversationHistoryConfiguration, QuotaHandlersConfiguration, + SolrConfiguration, SplunkConfiguration, ) @@ -363,5 +364,12 @@ def deployment_environment(self) -> str: raise LogicError("logic error: configuration is not loaded") return self._configuration.deployment_environment + @property + def solr(self) -> Optional[SolrConfiguration]: + """Return Solr configuration, or None if not provided.""" + if self._configuration is None: + raise LogicError("logic error: configuration is not loaded") + return self._configuration.solr + configuration: AppConfig = AppConfig() diff --git a/src/constants.py b/src/constants.py index e7ce5d279..e4f5de25e 100644 --- a/src/constants.py +++ b/src/constants.py @@ -162,5 +162,10 @@ USER_QUOTA_LIMITER = "user_limiter" CLUSTER_QUOTA_LIMITER = "cluster_limiter" +# Vector search constants +VECTOR_SEARCH_DEFAULT_K = 5 +VECTOR_SEARCH_DEFAULT_SCORE_THRESHOLD = 0.0 +VECTOR_SEARCH_DEFAULT_MODE = "hybrid" + # SOLR OKP RAG MIMIR_DOC_URL = "https://mimir.corp.redhat.com" diff --git a/src/models/config.py b/src/models/config.py index b74a0233d..26672ccd5 100644 --- a/src/models/config.py +++ b/src/models/config.py @@ -1622,6 +1622,21 @@ class QuotaHandlersConfiguration(ConfigurationBase): ) +class SolrConfiguration(ConfigurationBase): + """Solr configuration for vector search queries. + + Controls whether to use offline or online mode when building document URLs + from vector search results. + """ + + offline: bool = Field( + True, + title="Offline mode", + description="When True, use parent_id for chunk source URLs. " + "When False, use reference_url for chunk source URLs.", + ) + + class AzureEntraIdConfiguration(ConfigurationBase): """Microsoft Entra ID authentication attributes for Azure.""" @@ -1760,6 +1775,12 @@ class Configuration(ConfigurationBase): "Used in telemetry events.", ) + solr: Optional[SolrConfiguration] = Field( + default=None, + title="Solr configuration", + description="Configuration for Solr vector search operations.", + ) + @model_validator(mode="after") def validate_mcp_auth_headers(self) -> Self: """ diff --git a/src/utils/vector_search.py b/src/utils/vector_search.py new file mode 100644 index 000000000..6f157b1f8 --- /dev/null +++ b/src/utils/vector_search.py @@ -0,0 +1,267 @@ +"""Vector search utilities for query endpoints. + +This module contains common functionality for performing vector searches +and processing RAG chunks that is shared between query_v2.py and streaming_query_v2.py. 
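+
+A minimal usage sketch (assumes a configured AsyncLlamaStackClient and a
+loaded AppConfig supplied by the calling endpoint):
+
+    chunks, scores, ref_docs, rag_chunks = await perform_vector_search(
+        client, query_request, configuration
+    )
+    rag_context = format_rag_context_for_injection(rag_chunks)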
+""" + +import logging +import traceback +from typing import Any, Optional +from urllib.parse import urljoin + +from llama_stack_client import AsyncLlamaStackClient + +import constants +from configuration import AppConfig +from models.requests import QueryRequest +from models.responses import ReferencedDocument +from utils.types import RAGChunk + +logger = logging.getLogger(__name__) + + +async def perform_vector_search( + client: AsyncLlamaStackClient, + query_request: QueryRequest, + configuration: AppConfig, +) -> tuple[list[Any], list[float], list[ReferencedDocument], list[RAGChunk]]: + """ + Perform vector search and extract RAG chunks and referenced documents. + + Args: + client: The AsyncLlamaStackClient to use for the request + query_request: The user's query request + configuration: Application configuration + + Returns: + Tuple containing: + - retrieved_chunks: Raw chunks from vector store + - retrieved_scores: Scores for each chunk + - doc_ids_from_chunks: Referenced documents extracted from chunks + - rag_chunks: Processed RAG chunks ready for use + """ + retrieved_chunks = [] + retrieved_scores = [] + doc_ids_from_chunks = [] + rag_chunks = [] + + # Get offline setting from configuration + offline = configuration.solr.offline if configuration.solr else True + + try: + # Get vector stores for direct querying + if query_request.vector_store_ids: + vector_store_ids = query_request.vector_store_ids + logger.info( + "Using specified vector_store_ids for direct query: %s", + vector_store_ids, + ) + else: + vector_store_ids = [ + vector_store.id + for vector_store in (await client.vector_stores.list()).data + ] + logger.info( + "Using all available vector_store_ids for direct query: %s", + vector_store_ids, + ) + + if vector_store_ids: + vector_store_id = vector_store_ids[0] # Use first available vector store + + params = { + "k": constants.VECTOR_SEARCH_DEFAULT_K, + "score_threshold": constants.VECTOR_SEARCH_DEFAULT_SCORE_THRESHOLD, + "mode": constants.VECTOR_SEARCH_DEFAULT_MODE, + } + logger.info("Initial params: %s", params) + logger.info("query_request.solr: %s", query_request.solr) + if query_request.solr: + # Pass the entire solr dict under the 'solr' key + params["solr"] = query_request.solr + logger.info("Final params with solr filters: %s", params) + else: + logger.info("No solr filters provided") + logger.info("Final params being sent to vector_io.query: %s", params) + + query_response = await client.vector_io.query( + vector_store_id=vector_store_id, + query=query_request.query, + params=params, + ) + + logger.info("The query response total payload: %s", query_response) + + if query_response.chunks: + retrieved_chunks = query_response.chunks + retrieved_scores = ( + query_response.scores if hasattr(query_response, "scores") else [] + ) + + # Extract doc_ids from chunks for referenced_documents + metadata_doc_ids = set() + + for chunk in query_response.chunks: + logger.info("Extract doc ids from chunk: %s", chunk) + + # 1) dict metadata + metadata = getattr(chunk, "metadata", None) or {} + doc_id = metadata.get("doc_id") or metadata.get("document_id") + title = metadata.get("title") + + # 2) typed chunk_metadata + if not doc_id: + chunk_meta = getattr(chunk, "chunk_metadata", None) + if chunk_meta is not None: + # chunk_meta might be a pydantic model or a dict depending on caller + if isinstance(chunk_meta, dict): + doc_id = chunk_meta.get("doc_id") or chunk_meta.get("document_id") + title = title or chunk_meta.get("title") + reference_url = chunk_meta.get("reference_url") 
+ else: + doc_id = getattr(chunk_meta, "doc_id", None) or getattr( + chunk_meta, "document_id", None + ) + title = title or getattr(chunk_meta, "title", None) + reference_url = getattr(chunk_meta, "reference_url", None) + else: + reference_url = None + else: + reference_url = metadata.get("reference_url") + + if not doc_id and not reference_url: + continue + + # Build URL based on offline flag + doc_url, reference_doc = _build_document_url( + offline, doc_id, reference_url + ) + + if reference_doc and reference_doc not in metadata_doc_ids: + metadata_doc_ids.add(reference_doc) + doc_ids_from_chunks.append( + ReferencedDocument( + doc_title=title, + doc_url=doc_url, + ) + ) + + logger.info( + "Extracted %d unique document IDs from chunks", + len(doc_ids_from_chunks), + ) + + # Convert retrieved chunks to RAGChunk format + rag_chunks = _convert_chunks_to_rag_format( + retrieved_chunks, retrieved_scores, offline + ) + logger.info("Retrieved %d chunks from vector DB", len(rag_chunks)) + + except Exception as e: + logger.warning("Failed to query vector database for chunks: %s", e) + logger.debug("Vector DB query error details: %s", traceback.format_exc()) + # Continue without RAG chunks + + return retrieved_chunks, retrieved_scores, doc_ids_from_chunks, rag_chunks + + +def _build_document_url( + offline: bool, doc_id: Optional[str], reference_url: Optional[str] +) -> tuple[str, Optional[str]]: + """ + Build document URL based on offline flag and available metadata. + + Args: + offline: Whether to use offline mode (parent_id) or online mode (reference_url) + doc_id: Document ID from chunk metadata + reference_url: Reference URL from chunk metadata + + Returns: + Tuple of (doc_url, reference_doc) where: + - doc_url: The full URL for the document + - reference_doc: The document reference used for deduplication + """ + if offline: + # Use parent/doc path + reference_doc = doc_id + doc_url = constants.MIMIR_DOC_URL + reference_doc if reference_doc else "" + else: + # Use reference_url if online + reference_doc = reference_url or doc_id + doc_url = ( + reference_doc + if reference_doc and reference_doc.startswith("http") + else (constants.MIMIR_DOC_URL + reference_doc if reference_doc else "") + ) + + return doc_url, reference_doc + + +def _convert_chunks_to_rag_format( + retrieved_chunks: list[Any], + retrieved_scores: list[float], + offline: bool, +) -> list[RAGChunk]: + """ + Convert retrieved chunks to RAGChunk format. + + Args: + retrieved_chunks: Raw chunks from vector store + retrieved_scores: Scores for each chunk + offline: Whether to use offline mode for source URLs + + Returns: + List of RAGChunk objects + """ + rag_chunks = [] + + for i, chunk in enumerate(retrieved_chunks): + # Extract source from chunk metadata based on offline flag + source = None + if chunk.metadata: + if offline: + parent_id = chunk.metadata.get("parent_id") + if parent_id: + source = urljoin(constants.MIMIR_DOC_URL, parent_id) + else: + source = chunk.metadata.get("reference_url") + + # Get score from retrieved_scores list if available + score = retrieved_scores[i] if i < len(retrieved_scores) else None + + rag_chunks.append( + RAGChunk( + content=chunk.content, + source=source, + score=score, + ) + ) + + return rag_chunks + + +def format_rag_context_for_injection( + rag_chunks: list[RAGChunk], max_chunks: int = 5 +) -> str: + """ + Format RAG context for injection into user message. 
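+
+    For example (chunk contents are illustrative), two chunks with sources
+    "docs/setup.md" and None would be rendered as:
+
+        Relevant documentation:
+        Source: docs/setup.md
+        <first chunk content>
+
+        Source: Unknown
+        <second chunk content>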
+ + Args: + rag_chunks: List of RAG chunks to format + max_chunks: Maximum number of chunks to include (default: 5) + + Returns: + Formatted RAG context string ready for injection + """ + if not rag_chunks: + return "" + + context_chunks = [] + for chunk in rag_chunks[:max_chunks]: # Limit to top chunks + chunk_text = f"Source: {chunk.source or 'Unknown'}\n{chunk.content}" + context_chunks.append(chunk_text) + + rag_context = "\n\nRelevant documentation:\n" + "\n\n".join(context_chunks) + logger.info("Injecting %d RAG chunks into user message", len(context_chunks)) + + return rag_context From cb1c532c7187f34a72ea18afebce2a054ffa84c0 Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Wed, 11 Feb 2026 17:13:05 -0500 Subject: [PATCH 8/9] WIP: local changes before rebase Signed-off-by: Anxhela Coba --- src/app/endpoints/a2a.py | 4 +- src/app/endpoints/query.py | 1245 ++++++++++------ src/app/endpoints/query_old.py | 579 ++++++++ src/app/endpoints/query_v2.py | 922 ------------ src/app/endpoints/streaming_query.py | 1028 +++++-------- src/app/endpoints/streaming_query_old.py | 726 +++++++++ src/app/endpoints/streaming_query_v2.py | 508 ------- src/app/routers.py | 9 +- .../endpoints/test_query_v2_integration.py | 10 +- tests/unit/app/endpoints/test_query.py | 1303 ++++++++++++----- tests/unit/app/endpoints/test_query_old.py | 486 ++++++ tests/unit/app/endpoints/test_query_v2.py | 1039 ------------- .../app/endpoints/test_streaming_query.py | 1123 +++++++------- .../app/endpoints/test_streaming_query_old.py | 654 +++++++++ .../app/endpoints/test_streaming_query_v2.py | 637 -------- tests/unit/app/test_routers.py | 12 +- 16 files changed, 5142 insertions(+), 5143 deletions(-) create mode 100644 src/app/endpoints/query_old.py delete mode 100644 src/app/endpoints/query_v2.py create mode 100644 src/app/endpoints/streaming_query_old.py delete mode 100644 src/app/endpoints/streaming_query_v2.py create mode 100644 tests/unit/app/endpoints/test_query_old.py delete mode 100644 tests/unit/app/endpoints/test_query_v2.py create mode 100644 tests/unit/app/endpoints/test_streaming_query_old.py delete mode 100644 tests/unit/app/endpoints/test_streaming_query_v2.py diff --git a/src/app/endpoints/a2a.py b/src/app/endpoints/a2a.py index 7e3fc0152..15fb3e5d1 100644 --- a/src/app/endpoints/a2a.py +++ b/src/app/endpoints/a2a.py @@ -36,11 +36,11 @@ from starlette.responses import Response, StreamingResponse from a2a_storage import A2AContextStore, A2AStorageFactory -from app.endpoints.query import ( +from app.endpoints.query_old import ( evaluate_model_hints, select_model_and_provider_id, ) -from app.endpoints.streaming_query_v2 import retrieve_response +from app.endpoints.streaming_query import retrieve_response from authentication import get_auth_dependency from authentication.interface import AuthTuple from authorization.middleware import authorize diff --git a/src/app/endpoints/query.py b/src/app/endpoints/query.py index 251b346d7..9646df7d9 100644 --- a/src/app/endpoints/query.py +++ b/src/app/endpoints/query.py @@ -1,61 +1,70 @@ -"""Handler for REST API call to provide answer to query.""" +# pylint: disable=too-many-locals,too-many-branches,too-many-nested-blocks +"""Handler for REST API call to provide answer to query using Response API.""" + +import json import logging -from datetime import UTC, datetime -from typing import Annotated, Any, Optional - -from fastapi import APIRouter, Depends, HTTPException, Request -from llama_stack_api.shields import Shield -from llama_stack_client import ( - 
APIConnectionError, - APIStatusError, - RateLimitError, # type: ignore +from typing import Annotated, Any, Optional, cast + +from fastapi import APIRouter, Depends, Request +from llama_stack_api.openai_responses import ( + OpenAIResponseMCPApprovalRequest, + OpenAIResponseMCPApprovalResponse, + OpenAIResponseObject, + OpenAIResponseOutput, + OpenAIResponseOutputMessageFileSearchToolCall, + OpenAIResponseOutputMessageFunctionToolCall, + OpenAIResponseOutputMessageMCPCall, + OpenAIResponseOutputMessageMCPListTools, + OpenAIResponseOutputMessageWebSearchToolCall, ) -from llama_stack_client.types.model_list_response import ModelListResponse -from sqlalchemy.exc import SQLAlchemyError +from llama_stack_client import AsyncLlamaStackClient import constants import metrics -from app.database import get_session +from app.endpoints.query_old import ( + query_endpoint_handler_base, + validate_attachments_metadata, +) from authentication import get_auth_dependency from authentication.interface import AuthTuple -from authorization.azure_token_manager import AzureEntraIDManager -from client import AsyncLlamaStackClientHolder -from configuration import configuration -from models.cache_entry import CacheEntry -from models.config import Action -from models.database.conversations import UserConversation -from models.requests import Attachment, QueryRequest +from authorization.middleware import authorize +from configuration import AppConfig, configuration +from constants import DEFAULT_RAG_TOOL +from models.config import Action, ModelContextProtocolServer +from models.requests import QueryRequest from models.responses import ( ForbiddenResponse, InternalServerErrorResponse, NotFoundResponse, - PromptTooLongResponse, QueryResponse, QuotaExceededResponse, + ReferencedDocument, ServiceUnavailableResponse, UnauthorizedResponse, UnprocessableEntityResponse, ) from utils.endpoints import ( check_configuration_loaded, - store_conversation_into_cache, - validate_conversation_ownership, - validate_model_provider_override, + get_system_prompt, + get_topic_summary_system_prompt, ) -from utils.quota import ( - check_tokens_available, - consume_tokens, - get_available_quotas, +from utils.mcp_headers import mcp_headers_dependency +from utils.query import parse_arguments_string +from utils.responses import extract_text_from_response_output_item +from utils.shields import ( + append_turn_to_conversation, + run_shield_moderation, ) -from utils.suid import normalize_conversation_id -from utils.transcripts import store_transcript +from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id +from utils.token_counter import TokenCounter +from utils.types import RAGChunk, ToolCallSummary, ToolResultSummary, TurnSummary +from utils.vector_search import perform_vector_search, format_rag_context_for_injection logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["query"]) - +router = APIRouter(tags=["query_v1"]) -query_response: dict[int | str, dict[str, Any]] = { +query_v2_response: dict[int | str, dict[str, Any]] = { 200: QueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( examples=["missing header", "missing token"] @@ -64,9 +73,9 @@ examples=["endpoint", "conversation read", "model override"] ), 404: NotFoundResponse.openapi_response( - examples=["model", "conversation", "provider"] + examples=["conversation", "model", "provider"] ), - 413: PromptTooLongResponse.openapi_response(), + # 413: PromptTooLongResponse.openapi_response(), 422: 
UnprocessableEntityResponse.openapi_response(),
     429: QuotaExceededResponse.openapi_response(),
     500: InternalServerErrorResponse.openapi_response(examples=["configuration"]),
@@ -74,506 +83,840 @@
 }


-def is_transcripts_enabled() -> bool:
-    """Check if transcripts is enabled.
+def _build_tool_call_summary(  # pylint: disable=too-many-return-statements,too-many-branches
+    output_item: OpenAIResponseOutput,
+    rag_chunks: list[RAGChunk],
+) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]:
+    """Translate Responses API tool outputs into ToolCallSummary and ToolResultSummary records.

+    Processes OpenAI response output items and extracts tool call and result information.
+    Also parses RAG chunks from file_search_call items and appends them to the provided list.
+
+    Args:
+        output_item: An OpenAIResponseOutput item from the response.output array
+        rag_chunks: List to append extracted RAG chunks to (from file_search_call items)

     Returns:
-        bool: True if transcripts is enabled, False otherwise.
+        A tuple of (ToolCallSummary, ToolResultSummary), either of which may be None
+        when the current Llama Stack Responses API does not provide that information.
+
+    Supported tool types:
+    - function_call: Function tool calls with parsed arguments (no result)
+    - file_search_call: File search operations with results (also extracts RAG chunks)
+    - web_search_call: Web search operations (incomplete)
+    - mcp_call: MCP calls with server labels
+    - mcp_list_tools: MCP server tool listings
+    - mcp_approval_request: MCP approval requests (no result)
+    - mcp_approval_response: MCP approval responses (no call)
     """
-    return configuration.user_data_collection_configuration.transcripts_enabled
+    item_type = getattr(output_item, "type", None)
+
+    if item_type == "function_call":
+        item = cast(OpenAIResponseOutputMessageFunctionToolCall, output_item)
+        return (
+            ToolCallSummary(
+                id=item.call_id,
+                name=item.name,
+                args=parse_arguments_string(item.arguments),
+                type="function_call",
+            ),
+            None,  # not supported by Responses API at all
+        )

+    if item_type == "file_search_call":
+        file_search_item = cast(
+            OpenAIResponseOutputMessageFileSearchToolCall, output_item
+        )
+        extract_rag_chunks_from_file_search_item(file_search_item, rag_chunks)
+        response_payload: Optional[dict[str, Any]] = None
+        if file_search_item.results is not None:
+            response_payload = {
+                "results": [result.model_dump() for result in file_search_item.results]
+            }
+        return ToolCallSummary(
+            id=file_search_item.id,
+            name=DEFAULT_RAG_TOOL,
+            args={"queries": file_search_item.queries},
+            type="file_search_call",
+        ), ToolResultSummary(
+            id=file_search_item.id,
+            status=file_search_item.status,
+            content=json.dumps(response_payload) if response_payload else "",
+            type="file_search_call",
+            round=1,
+        )

-def persist_user_conversation_details(
-    user_id: str,
-    conversation_id: str,
-    model: str,
-    provider_id: str,
-    topic_summary: Optional[str],
-) -> None:
-    """Associate conversation to user in the database."""
-    # Normalize the conversation ID (strip 'conv_' prefix if present)
-    normalized_id = normalize_conversation_id(conversation_id)
-    logger.debug(
-        "persist_user_conversation_details - original conv_id: %s, normalized: %s, user: %s",
-        conversation_id,
-        normalized_id,
-        user_id,
-    )
+    # Incomplete OpenAI Responses API definition in LLS: action attribute not supported yet
+    if item_type == "web_search_call":
+        web_search_item = cast(
+            OpenAIResponseOutputMessageWebSearchToolCall, output_item
+        )
+        return (
+            ToolCallSummary(
id=web_search_item.id, + name="web_search", + args={}, + type="web_search_call", + ), + ToolResultSummary( + id=web_search_item.id, + status=web_search_item.status, + content="", + type="web_search_call", + round=1, + ), + ) - with get_session() as session: - existing_conversation = ( - session.query(UserConversation).filter_by(id=normalized_id).first() + if item_type == "mcp_call": + mcp_call_item = cast(OpenAIResponseOutputMessageMCPCall, output_item) + args = parse_arguments_string(mcp_call_item.arguments) + if mcp_call_item.server_label: + args["server_label"] = mcp_call_item.server_label + content = ( + mcp_call_item.error + if mcp_call_item.error + else (mcp_call_item.output if mcp_call_item.output else "") ) - if not existing_conversation: - conversation = UserConversation( - id=normalized_id, - user_id=user_id, - last_used_model=model, - last_used_provider=provider_id, - topic_summary=topic_summary, - message_count=1, - ) - session.add(conversation) - logger.debug( - "Associated conversation %s to user %s", normalized_id, user_id - ) - else: - existing_conversation.last_used_model = model - existing_conversation.last_used_provider = provider_id - existing_conversation.last_message_at = datetime.now(UTC) - existing_conversation.message_count += 1 - logger.debug( - "Updating existing conversation in DB - ID: %s, User: %s, Messages: %d", - normalized_id, - user_id, - existing_conversation.message_count, - ) + return ToolCallSummary( + id=mcp_call_item.id, + name=mcp_call_item.name, + args=args, + type="mcp_call", + ), ToolResultSummary( + id=mcp_call_item.id, + status="success" if mcp_call_item.error is None else "failure", + content=content, + type="mcp_call", + round=1, + ) - session.commit() - logger.debug( - "Successfully committed conversation %s to database", normalized_id + if item_type == "mcp_list_tools": + mcp_list_tools_item = cast(OpenAIResponseOutputMessageMCPListTools, output_item) + tools_info = [ + { + "name": tool.name, + "description": tool.description, + "input_schema": tool.input_schema, + } + for tool in mcp_list_tools_item.tools + ] + content_dict = { + "server_label": mcp_list_tools_item.server_label, + "tools": tools_info, + } + return ( + ToolCallSummary( + id=mcp_list_tools_item.id, + name="mcp_list_tools", + args={"server_label": mcp_list_tools_item.server_label}, + type="mcp_list_tools", + ), + ToolResultSummary( + id=mcp_list_tools_item.id, + status="success", + content=json.dumps(content_dict), + type="mcp_list_tools", + round=1, + ), ) + if item_type == "mcp_approval_request": + approval_request_item = cast(OpenAIResponseMCPApprovalRequest, output_item) + args = parse_arguments_string(approval_request_item.arguments) + return ( + ToolCallSummary( + id=approval_request_item.id, + name=approval_request_item.name, + args=args, + type="tool_call", + ), + None, + ) -def evaluate_model_hints( - user_conversation: Optional[UserConversation], - query_request: QueryRequest, -) -> tuple[Optional[str], Optional[str]]: - """Evaluate model hints from user conversation.""" - model_id: Optional[str] = query_request.model - provider_id: Optional[str] = query_request.provider - - if user_conversation is not None: - if query_request.model is not None: - if query_request.model != user_conversation.last_used_model: - logger.debug( - "Model specified in request: %s, preferring it over user conversation model %s", - query_request.model, - user_conversation.last_used_model, - ) - else: - logger.debug( - "No model specified in request, using latest model from user conversation: 
%s", - user_conversation.last_used_model, - ) - model_id = user_conversation.last_used_model + if item_type == "mcp_approval_response": + approval_response_item = cast(OpenAIResponseMCPApprovalResponse, output_item) + content_dict = {} + if approval_response_item.reason: + content_dict["reason"] = approval_response_item.reason + return ( + None, + ToolResultSummary( + id=approval_response_item.approval_request_id, + status="success" if approval_response_item.approve else "denied", + content=json.dumps(content_dict), + type="mcp_approval_response", + round=1, + ), + ) - if query_request.provider is not None: - if query_request.provider != user_conversation.last_used_provider: - logger.debug( - "Provider specified in request: %s, " - "preferring it over user conversation provider %s", - query_request.provider, - user_conversation.last_used_provider, - ) - else: - logger.debug( - "No provider specified in request, " - "using latest provider from user conversation: %s", - user_conversation.last_used_provider, - ) - provider_id = user_conversation.last_used_provider + return None, None + + +async def get_topic_summary( # pylint: disable=too-many-nested-blocks + question: str, client: AsyncLlamaStackClient, model_id: str +) -> str: + """ + Get a topic summary for a question using Responses API. + + This is the Responses API version of get_topic_summary, which uses + client.responses.create() instead of the Agent API. + + Args: + question: The question to generate a topic summary for + client: The AsyncLlamaStackClient to use for the request + model_id: The llama stack model ID (full format: provider/model) + + Returns: + str: The topic summary for the question + """ + topic_summary_system_prompt = get_topic_summary_system_prompt(configuration) + + # Use Responses API to generate topic summary + response = cast( + OpenAIResponseObject, + await client.responses.create( + input=question, + model=model_id, + instructions=topic_summary_system_prompt, + stream=False, + store=False, # Don't store topic summary requests + ), + ) + + # Extract text from response output + summary_text = "".join( + extract_text_from_response_output_item(output_item) + for output_item in response.output + ) - return model_id, provider_id + return summary_text.strip() if summary_text else "" -async def query_endpoint_handler_base( # pylint: disable=R0914 +@router.post("/query", responses=query_v2_response, summary="Query Endpoint Handler V1") +@authorize(Action.QUERY) +async def query_endpoint_handler_v2( request: Request, query_request: QueryRequest, auth: Annotated[AuthTuple, Depends(get_auth_dependency())], - mcp_headers: dict[str, dict[str, str]], - retrieve_response_func: Any, - get_topic_summary_func: Any, + mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), ) -> QueryResponse: """ - Handle query endpoints (shared by Agent API and Responses API). + Handle request to the /query endpoint using Responses API. - Processes a POST request to a query endpoint, forwarding the - user's query to a selected Llama Stack LLM and returning the generated response. - - Validates configuration and authentication, selects the appropriate model - and provider, retrieves the LLM response, updates metrics, and optionally - stores a transcript of the interaction. Handles connection errors to the - Llama Stack service by returning an HTTP 500 error. 
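+    An illustrative request (the path prefix and field values depend on how
+    the router is mounted and configured):
+
+        POST /query
+        {
+            "query": "How do I configure a vector store?",
+            "model": "gpt-4o-mini",
+            "provider": "openai"
+        }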
-
-    Args:
-        request: The FastAPI request object
-        query_request: The query request containing the user's question
-        auth: Authentication tuple from dependency
-        mcp_headers: MCP headers from dependency
-        retrieve_response_func: The retrieve_response function to use (Agent or Responses API)
-        get_topic_summary_func: The get_topic_summary function to use (Agent or Responses API)
+    This is a wrapper around query_endpoint_handler_base that provides
+    the Responses API-specific retrieve_response and get_topic_summary functions.

     Returns:
         QueryResponse: Contains the conversation ID and the LLM-generated response.
     """
     check_configuration_loaded(configuration)
+    return await query_endpoint_handler_base(
+        request=request,
+        query_request=query_request,
+        auth=auth,
+        mcp_headers=mcp_headers,
+        retrieve_response_func=retrieve_response,
+        get_topic_summary_func=get_topic_summary,
+    )
+

-    # Enforce RBAC: optionally disallow overriding model/provider in requests
-    validate_model_provider_override(query_request, request.state.authorized_actions)
+async def retrieve_response(  # pylint: disable=too-many-locals,too-many-branches,too-many-arguments,too-many-statements
+    client: AsyncLlamaStackClient,
+    model_id: str,
+    query_request: QueryRequest,
+    token: str,
+    mcp_headers: Optional[dict[str, dict[str, str]]] = None,
+    *,
+    provider_id: str = "",
+) -> tuple[TurnSummary, str, list[ReferencedDocument], TokenCounter]:
+    """
+    Retrieve response from LLMs and agents.

-    # log Llama Stack configuration
-    logger.info("Llama stack config: %s", configuration.llama_stack_configuration)
+    Retrieves a response from the Llama Stack LLM or agent for a
+    given query, handling shield configuration, tool usage, and
+    attachment validation.

-    user_id, _, _skip_userid_check, token = auth
+    This function configures system prompts, shields, and toolgroups
+    (including RAG and MCP integration) as needed, based on the query
+    request and the system configuration. It validates attachments,
+    manages conversation and session context, and processes Model
+    Context Protocol (MCP) headers. Corresponding metrics are updated.

-    started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
-    user_conversation: Optional[UserConversation] = None
-    if query_request.conversation_id:
-        logger.debug(
-            "Conversation ID specified in query: %s", query_request.conversation_id
-        )
-        # Normalize the conversation ID for database lookup (strip conv_ prefix if present)
-        normalized_conv_id_for_lookup = normalize_conversation_id(
-            query_request.conversation_id
-        )
-        user_conversation = validate_conversation_ownership(
-            user_id=user_id,
-            conversation_id=normalized_conv_id_for_lookup,
-            others_allowed=(
-                Action.QUERY_OTHERS_CONVERSATIONS in request.state.authorized_actions
-            ),
-        )
+    Parameters:
+        client (AsyncLlamaStackClient): The AsyncLlamaStackClient to use for the request.
+        model_id (str): The identifier of the LLM model to use.
+        query_request (QueryRequest): The user's query and associated metadata.
+        token (str): The authentication token for authorization.
+        mcp_headers (dict[str, dict[str, str]], optional): Per-request Model Context Protocol (MCP) headers.
+        provider_id (str): The identifier of the LLM provider to use.
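+
+    A typical call (a sketch; the model and provider identifiers are
+    illustrative) looks like:
+
+        summary, conv_id, docs, tokens = await retrieve_response(
+            client,
+            "openai/gpt-4o-mini",
+            query_request,
+            token,
+            mcp_headers=mcp_headers,
+            provider_id="openai",
+        )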
-        if user_conversation is None:
-            logger.warning(
-                "Conversation %s not found for user %s",
-                query_request.conversation_id,
-                user_id,
-            )
-            response = NotFoundResponse(
-                resource="conversation", resource_id=query_request.conversation_id
+    Returns:
+        tuple[TurnSummary, str, list[ReferencedDocument], TokenCounter]: A tuple
+        containing a summary of the LLM or agent's response content, the
+        conversation ID, the list of parsed referenced documents, and token
+        usage information.
+    """
+    # use system prompt from request or default one
+    system_prompt = get_system_prompt(query_request, configuration)
+    logger.debug("Using system prompt: %s", system_prompt)
+
+    # TODO(lucasagomes): redact attachments content before sending to LLM
+    # if attachments are provided, validate them
+    if query_request.attachments:
+        validate_attachments_metadata(query_request.attachments)
+
+    # Prepare tools for responses API - skip RAG tools since we're doing direct vector query
+    toolgroups = await prepare_tools_for_responses_api(
+        client,
+        query_request,
+        token,
+        configuration,
+        mcp_headers=mcp_headers,
+        skip_rag_tools=True,
+    )
+
+    # Prepare input for Responses API
+    # Convert attachments to text and concatenate with query
+    input_text = query_request.query
+    if query_request.attachments:
+        for attachment in query_request.attachments:
+            # Append attachment content with type label
+            input_text += (
+                f"\n\n[Attachment: {attachment.attachment_type}]\n{attachment.content}"
             )
-            raise HTTPException(**response.model_dump())

+    # Handle conversation ID for Responses API
+    # Create conversation upfront if not provided
+    conversation_id = query_request.conversation_id
+    if conversation_id:
+        # Conversation ID was provided - convert to llama-stack format
+        logger.debug("Using existing conversation ID: %s", conversation_id)
+        llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id)
     else:
-        logger.debug("Query does not contain conversation ID")
-
-    try:
-        check_tokens_available(configuration.quota_limiters, user_id)
-        # try to get Llama Stack client
-        client = AsyncLlamaStackClientHolder().get_client()
-        llama_stack_model_id, model_id, provider_id = select_model_and_provider_id(
-            await client.models.list(),
-            *evaluate_model_hints(
-                user_conversation=user_conversation, query_request=query_request
-            ),
+        # No conversation_id provided - create a new conversation first
+        logger.debug("No conversation_id provided, creating new conversation")
+
+        conversation = await client.conversations.create(metadata={})
+        llama_stack_conv_id = conversation.id
+        # Store the normalized version for later use
+        conversation_id = normalize_conversation_id(llama_stack_conv_id)
+        logger.info(
+            "Created new conversation with ID: %s (normalized: %s)",
+            llama_stack_conv_id,
+            conversation_id,
         )
-        if (
-            provider_id == "azure"
-            and AzureEntraIDManager().is_entra_id_configured
-            and AzureEntraIDManager().is_token_expired
-            and AzureEntraIDManager().refresh_token()
-        ):
-            if AsyncLlamaStackClientHolder().is_library_client:
-                client = await AsyncLlamaStackClientHolder().reload_library_client()
-            else:
-                azure_config = next(
-                    p.config
-                    for p in await client.providers.list()
-                    if p.provider_type == "remote::azure"
-                )
-                client = AsyncLlamaStackClientHolder().update_provider_data(
-                    {
-                        "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(),
-                        "azure_api_base": str(azure_config.get("api_base")),
-                    }
-                )
-
-        summary, conversation_id, referenced_documents, token_usage = (
-            await retrieve_response_func(
-                client,
-                llama_stack_model_id,
-                query_request,
token, - mcp_headers=mcp_headers, - provider_id=provider_id, - ) + # Run shield moderation before calling LLM + moderation_result = await run_shield_moderation(client, input_text) + if moderation_result.blocked: + violation_message = moderation_result.message or "" + await append_turn_to_conversation( + client, llama_stack_conv_id, input_text, violation_message + ) + summary = TurnSummary( + llm_response=violation_message, + tool_calls=[], + tool_results=[], + rag_chunks=[], + ) + return ( + summary, + normalize_conversation_id(conversation_id), + [], + TokenCounter(), ) - # Get the initial topic summary for the conversation - topic_summary = None - with get_session() as session: - existing_conversation = ( - session.query(UserConversation).filter_by(id=conversation_id).first() - ) - if not existing_conversation: - # Check if topic summary should be generated (default: True) - should_generate = query_request.generate_topic_summary - - if should_generate: - logger.debug("Generating topic summary for new conversation") - topic_summary = await get_topic_summary_func( - query_request.query, client, llama_stack_model_id - ) - else: - logger.debug( - "Topic summary generation disabled by request parameter" - ) - topic_summary = None - # Convert RAG chunks to dictionary format once for reuse - logger.info("Processing RAG chunks...") - rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] + # Extract RAG chunks from vector DB query response BEFORE calling responses API + _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( + client, query_request, configuration + ) - if not is_transcripts_enabled(): - logger.debug("Transcript collection is disabled in the configuration") - else: - store_transcript( - user_id=user_id, - conversation_id=conversation_id, - model_id=model_id, - provider_id=provider_id, - query_is_valid=True, # TODO(lucasagomes): implement as part of query validation - query=query_request.query, - query_request=query_request, - summary=summary, - rag_chunks=rag_chunks_dict, - truncated=False, # TODO(lucasagomes): implement truncation as part of quota work - attachments=query_request.attachments or [], - ) + # Format RAG context for injection into user message + rag_context = format_rag_context_for_injection(rag_chunks) + + # Inject RAG context into input text + if rag_context: + input_text = input_text + rag_context + + # Create OpenAI response using responses API + create_kwargs: dict[str, Any] = { + "input": input_text, + "model": model_id, + "instructions": system_prompt, + "tools": cast(Any, toolgroups), + "stream": False, + "store": True, + "conversation": llama_stack_conv_id, + } + + response = await client.responses.create(**create_kwargs) + response = cast(OpenAIResponseObject, response) + logger.debug( + "Received response with ID: %s, conversation ID: %s, output items: %d", + response.id, + conversation_id, + len(response.output), + ) - logger.info("Persisting conversation details...") - persist_user_conversation_details( - user_id=user_id, - conversation_id=conversation_id, - model=model_id, - provider_id=provider_id, - topic_summary=topic_summary, + # Process OpenAI response format + llm_response = "" + tool_calls: list[ToolCallSummary] = [] + tool_results: list[ToolResultSummary] = [] + response_api_rag_chunks: list[RAGChunk] = [] + for output_item in response.output: + message_text = extract_text_from_response_output_item(output_item) + if message_text: + llm_response += message_text + + tool_call, tool_result = _build_tool_call_summary( + 
output_item, response_api_rag_chunks ) + if tool_call: + tool_calls.append(tool_call) + if tool_result: + tool_results.append(tool_result) + + # Merge RAG chunks from direct vector query with those from responses API + all_rag_chunks = rag_chunks + response_api_rag_chunks + logger.info( + "Combined RAG chunks: %d from direct query + %d from responses API = %d total", + len(rag_chunks), + len(response_api_rag_chunks), + len(all_rag_chunks), + ) - completed_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - cache_entry = CacheEntry( - query=query_request.query, - response=summary.llm_response, - provider=provider_id, - model=model_id, - started_at=started_at, - completed_at=completed_at, - referenced_documents=referenced_documents if referenced_documents else None, - tool_calls=summary.tool_calls if summary.tool_calls else None, - tool_results=summary.tool_results if summary.tool_results else None, - ) + logger.info( + "Response processing complete - Tool calls: %d, Response length: %d chars", + len(tool_calls), + len(llm_response), + ) - consume_tokens( - configuration.quota_limiters, - configuration.token_usage_history, - user_id, - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - model_id=model_id, - provider_id=provider_id, - ) + summary = TurnSummary( + llm_response=llm_response, + tool_calls=tool_calls, + tool_results=tool_results, + rag_chunks=all_rag_chunks, + ) + + # Extract referenced documents and token usage from Responses API response + # Merge with documents from direct vector query + response_referenced_documents = parse_referenced_documents_from_responses_api( + response + ) + all_referenced_documents = doc_ids_from_chunks + response_referenced_documents + logger.info( + "Combined referenced documents: %d from direct query + %d from responses API = %d total", + len(doc_ids_from_chunks), + len(response_referenced_documents), + len(all_referenced_documents), + ) + model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id + token_usage = extract_token_usage_from_responses_api( + response, model_label, provider_id, system_prompt + ) - store_conversation_into_cache( - configuration, - user_id, + if not summary.llm_response: + logger.warning( + "Response lacks content (conversation_id=%s)", conversation_id, - cache_entry, - _skip_userid_check, - topic_summary, ) - # Convert tool calls to response format - logger.info("Processing tool calls...") - - logger.info("Using referenced documents from response...") - - available_quotas = get_available_quotas(configuration.quota_limiters, user_id) - - logger.info("Building final response...") - response = QueryResponse( - conversation_id=conversation_id, - response=summary.llm_response, - rag_chunks=rag_chunks_dict, - tool_calls=summary.tool_calls if summary.tool_calls else [], - tool_results=summary.tool_results if summary.tool_results else [], - referenced_documents=referenced_documents, - truncated=False, # TODO: implement truncation detection - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - available_quotas=available_quotas, - ) - logger.info("Query processing completed successfully!") - return response - - # connection to Llama Stack server - except APIConnectionError as e: - # Update metrics for the LLM call failure - metrics.llm_calls_failures_total.inc() - logger.error("Unable to connect to Llama Stack: %s", e) - response = ServiceUnavailableResponse( - backend_name="Llama Stack", - cause=str(e), - ) - raise 
HTTPException(**response.model_dump()) from e - except SQLAlchemyError as e: - logger.exception("Error persisting conversation details.") - response = InternalServerErrorResponse.database_error() - raise HTTPException(**response.model_dump()) from e - except RateLimitError as e: - used_model = getattr(e, "model", "") - if used_model: - response = QuotaExceededResponse.model(used_model) - else: - response = QuotaExceededResponse( - response="The quota has been exceeded", cause=str(e) + return ( + summary, + normalize_conversation_id(conversation_id), + all_referenced_documents, + token_usage, + ) + + +def extract_rag_chunks_from_file_search_item( + item: OpenAIResponseOutputMessageFileSearchToolCall, + rag_chunks: list[RAGChunk], +) -> None: + """Extract RAG chunks from a file search tool call item and append to rag_chunks. + + Args: + item: The file search tool call item. + rag_chunks: List to append extracted RAG chunks to. + """ + if item.results is not None: + for result in item.results: + rag_chunk = RAGChunk( + content=result.text, source=result.filename, score=result.score ) - raise HTTPException(**response.model_dump()) from e - except APIStatusError as e: - logger.exception("Error in query endpoint handler: %s", e) - response = InternalServerErrorResponse.generic() - raise HTTPException(**response.model_dump()) from e + rag_chunks.append(rag_chunk) -def select_model_and_provider_id( - models: ModelListResponse, model_id: Optional[str], provider_id: Optional[str] -) -> tuple[str, str, str]: +def parse_rag_chunks_from_responses_api( + response_obj: OpenAIResponseObject, +) -> list[RAGChunk]: """ - Select the model ID and provider ID based on the request or available models. + Extract rag_chunks from the llama-stack OpenAI response. - Determine and return the appropriate model and provider IDs for - a query request. + Args: + response_obj: The ResponseObject from OpenAI compatible response API in llama-stack. - If the request specifies both model and provider IDs, those are used. - Otherwise, defaults from configuration are applied. If neither is - available, selects the first available LLM model from the provided model - list. Validates that the selected model exists among the available models. + Returns: + List of RAGChunk with content, source, score + """ + rag_chunks: list[RAGChunk] = [] + + for output_item in response_obj.output: + item_type = getattr(output_item, "type", None) + if item_type == "file_search_call": + item = cast(OpenAIResponseOutputMessageFileSearchToolCall, output_item) + extract_rag_chunks_from_file_search_item(item, rag_chunks) + + return rag_chunks + + +def parse_referenced_documents_from_responses_api( + response: OpenAIResponseObject, # pylint: disable=unused-argument +) -> list[ReferencedDocument]: + """ + Parse referenced documents from OpenAI Responses API response. + + Args: + response: The OpenAI Response API response object Returns: - A tuple containing the combined model ID (in the format - "provider/model"), and its separated parts: the model label and the provider ID. + list[ReferencedDocument]: List of referenced documents with doc_url and doc_title + """ + documents: list[ReferencedDocument] = [] + # Use a set to track unique documents by (doc_url, doc_title) tuple + seen_docs: set[tuple[Optional[str], Optional[str]]] = set() + + # Handle None response (e.g., when agent fails) + if response is None or not response.output: + return documents + + for output_item in response.output: + item_type = getattr(output_item, "type", None) + + # 1. 
Parse from file_search_call results + if item_type == "file_search_call": + results = getattr(output_item, "results", []) or [] + for result in results: + # Handle both object and dict access + if isinstance(result, dict): + attributes = result.get("attributes", {}) + else: + attributes = getattr(result, "attributes", {}) + + # Try to get URL from attributes + # Look for common URL fields in attributes + doc_url = ( + attributes.get("doc_url") + or attributes.get("docs_url") + or attributes.get("url") + or attributes.get("link") + ) + doc_title = attributes.get("title") + + if doc_title or doc_url: + # Treat empty string as None for URL to satisfy Optional[AnyUrl] + final_url = doc_url if doc_url else None + if (final_url, doc_title) not in seen_docs: + documents.append( + ReferencedDocument(doc_url=final_url, doc_title=doc_title) + ) + seen_docs.add((final_url, doc_title)) + + return documents - Raises: - HTTPException: If no suitable LLM model is found or the selected model is not available. + +def extract_token_usage_from_responses_api( + response: OpenAIResponseObject, + model: str, + provider: str, + system_prompt: str = "", # pylint: disable=unused-argument +) -> TokenCounter: """ - # If model_id and provider_id are provided in the request, use them + Extract token usage from OpenAI Responses API response and update metrics. - # If model_id is not provided in the request, check the configuration - if not model_id or not provider_id: - logger.debug( - "No model ID or provider ID specified in request, checking configuration" - ) - model_id = configuration.inference.default_model # type: ignore[reportAttributeAccessIssue] - provider_id = ( - configuration.inference.default_provider # type: ignore[reportAttributeAccessIssue] - ) + This function extracts token usage information from the Responses API response + object and updates Prometheus metrics. If usage information is not available, + it returns zero values without estimation. - # If no model is specified in the request or configuration, use the first available LLM - if not model_id or not provider_id: - logger.debug( - "No model ID or provider ID specified in request or configuration, " - "using the first available LLM" - ) + Note: When llama stack internally uses chat_completions, the usage field may be + empty or a dict. This is expected and will be populated in future llama stack versions. 
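+
+    A usage sketch (the response, model, and provider values are illustrative):
+
+        token_usage = extract_token_usage_from_responses_api(
+            response, model="gpt-4o-mini", provider="openai"
+        )
+        logger.debug(
+            "tokens: input=%d output=%d",
+            token_usage.input_tokens,
+            token_usage.output_tokens,
+        )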
+ + Args: + response: The OpenAI Response API response object + model: The model identifier for metrics labeling + provider: The provider identifier for metrics labeling + system_prompt: The system prompt used (unused, kept for compatibility) + + Returns: + TokenCounter: Token usage information with input_tokens and output_tokens + """ + token_counter = TokenCounter() + token_counter.llm_calls = 1 + + # Extract usage from the response if available + # Note: usage attribute exists at runtime but may not be in type definitions + usage = getattr(response, "usage", None) + if usage: try: - model = next( - m - for m in models - if m.custom_metadata and m.custom_metadata.get("model_type") == "llm" - ) - model_id = model.id - # Extract provider_id from custom_metadata - provider_id = ( - str(model.custom_metadata.get("provider_id", "")) - if model.custom_metadata - else "" + # Handle both dict and object cases due to llama_stack inconsistency: + # - When llama_stack converts to chat_completions internally, usage is a dict + # - When using proper Responses API, usage should be an object + # TODO: Remove dict handling once llama_stack standardizes on object type # pylint: disable=fixme + if isinstance(usage, dict): + input_tokens = usage.get("input_tokens", 0) + output_tokens = usage.get("output_tokens", 0) + else: + # Object with attributes (expected final behavior) + input_tokens = getattr(usage, "input_tokens", 0) + output_tokens = getattr(usage, "output_tokens", 0) + # Only set if we got valid values + if input_tokens or output_tokens: + token_counter.input_tokens = input_tokens or 0 + token_counter.output_tokens = output_tokens or 0 + + logger.debug( + "Extracted token usage from Responses API: input=%d, output=%d", + token_counter.input_tokens, + token_counter.output_tokens, + ) + + # Update Prometheus metrics only when we have actual usage data + try: + metrics.llm_token_sent_total.labels(provider, model).inc( + token_counter.input_tokens + ) + metrics.llm_token_received_total.labels(provider, model).inc( + token_counter.output_tokens + ) + except (AttributeError, TypeError, ValueError) as e: + logger.warning("Failed to update token metrics: %s", e) + _increment_llm_call_metric(provider, model) + else: + logger.debug( + "Usage object exists but tokens are 0 or None, treating as no usage info" + ) + # Still increment the call counter + _increment_llm_call_metric(provider, model) + except (AttributeError, KeyError, TypeError) as e: + logger.warning( + "Failed to extract token usage from response.usage: %s. 
Usage value: %s", + e, + usage, ) - logger.info("Selected model: %s", model) - model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id - return model_id, model_label, provider_id - except (StopIteration, AttributeError) as e: - message = "No LLM model found in available models" - logger.error(message) - response = NotFoundResponse(resource="model", resource_id=model_id or "") - raise HTTPException(**response.model_dump()) from e - - llama_stack_model_id = f"{provider_id}/{model_id}" - # Validate that the model_id and provider_id are in the available models - logger.debug("Searching for model: %s, provider: %s", model_id, provider_id) - # TODO: Create sepparate validation of provider - if not any( - m.id in (llama_stack_model_id, model_id) - and ( - m.custom_metadata - and str(m.custom_metadata.get("provider_id", "")) == provider_id + # Still increment the call counter + _increment_llm_call_metric(provider, model) + else: + # No usage information available - this is expected when llama stack + # internally converts to chat_completions + logger.debug( + "No usage information in Responses API response, token counts will be 0" ) - for m in models - ): - message = f"Model {model_id} from provider {provider_id} not found in available models" - logger.error(message) - response = NotFoundResponse(resource="model", resource_id=model_id) - raise HTTPException(**response.model_dump()) - return llama_stack_model_id, model_id, provider_id + # token_counter already initialized with 0 values + # Still increment the call counter + _increment_llm_call_metric(provider, model) + + return token_counter + + +def _increment_llm_call_metric(provider: str, model: str) -> None: + """Safely increment LLM call metric.""" + try: + metrics.llm_calls_total.labels(provider, model).inc() + except (AttributeError, TypeError, ValueError) as e: + logger.warning("Failed to update LLM call metric: %s", e) -def _is_inout_shield(shield: Shield) -> bool: +def get_rag_tools( + vector_store_ids: list[str], solr_params: Optional[dict[str, Any]] = None +) -> Optional[list[dict[str, Any]]]: """ - Determine if the shield identifier indicates an input/output shield. + Convert vector store IDs to tools format for Responses API. - Parameters: - shield (Shield): The shield to check. + Args: + vector_store_ids: List of vector store identifiers + solr_params: Optional Solr filtering parameters Returns: - bool: True if the shield identifier starts with "inout_", otherwise False. + Optional[list[dict[str, Any]]]: List containing file_search tool configuration, + or None if no vector stores provided """ - return shield.identifier.startswith("inout_") + if not vector_store_ids: + return None + + tool_config = { + "type": "file_search", + "vector_store_ids": vector_store_ids, + "max_num_results": 10, + } + + if solr_params: + tool_config["solr"] = solr_params + return [tool_config] -def is_output_shield(shield: Shield) -> bool: + +def get_mcp_tools( + mcp_servers: list[ModelContextProtocolServer], + token: str | None = None, + mcp_headers: dict[str, dict[str, str]] | None = None, +) -> list[dict[str, Any]]: """ - Determine if the shield is for monitoring output. + Convert MCP servers to tools format for Responses API. - Return True if the given shield is classified as an output or - inout shield. 
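+    For example (a sketch; the server name and URL are illustrative), one
+    configured server with no authorization headers typically yields:
+
+        [
+            {
+                "type": "mcp",
+                "server_label": "my-server",
+                "server_url": "http://localhost:9000/mcp",
+                "require_approval": "never",
+            }
+        ]
+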
+    Args:
+        mcp_servers: List of MCP server configurations
+        token: Optional authentication token for MCP server authorization
+        mcp_headers: Optional per-request headers for MCP servers, keyed by server name

-    A shield is considered an output shield if its identifier
-    starts with "output_" or "inout_".
+    Returns:
+        list[dict[str, Any]]: List of MCP tool definitions with server
+        details and optional auth headers
+
+    The way it works: we go through all the defined MCP servers and
+    create a tool definition for each of them. If an MCP server definition
+    has non-empty resolved_authorization_headers, we create invocation
+    headers using the following algorithm:
+    1. If the header value is 'kubernetes', the user's k8s token is used as the header value
+    2. If the header value is 'client':
+       look up the value for the given MCP server/header in mcp_headers;
+       if no value is found, omit this header, otherwise use the found value
+    3. Otherwise, use the value from resolved_authorization_headers directly
+
+    This algorithm makes it possible to:
+    1. Use static global header values provided by the configuration
+    2. Use a user-specific k8s token, which works for the majority of
+       Kubernetes-based MCP servers
+    3. Use user-specific tokens (passed by the client) for user-specific MCP headers
     """
-    return _is_inout_shield(shield) or shield.identifier.startswith("output_")
+
+    def _get_token_value(original: str, header: str) -> str | None:
+        """Convert to header value."""
+        match original:
+            case constants.MCP_AUTH_KUBERNETES:
+                # use k8s token
+                if token is None or token == "":
+                    return None
+                return f"Bearer {token}"
+            case constants.MCP_AUTH_CLIENT:
+                # use client provided token
+                if mcp_headers is None:
+                    return None
+                c_headers = mcp_headers.get(mcp_server.name, None)
+                if c_headers is None:
+                    return None
+                return c_headers.get(header, None)
+            case _:
+                # use provided
+                return original
+
+    tools = []
+    for mcp_server in mcp_servers:
+        # Base tool definition
+        tool_def = {
+            "type": "mcp",
+            "server_label": mcp_server.name,
+            "server_url": mcp_server.url,
+            "require_approval": "never",
+        }
+
+        # Build headers
+        headers = {}
+        for name, value in mcp_server.resolved_authorization_headers.items():
+            # for each defined header
+            h_value = _get_token_value(value, name)
+            # only add the header if we got a value
+            if h_value is not None:
+                headers[name] = h_value
+
+        # Skip server if auth headers were configured but not all could be resolved
+        if mcp_server.authorization_headers and len(headers) != len(
+            mcp_server.authorization_headers
+        ):
+            logger.warning(
+                "Skipping MCP server %s: required %d auth headers but only resolved %d",
+                mcp_server.name,
+                len(mcp_server.authorization_headers),
+                len(headers),
+            )
+            continue
+
+        if len(headers) > 0:
+            # add headers to tool definition
+            tool_def["headers"] = headers  # type: ignore[index]
+        # collect tools info
+        tools.append(tool_def)
+    return tools


-def is_input_shield(shield: Shield) -> bool:
+async def prepare_tools_for_responses_api(
+    client: AsyncLlamaStackClient,
+    query_request: QueryRequest,
+    token: str,
+    config: AppConfig,
+    *,
+    mcp_headers: Optional[dict[str, dict[str, str]]] = None,
+    skip_rag_tools: bool = False,
+) -> Optional[list[dict[str, Any]]]:
     """
-    Determine if the shield is for monitoring input.
+    Prepare tools for Responses API including RAG and MCP tools.

-    Return True if the shield is classified as an input or inout
-    shield.
+    This function retrieves vector stores and combines them with MCP
+    server tools to create a unified toolgroups list for the Responses API.

-    Parameters:
-        shield (Shield): The shield identifier to classify.
+    Args:
+        client: The Llama Stack client instance
+        query_request: The user's query request
+        token: Authentication token for MCP tools
+        config: Configuration object containing MCP server settings
+        mcp_headers: Per-request headers for MCP servers
+        skip_rag_tools: If True, skip adding RAG tools (used when doing direct vector querying)

     Returns:
-        bool: True if the shield is for input or both input/output monitoring; False otherwise.
+        Optional[list[dict[str, Any]]]: List of tool configurations for the
+        Responses API, or None if no_tools is True or no tools are available
     """
-    return _is_inout_shield(shield) or not is_output_shield(shield)
+    if query_request.no_tools:
+        return None

+    toolgroups = []

-def validate_attachments_metadata(attachments: list[Attachment]) -> None:
-    """Validate the attachments metadata provided in the request.
+    # Add RAG tools if not skipped
+    if not skip_rag_tools:
+        # Get vector stores for RAG tools - use specified ones or fetch all
+        if query_request.vector_store_ids:
+            vector_store_ids = query_request.vector_store_ids
+            logger.info("Using specified vector_store_ids: %s", vector_store_ids)
+        else:
+            vector_store_ids = [
+                vector_store.id
+                for vector_store in (await client.vector_stores.list()).data
+            ]
+            logger.info("Using all available vector_store_ids: %s", vector_store_ids)
+
+        # Add RAG tools if vector stores are available
+        if vector_store_ids:
+            rag_tools = get_rag_tools(vector_store_ids)
+            if rag_tools:
+                logger.info("RAG tools are: %s", rag_tools)
+                toolgroups.extend(rag_tools)
+            else:
+                logger.info("No RAG tools configured")
+        else:
+            logger.info("No vector stores available for RAG tools")
+    else:
+        logger.info("Skipping RAG tools - using direct vector querying instead")

-    Raises:
-        HTTPException: If any attachment has an invalid type or content type,
-            an HTTP 422 error is raised.
- """ - for attachment in attachments: - if attachment.attachment_type not in constants.ATTACHMENT_TYPES: - message = ( - f"Invalid attatchment type {attachment.attachment_type}: " - f"must be one of {constants.ATTACHMENT_TYPES}" - ) - logger.error(message) - response = UnprocessableEntityResponse( - response="Invalid attribute value", cause=message - ) - raise HTTPException(**response.model_dump()) - if attachment.content_type not in constants.ATTACHMENT_CONTENT_TYPES: - message = ( - f"Invalid attatchment content type {attachment.content_type}: " - f"must be one of {constants.ATTACHMENT_CONTENT_TYPES}" - ) - logger.error(message) - response = UnprocessableEntityResponse( - response="Invalid attribute value", cause=message - ) - raise HTTPException(**response.model_dump()) + # Add MCP server tools + mcp_tools = get_mcp_tools(config.mcp_servers, token, mcp_headers) + if mcp_tools: + toolgroups.extend(mcp_tools) + logger.debug( + "Configured %d MCP tools: %s", + len(mcp_tools), + [tool.get("server_label", "unknown") for tool in mcp_tools], + ) + # Convert empty list to None for consistency with existing behavior + if not toolgroups: + return None + + return toolgroups diff --git a/src/app/endpoints/query_old.py b/src/app/endpoints/query_old.py new file mode 100644 index 000000000..251b346d7 --- /dev/null +++ b/src/app/endpoints/query_old.py @@ -0,0 +1,579 @@ +"""Handler for REST API call to provide answer to query.""" + +import logging +from datetime import UTC, datetime +from typing import Annotated, Any, Optional + +from fastapi import APIRouter, Depends, HTTPException, Request +from llama_stack_api.shields import Shield +from llama_stack_client import ( + APIConnectionError, + APIStatusError, + RateLimitError, # type: ignore +) +from llama_stack_client.types.model_list_response import ModelListResponse +from sqlalchemy.exc import SQLAlchemyError + +import constants +import metrics +from app.database import get_session +from authentication import get_auth_dependency +from authentication.interface import AuthTuple +from authorization.azure_token_manager import AzureEntraIDManager +from client import AsyncLlamaStackClientHolder +from configuration import configuration +from models.cache_entry import CacheEntry +from models.config import Action +from models.database.conversations import UserConversation +from models.requests import Attachment, QueryRequest +from models.responses import ( + ForbiddenResponse, + InternalServerErrorResponse, + NotFoundResponse, + PromptTooLongResponse, + QueryResponse, + QuotaExceededResponse, + ServiceUnavailableResponse, + UnauthorizedResponse, + UnprocessableEntityResponse, +) +from utils.endpoints import ( + check_configuration_loaded, + store_conversation_into_cache, + validate_conversation_ownership, + validate_model_provider_override, +) +from utils.quota import ( + check_tokens_available, + consume_tokens, + get_available_quotas, +) +from utils.suid import normalize_conversation_id +from utils.transcripts import store_transcript + +logger = logging.getLogger("app.endpoints.handlers") +router = APIRouter(tags=["query"]) + + +query_response: dict[int | str, dict[str, Any]] = { + 200: QueryResponse.openapi_response(), + 401: UnauthorizedResponse.openapi_response( + examples=["missing header", "missing token"] + ), + 403: ForbiddenResponse.openapi_response( + examples=["endpoint", "conversation read", "model override"] + ), + 404: NotFoundResponse.openapi_response( + examples=["model", "conversation", "provider"] + ), + 413: 
PromptTooLongResponse.openapi_response(),
+    422: UnprocessableEntityResponse.openapi_response(),
+    429: QuotaExceededResponse.openapi_response(),
+    500: InternalServerErrorResponse.openapi_response(examples=["configuration"]),
+    503: ServiceUnavailableResponse.openapi_response(),
+}
+
+
+def is_transcripts_enabled() -> bool:
+    """Check if transcript collection is enabled.
+
+    Returns:
+        bool: True if transcript collection is enabled, False otherwise.
+    """
+    return configuration.user_data_collection_configuration.transcripts_enabled
+
+
+def persist_user_conversation_details(
+    user_id: str,
+    conversation_id: str,
+    model: str,
+    provider_id: str,
+    topic_summary: Optional[str],
+) -> None:
+    """Associate a conversation with a user in the database."""
+    # Normalize the conversation ID (strip 'conv_' prefix if present)
+    normalized_id = normalize_conversation_id(conversation_id)
+    logger.debug(
+        "persist_user_conversation_details - original conv_id: %s, normalized: %s, user: %s",
+        conversation_id,
+        normalized_id,
+        user_id,
+    )
+
+    with get_session() as session:
+        existing_conversation = (
+            session.query(UserConversation).filter_by(id=normalized_id).first()
+        )
+
+        if not existing_conversation:
+            conversation = UserConversation(
+                id=normalized_id,
+                user_id=user_id,
+                last_used_model=model,
+                last_used_provider=provider_id,
+                topic_summary=topic_summary,
+                message_count=1,
+            )
+            session.add(conversation)
+            logger.debug(
+                "Associated conversation %s to user %s", normalized_id, user_id
+            )
+        else:
+            existing_conversation.last_used_model = model
+            existing_conversation.last_used_provider = provider_id
+            existing_conversation.last_message_at = datetime.now(UTC)
+            existing_conversation.message_count += 1
+            logger.debug(
+                "Updating existing conversation in DB - ID: %s, User: %s, Messages: %d",
+                normalized_id,
+                user_id,
+                existing_conversation.message_count,
+            )
+
+        session.commit()
+        logger.debug(
+            "Successfully committed conversation %s to database", normalized_id
+        )
+
+
+def evaluate_model_hints(
+    user_conversation: Optional[UserConversation],
+    query_request: QueryRequest,
+) -> tuple[Optional[str], Optional[str]]:
+    """Evaluate model and provider hints from the request and the user conversation."""
+    model_id: Optional[str] = query_request.model
+    provider_id: Optional[str] = query_request.provider
+
+    if user_conversation is not None:
+        if query_request.model is not None:
+            if query_request.model != user_conversation.last_used_model:
+                logger.debug(
+                    "Model specified in request: %s, preferring it over user conversation model %s",
+                    query_request.model,
+                    user_conversation.last_used_model,
+                )
+        else:
+            logger.debug(
+                "No model specified in request, using latest model from user conversation: %s",
+                user_conversation.last_used_model,
+            )
+            model_id = user_conversation.last_used_model
+
+        if query_request.provider is not None:
+            if query_request.provider != user_conversation.last_used_provider:
+                logger.debug(
+                    "Provider specified in request: %s, "
+                    "preferring it over user conversation provider %s",
+                    query_request.provider,
+                    user_conversation.last_used_provider,
+                )
+        else:
+            logger.debug(
+                "No provider specified in request, "
+                "using latest provider from user conversation: %s",
+                user_conversation.last_used_provider,
+            )
+            provider_id = user_conversation.last_used_provider
+
+    return model_id, provider_id
+
+
+async def query_endpoint_handler_base(  # pylint: disable=R0914
+    request: Request,
+    query_request: QueryRequest,
+    auth: Annotated[AuthTuple, Depends(get_auth_dependency())],
+    mcp_headers: dict[str, dict[str, str]],
+    
retrieve_response_func: Any,
+    get_topic_summary_func: Any,
+) -> QueryResponse:
+    """
+    Handle query endpoints (shared by Agent API and Responses API).
+
+    Processes a POST request to a query endpoint, forwarding the
+    user's query to a selected Llama Stack LLM and returning the generated response.
+
+    Validates configuration and authentication, selects the appropriate model
+    and provider, retrieves the LLM response, updates metrics, and optionally
+    stores a transcript of the interaction. Handles connection errors to the
+    Llama Stack service by returning an HTTP 503 error.
+
+    Args:
+        request: The FastAPI request object
+        query_request: The query request containing the user's question
+        auth: Authentication tuple from dependency
+        mcp_headers: MCP headers from dependency
+        retrieve_response_func: The retrieve_response function to use (Agent or Responses API)
+        get_topic_summary_func: The get_topic_summary function to use (Agent or Responses API)
+
+    Returns:
+        QueryResponse: Contains the conversation ID and the LLM-generated response.
+    """
+    check_configuration_loaded(configuration)
+
+    # Enforce RBAC: optionally disallow overriding model/provider in requests
+    validate_model_provider_override(query_request, request.state.authorized_actions)
+
+    # log Llama Stack configuration
+    logger.info("Llama stack config: %s", configuration.llama_stack_configuration)
+
+    user_id, _, _skip_userid_check, token = auth
+
+    started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
+    user_conversation: Optional[UserConversation] = None
+    if query_request.conversation_id:
+        logger.debug(
+            "Conversation ID specified in query: %s", query_request.conversation_id
+        )
+        # Normalize the conversation ID for database lookup (strip conv_ prefix if present)
+        normalized_conv_id_for_lookup = normalize_conversation_id(
+            query_request.conversation_id
+        )
+        user_conversation = validate_conversation_ownership(
+            user_id=user_id,
+            conversation_id=normalized_conv_id_for_lookup,
+            others_allowed=(
+                Action.QUERY_OTHERS_CONVERSATIONS in request.state.authorized_actions
+            ),
+        )
+
+        if user_conversation is None:
+            logger.warning(
+                "Conversation %s not found for user %s",
+                query_request.conversation_id,
+                user_id,
+            )
+            response = NotFoundResponse(
+                resource="conversation", resource_id=query_request.conversation_id
+            )
+            raise HTTPException(**response.model_dump())
+
+    else:
+        logger.debug("Query does not contain conversation ID")
+
+    try:
+        check_tokens_available(configuration.quota_limiters, user_id)
+        # try to get Llama Stack client
+        client = AsyncLlamaStackClientHolder().get_client()
+        llama_stack_model_id, model_id, provider_id = select_model_and_provider_id(
+            await client.models.list(),
+            *evaluate_model_hints(
+                user_conversation=user_conversation, query_request=query_request
+            ),
+        )
+
+        if (
+            provider_id == "azure"
+            and AzureEntraIDManager().is_entra_id_configured
+            and AzureEntraIDManager().is_token_expired
+            and AzureEntraIDManager().refresh_token()
+        ):
+            if AsyncLlamaStackClientHolder().is_library_client:
+                client = await AsyncLlamaStackClientHolder().reload_library_client()
+            else:
+                azure_config = next(
+                    p.config
+                    for p in await client.providers.list()
+                    if p.provider_type == "remote::azure"
+                )
+                client = AsyncLlamaStackClientHolder().update_provider_data(
+                    {
+                        "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(),
+                        "azure_api_base": str(azure_config.get("api_base")),
+                    }
+                )
+
+        summary, conversation_id, referenced_documents, token_usage = (
+            await 
retrieve_response_func( + client, + llama_stack_model_id, + query_request, + token, + mcp_headers=mcp_headers, + provider_id=provider_id, + ) + ) + + # Get the initial topic summary for the conversation + topic_summary = None + with get_session() as session: + existing_conversation = ( + session.query(UserConversation).filter_by(id=conversation_id).first() + ) + if not existing_conversation: + # Check if topic summary should be generated (default: True) + should_generate = query_request.generate_topic_summary + + if should_generate: + logger.debug("Generating topic summary for new conversation") + topic_summary = await get_topic_summary_func( + query_request.query, client, llama_stack_model_id + ) + else: + logger.debug( + "Topic summary generation disabled by request parameter" + ) + topic_summary = None + # Convert RAG chunks to dictionary format once for reuse + logger.info("Processing RAG chunks...") + rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] + + if not is_transcripts_enabled(): + logger.debug("Transcript collection is disabled in the configuration") + else: + store_transcript( + user_id=user_id, + conversation_id=conversation_id, + model_id=model_id, + provider_id=provider_id, + query_is_valid=True, # TODO(lucasagomes): implement as part of query validation + query=query_request.query, + query_request=query_request, + summary=summary, + rag_chunks=rag_chunks_dict, + truncated=False, # TODO(lucasagomes): implement truncation as part of quota work + attachments=query_request.attachments or [], + ) + + logger.info("Persisting conversation details...") + persist_user_conversation_details( + user_id=user_id, + conversation_id=conversation_id, + model=model_id, + provider_id=provider_id, + topic_summary=topic_summary, + ) + + completed_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + cache_entry = CacheEntry( + query=query_request.query, + response=summary.llm_response, + provider=provider_id, + model=model_id, + started_at=started_at, + completed_at=completed_at, + referenced_documents=referenced_documents if referenced_documents else None, + tool_calls=summary.tool_calls if summary.tool_calls else None, + tool_results=summary.tool_results if summary.tool_results else None, + ) + + consume_tokens( + configuration.quota_limiters, + configuration.token_usage_history, + user_id, + input_tokens=token_usage.input_tokens, + output_tokens=token_usage.output_tokens, + model_id=model_id, + provider_id=provider_id, + ) + + store_conversation_into_cache( + configuration, + user_id, + conversation_id, + cache_entry, + _skip_userid_check, + topic_summary, + ) + + # Convert tool calls to response format + logger.info("Processing tool calls...") + + logger.info("Using referenced documents from response...") + + available_quotas = get_available_quotas(configuration.quota_limiters, user_id) + + logger.info("Building final response...") + response = QueryResponse( + conversation_id=conversation_id, + response=summary.llm_response, + rag_chunks=rag_chunks_dict, + tool_calls=summary.tool_calls if summary.tool_calls else [], + tool_results=summary.tool_results if summary.tool_results else [], + referenced_documents=referenced_documents, + truncated=False, # TODO: implement truncation detection + input_tokens=token_usage.input_tokens, + output_tokens=token_usage.output_tokens, + available_quotas=available_quotas, + ) + logger.info("Query processing completed successfully!") + return response + + # connection to Llama Stack server + except APIConnectionError as e: + # Update 
metrics for the LLM call failure + metrics.llm_calls_failures_total.inc() + logger.error("Unable to connect to Llama Stack: %s", e) + response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**response.model_dump()) from e + except SQLAlchemyError as e: + logger.exception("Error persisting conversation details.") + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e + except RateLimitError as e: + used_model = getattr(e, "model", "") + if used_model: + response = QuotaExceededResponse.model(used_model) + else: + response = QuotaExceededResponse( + response="The quota has been exceeded", cause=str(e) + ) + raise HTTPException(**response.model_dump()) from e + except APIStatusError as e: + logger.exception("Error in query endpoint handler: %s", e) + response = InternalServerErrorResponse.generic() + raise HTTPException(**response.model_dump()) from e + + +def select_model_and_provider_id( + models: ModelListResponse, model_id: Optional[str], provider_id: Optional[str] +) -> tuple[str, str, str]: + """ + Select the model ID and provider ID based on the request or available models. + + Determine and return the appropriate model and provider IDs for + a query request. + + If the request specifies both model and provider IDs, those are used. + Otherwise, defaults from configuration are applied. If neither is + available, selects the first available LLM model from the provided model + list. Validates that the selected model exists among the available models. + + Returns: + A tuple containing the combined model ID (in the format + "provider/model"), and its separated parts: the model label and the provider ID. + + Raises: + HTTPException: If no suitable LLM model is found or the selected model is not available. 
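+
+    Example (illustrative, assuming "gpt-4o-mini" from provider "openai" is
+    registered in llama-stack):
+
+        select_model_and_provider_id(models, "gpt-4o-mini", "openai")
+        # -> ("openai/gpt-4o-mini", "gpt-4o-mini", "openai")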
+ """ + # If model_id and provider_id are provided in the request, use them + + # If model_id is not provided in the request, check the configuration + if not model_id or not provider_id: + logger.debug( + "No model ID or provider ID specified in request, checking configuration" + ) + model_id = configuration.inference.default_model # type: ignore[reportAttributeAccessIssue] + provider_id = ( + configuration.inference.default_provider # type: ignore[reportAttributeAccessIssue] + ) + + # If no model is specified in the request or configuration, use the first available LLM + if not model_id or not provider_id: + logger.debug( + "No model ID or provider ID specified in request or configuration, " + "using the first available LLM" + ) + try: + model = next( + m + for m in models + if m.custom_metadata and m.custom_metadata.get("model_type") == "llm" + ) + model_id = model.id + # Extract provider_id from custom_metadata + provider_id = ( + str(model.custom_metadata.get("provider_id", "")) + if model.custom_metadata + else "" + ) + logger.info("Selected model: %s", model) + model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id + return model_id, model_label, provider_id + except (StopIteration, AttributeError) as e: + message = "No LLM model found in available models" + logger.error(message) + response = NotFoundResponse(resource="model", resource_id=model_id or "") + raise HTTPException(**response.model_dump()) from e + + llama_stack_model_id = f"{provider_id}/{model_id}" + # Validate that the model_id and provider_id are in the available models + logger.debug("Searching for model: %s, provider: %s", model_id, provider_id) + # TODO: Create sepparate validation of provider + if not any( + m.id in (llama_stack_model_id, model_id) + and ( + m.custom_metadata + and str(m.custom_metadata.get("provider_id", "")) == provider_id + ) + for m in models + ): + message = f"Model {model_id} from provider {provider_id} not found in available models" + logger.error(message) + response = NotFoundResponse(resource="model", resource_id=model_id) + raise HTTPException(**response.model_dump()) + return llama_stack_model_id, model_id, provider_id + + +def _is_inout_shield(shield: Shield) -> bool: + """ + Determine if the shield identifier indicates an input/output shield. + + Parameters: + shield (Shield): The shield to check. + + Returns: + bool: True if the shield identifier starts with "inout_", otherwise False. + """ + return shield.identifier.startswith("inout_") + + +def is_output_shield(shield: Shield) -> bool: + """ + Determine if the shield is for monitoring output. + + Return True if the given shield is classified as an output or + inout shield. + + A shield is considered an output shield if its identifier + starts with "output_" or "inout_". + """ + return _is_inout_shield(shield) or shield.identifier.startswith("output_") + + +def is_input_shield(shield: Shield) -> bool: + """ + Determine if the shield is for monitoring input. + + Return True if the shield is classified as an input or inout + shield. + + Parameters: + shield (Shield): The shield identifier to classify. + + Returns: + bool: True if the shield is for input or both input/output monitoring; False otherwise. + """ + return _is_inout_shield(shield) or not is_output_shield(shield) + + +def validate_attachments_metadata(attachments: list[Attachment]) -> None: + """Validate the attachments metadata provided in the request. 
+
+    Raises:
+        HTTPException: If any attachment has an invalid type or content type,
+            an HTTP 422 error is raised.
+    """
+    for attachment in attachments:
+        if attachment.attachment_type not in constants.ATTACHMENT_TYPES:
+            message = (
+                f"Invalid attachment type {attachment.attachment_type}: "
+                f"must be one of {constants.ATTACHMENT_TYPES}"
+            )
+            logger.error(message)
+            response = UnprocessableEntityResponse(
+                response="Invalid attribute value", cause=message
+            )
+            raise HTTPException(**response.model_dump())
+        if attachment.content_type not in constants.ATTACHMENT_CONTENT_TYPES:
+            message = (
+                f"Invalid attachment content type {attachment.content_type}: "
+                f"must be one of {constants.ATTACHMENT_CONTENT_TYPES}"
+            )
+            logger.error(message)
+            response = UnprocessableEntityResponse(
+                response="Invalid attribute value", cause=message
+            )
+            raise HTTPException(**response.model_dump())
diff --git a/src/app/endpoints/query_v2.py b/src/app/endpoints/query_v2.py
deleted file mode 100644
index 8b829d7f5..000000000
--- a/src/app/endpoints/query_v2.py
+++ /dev/null
@@ -1,922 +0,0 @@
-# pylint: disable=too-many-locals,too-many-branches,too-many-nested-blocks
-
-"""Handler for REST API call to provide answer to query using Response API."""
-
-import json
-import logging
-from typing import Annotated, Any, Optional, cast
-
-from fastapi import APIRouter, Depends, Request
-from llama_stack_api.openai_responses import (
-    OpenAIResponseMCPApprovalRequest,
-    OpenAIResponseMCPApprovalResponse,
-    OpenAIResponseObject,
-    OpenAIResponseOutput,
-    OpenAIResponseOutputMessageFileSearchToolCall,
-    OpenAIResponseOutputMessageFunctionToolCall,
-    OpenAIResponseOutputMessageMCPCall,
-    OpenAIResponseOutputMessageMCPListTools,
-    OpenAIResponseOutputMessageWebSearchToolCall,
-)
-from llama_stack_client import AsyncLlamaStackClient
-
-import constants
-import metrics
-from app.endpoints.query import (
-    query_endpoint_handler_base,
-    validate_attachments_metadata,
-)
-from authentication import get_auth_dependency
-from authentication.interface import AuthTuple
-from authorization.middleware import authorize
-from configuration import AppConfig, configuration
-from constants import DEFAULT_RAG_TOOL
-from models.config import Action, ModelContextProtocolServer
-from models.requests import QueryRequest
-from models.responses import (
-    ForbiddenResponse,
-    InternalServerErrorResponse,
-    NotFoundResponse,
-    QueryResponse,
-    QuotaExceededResponse,
-    ReferencedDocument,
-    ServiceUnavailableResponse,
-    UnauthorizedResponse,
-    UnprocessableEntityResponse,
-)
-from utils.endpoints import (
-    check_configuration_loaded,
-    get_system_prompt,
-    get_topic_summary_system_prompt,
-)
-from utils.mcp_headers import mcp_headers_dependency
-from utils.query import parse_arguments_string
-from utils.responses import extract_text_from_response_output_item
-from utils.shields import (
-    append_turn_to_conversation,
-    run_shield_moderation,
-)
-from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id
-from utils.token_counter import TokenCounter
-from utils.types import RAGChunk, ToolCallSummary, ToolResultSummary, TurnSummary
-from utils.vector_search import perform_vector_search, format_rag_context_for_injection
-
-logger = logging.getLogger("app.endpoints.handlers")
-router = APIRouter(tags=["query_v1"])
-
-query_v2_response: dict[int | str, dict[str, Any]] = {
-    200: QueryResponse.openapi_response(),
-    401: UnauthorizedResponse.openapi_response(
-        examples=["missing header", "missing token"]
-    ),
-    403: 
ForbiddenResponse.openapi_response( - examples=["endpoint", "conversation read", "model override"] - ), - 404: NotFoundResponse.openapi_response( - examples=["conversation", "model", "provider"] - ), - # 413: PromptTooLongResponse.openapi_response(), - 422: UnprocessableEntityResponse.openapi_response(), - 429: QuotaExceededResponse.openapi_response(), - 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), - 503: ServiceUnavailableResponse.openapi_response(), -} - - -def _build_tool_call_summary( # pylint: disable=too-many-return-statements,too-many-branches - output_item: OpenAIResponseOutput, - rag_chunks: list[RAGChunk], -) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]: - """Translate Responses API tool outputs into ToolCallSummary and ToolResultSummary records. - - Processes OpenAI response output items and extracts tool call and result information. - Also parses RAG chunks from file_search_call items and appends them to the provided list. - - Args: - output_item: An OpenAIResponseOutput item from the response.output array - rag_chunks: List to append extracted RAG chunks to (from file_search_call items) - Returns: - A tuple of (ToolCallSummary, ToolResultSummary) one of them possibly None - if current llama stack Responses API does not provide the information. - - Supported tool types: - - function_call: Function tool calls with parsed arguments (no result) - - file_search_call: File search operations with results (also extracts RAG chunks) - - web_search_call: Web search operations (incomplete) - - mcp_call: MCP calls with server labels - - mcp_list_tools: MCP server tool listings - - mcp_approval_request: MCP approval requests (no result) - - mcp_approval_response: MCP approval responses (no call) - """ - item_type = getattr(output_item, "type", None) - - if item_type == "function_call": - item = cast(OpenAIResponseOutputMessageFunctionToolCall, output_item) - return ( - ToolCallSummary( - id=item.call_id, - name=item.name, - args=parse_arguments_string(item.arguments), - type="function_call", - ), - None, # not supported by Responses API at all - ) - - if item_type == "file_search_call": - file_search_item = cast( - OpenAIResponseOutputMessageFileSearchToolCall, output_item - ) - extract_rag_chunks_from_file_search_item(file_search_item, rag_chunks) - response_payload: Optional[dict[str, Any]] = None - if file_search_item.results is not None: - response_payload = { - "results": [result.model_dump() for result in file_search_item.results] - } - return ToolCallSummary( - id=file_search_item.id, - name=DEFAULT_RAG_TOOL, - args={"queries": file_search_item.queries}, - type="file_search_call", - ), ToolResultSummary( - id=file_search_item.id, - status=file_search_item.status, - content=json.dumps(response_payload) if response_payload else "", - type="file_search_call", - round=1, - ) - - # Incomplete OpenAI Responses API definition in LLS: action attribute not supported yet - if item_type == "web_search_call": - web_search_item = cast( - OpenAIResponseOutputMessageWebSearchToolCall, output_item - ) - return ( - ToolCallSummary( - id=web_search_item.id, - name="web_search", - args={}, - type="web_search_call", - ), - ToolResultSummary( - id=web_search_item.id, - status=web_search_item.status, - content="", - type="web_search_call", - round=1, - ), - ) - - if item_type == "mcp_call": - mcp_call_item = cast(OpenAIResponseOutputMessageMCPCall, output_item) - args = parse_arguments_string(mcp_call_item.arguments) - if mcp_call_item.server_label: - 
args["server_label"] = mcp_call_item.server_label - content = ( - mcp_call_item.error - if mcp_call_item.error - else (mcp_call_item.output if mcp_call_item.output else "") - ) - - return ToolCallSummary( - id=mcp_call_item.id, - name=mcp_call_item.name, - args=args, - type="mcp_call", - ), ToolResultSummary( - id=mcp_call_item.id, - status="success" if mcp_call_item.error is None else "failure", - content=content, - type="mcp_call", - round=1, - ) - - if item_type == "mcp_list_tools": - mcp_list_tools_item = cast(OpenAIResponseOutputMessageMCPListTools, output_item) - tools_info = [ - { - "name": tool.name, - "description": tool.description, - "input_schema": tool.input_schema, - } - for tool in mcp_list_tools_item.tools - ] - content_dict = { - "server_label": mcp_list_tools_item.server_label, - "tools": tools_info, - } - return ( - ToolCallSummary( - id=mcp_list_tools_item.id, - name="mcp_list_tools", - args={"server_label": mcp_list_tools_item.server_label}, - type="mcp_list_tools", - ), - ToolResultSummary( - id=mcp_list_tools_item.id, - status="success", - content=json.dumps(content_dict), - type="mcp_list_tools", - round=1, - ), - ) - - if item_type == "mcp_approval_request": - approval_request_item = cast(OpenAIResponseMCPApprovalRequest, output_item) - args = parse_arguments_string(approval_request_item.arguments) - return ( - ToolCallSummary( - id=approval_request_item.id, - name=approval_request_item.name, - args=args, - type="tool_call", - ), - None, - ) - - if item_type == "mcp_approval_response": - approval_response_item = cast(OpenAIResponseMCPApprovalResponse, output_item) - content_dict = {} - if approval_response_item.reason: - content_dict["reason"] = approval_response_item.reason - return ( - None, - ToolResultSummary( - id=approval_response_item.approval_request_id, - status="success" if approval_response_item.approve else "denied", - content=json.dumps(content_dict), - type="mcp_approval_response", - round=1, - ), - ) - - return None, None - - -async def get_topic_summary( # pylint: disable=too-many-nested-blocks - question: str, client: AsyncLlamaStackClient, model_id: str -) -> str: - """ - Get a topic summary for a question using Responses API. - - This is the Responses API version of get_topic_summary, which uses - client.responses.create() instead of the Agent API. 
- - Args: - question: The question to generate a topic summary for - client: The AsyncLlamaStackClient to use for the request - model_id: The llama stack model ID (full format: provider/model) - - Returns: - str: The topic summary for the question - """ - topic_summary_system_prompt = get_topic_summary_system_prompt(configuration) - - # Use Responses API to generate topic summary - response = cast( - OpenAIResponseObject, - await client.responses.create( - input=question, - model=model_id, - instructions=topic_summary_system_prompt, - stream=False, - store=False, # Don't store topic summary requests - ), - ) - - # Extract text from response output - summary_text = "".join( - extract_text_from_response_output_item(output_item) - for output_item in response.output - ) - - return summary_text.strip() if summary_text else "" - - -@router.post("/query", responses=query_v2_response, summary="Query Endpoint Handler V1") -@authorize(Action.QUERY) -async def query_endpoint_handler_v2( - request: Request, - query_request: QueryRequest, - auth: Annotated[AuthTuple, Depends(get_auth_dependency())], - mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), -) -> QueryResponse: - """ - Handle request to the /query endpoint using Responses API. - - This is a wrapper around query_endpoint_handler_base that provides - the Responses API specific retrieve_response and get_topic_summary functions. - - Returns: - QueryResponse: Contains the conversation ID and the LLM-generated response. - """ - check_configuration_loaded(configuration) - return await query_endpoint_handler_base( - request=request, - query_request=query_request, - auth=auth, - mcp_headers=mcp_headers, - retrieve_response_func=retrieve_response, - get_topic_summary_func=get_topic_summary, - ) - - -async def retrieve_response( # pylint: disable=too-many-locals,too-many-branches,too-many-arguments,too-many-statements - client: AsyncLlamaStackClient, - model_id: str, - query_request: QueryRequest, - token: str, - mcp_headers: Optional[dict[str, dict[str, str]]] = None, - *, - provider_id: str = "", -) -> tuple[TurnSummary, str, list[ReferencedDocument], TokenCounter]: - """ - Retrieve response from LLMs and agents. - - Retrieves a response from the Llama Stack LLM or agent for a - given query, handling shield configuration, tool usage, and - attachment validation. - - This function configures system prompts, shields, and toolgroups - (including RAG and MCP integration) as needed based on - the query request and system configuration. It - validates attachments, manages conversation and session - context, and processes MCP headers for multi-component - processing. Corresponding metrics are updated. - - Parameters: - client (AsyncLlamaStackClient): The AsyncLlamaStackClient to use for the request. - model_id (str): The identifier of the LLM model to use. - query_request (QueryRequest): The user's query and associated metadata. - token (str): The authentication token for authorization. - mcp_headers (dict[str, dict[str, str]], optional): Headers for multi-component processing. - provider_id (str): The identifier of the LLM provider to use. - - Returns: - tuple[TurnSummary, str]: A tuple containing a summary of the LLM or agent's response content - and the conversation ID, the list of parsed referenced documents, - and token usage information. 
- """ - # use system prompt from request or default one - system_prompt = get_system_prompt(query_request, configuration) - logger.debug("Using system prompt: %s", system_prompt) - - # TODO(lucasagomes): redact attachments content before sending to LLM - # if attachments are provided, validate them - if query_request.attachments: - validate_attachments_metadata(query_request.attachments) - - # Prepare tools for responses API - skip RAG tools since we're doing direct vector query - toolgroups = await prepare_tools_for_responses_api( - client, - query_request, - token, - configuration, - mcp_headers=mcp_headers, - skip_rag_tools=True, - ) - - # Prepare input for Responses API - # Convert attachments to text and concatenate with query - input_text = query_request.query - if query_request.attachments: - for attachment in query_request.attachments: - # Append attachment content with type label - input_text += ( - f"\n\n[Attachment: {attachment.attachment_type}]\n{attachment.content}" - ) - - # Handle conversation ID for Responses API - # Create conversation upfront if not provided - conversation_id = query_request.conversation_id - if conversation_id: - # Conversation ID was provided - convert to llama-stack format - logger.debug("Using existing conversation ID: %s", conversation_id) - llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id) - else: - # No conversation_id provided - create a new conversation first - logger.debug("No conversation_id provided, creating new conversation") - - conversation = await client.conversations.create(metadata={}) - llama_stack_conv_id = conversation.id - # Store the normalized version for later use - conversation_id = normalize_conversation_id(llama_stack_conv_id) - logger.info( - "Created new conversation with ID: %s (normalized: %s)", - llama_stack_conv_id, - conversation_id, - ) - - # Run shield moderation before calling LLM - moderation_result = await run_shield_moderation(client, input_text) - if moderation_result.blocked: - violation_message = moderation_result.message or "" - await append_turn_to_conversation( - client, llama_stack_conv_id, input_text, violation_message - ) - summary = TurnSummary( - llm_response=violation_message, - tool_calls=[], - tool_results=[], - rag_chunks=[], - ) - return ( - summary, - normalize_conversation_id(conversation_id), - [], - TokenCounter(), - ) - - # Extract RAG chunks from vector DB query response BEFORE calling responses API - _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( - client, query_request, configuration - ) - - # Format RAG context for injection into user message - rag_context = format_rag_context_for_injection(rag_chunks) - - # Inject RAG context into input text - if rag_context: - input_text = input_text + rag_context - - # Create OpenAI response using responses API - create_kwargs: dict[str, Any] = { - "input": input_text, - "model": model_id, - "instructions": system_prompt, - "tools": cast(Any, toolgroups), - "stream": False, - "store": True, - "conversation": llama_stack_conv_id, - } - - response = await client.responses.create(**create_kwargs) - response = cast(OpenAIResponseObject, response) - logger.debug( - "Received response with ID: %s, conversation ID: %s, output items: %d", - response.id, - conversation_id, - len(response.output), - ) - - # Process OpenAI response format - llm_response = "" - tool_calls: list[ToolCallSummary] = [] - tool_results: list[ToolResultSummary] = [] - response_api_rag_chunks: list[RAGChunk] = [] - for output_item in 
response.output: - message_text = extract_text_from_response_output_item(output_item) - if message_text: - llm_response += message_text - - tool_call, tool_result = _build_tool_call_summary( - output_item, response_api_rag_chunks - ) - if tool_call: - tool_calls.append(tool_call) - if tool_result: - tool_results.append(tool_result) - - # Merge RAG chunks from direct vector query with those from responses API - all_rag_chunks = rag_chunks + response_api_rag_chunks - logger.info( - "Combined RAG chunks: %d from direct query + %d from responses API = %d total", - len(rag_chunks), - len(response_api_rag_chunks), - len(all_rag_chunks), - ) - - logger.info( - "Response processing complete - Tool calls: %d, Response length: %d chars", - len(tool_calls), - len(llm_response), - ) - - summary = TurnSummary( - llm_response=llm_response, - tool_calls=tool_calls, - tool_results=tool_results, - rag_chunks=all_rag_chunks, - ) - - # Extract referenced documents and token usage from Responses API response - # Merge with documents from direct vector query - response_referenced_documents = parse_referenced_documents_from_responses_api( - response - ) - all_referenced_documents = doc_ids_from_chunks + response_referenced_documents - logger.info( - "Combined referenced documents: %d from direct query + %d from responses API = %d total", - len(doc_ids_from_chunks), - len(response_referenced_documents), - len(all_referenced_documents), - ) - model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id - token_usage = extract_token_usage_from_responses_api( - response, model_label, provider_id, system_prompt - ) - - if not summary.llm_response: - logger.warning( - "Response lacks content (conversation_id=%s)", - conversation_id, - ) - - return ( - summary, - normalize_conversation_id(conversation_id), - all_referenced_documents, - token_usage, - ) - - -def extract_rag_chunks_from_file_search_item( - item: OpenAIResponseOutputMessageFileSearchToolCall, - rag_chunks: list[RAGChunk], -) -> None: - """Extract RAG chunks from a file search tool call item and append to rag_chunks. - - Args: - item: The file search tool call item. - rag_chunks: List to append extracted RAG chunks to. - """ - if item.results is not None: - for result in item.results: - rag_chunk = RAGChunk( - content=result.text, source=result.filename, score=result.score - ) - rag_chunks.append(rag_chunk) - - -def parse_rag_chunks_from_responses_api( - response_obj: OpenAIResponseObject, -) -> list[RAGChunk]: - """ - Extract rag_chunks from the llama-stack OpenAI response. - - Args: - response_obj: The ResponseObject from OpenAI compatible response API in llama-stack. - - Returns: - List of RAGChunk with content, source, score - """ - rag_chunks: list[RAGChunk] = [] - - for output_item in response_obj.output: - item_type = getattr(output_item, "type", None) - if item_type == "file_search_call": - item = cast(OpenAIResponseOutputMessageFileSearchToolCall, output_item) - extract_rag_chunks_from_file_search_item(item, rag_chunks) - - return rag_chunks - - -def parse_referenced_documents_from_responses_api( - response: OpenAIResponseObject, # pylint: disable=unused-argument -) -> list[ReferencedDocument]: - """ - Parse referenced documents from OpenAI Responses API response. 
- - Args: - response: The OpenAI Response API response object - - Returns: - list[ReferencedDocument]: List of referenced documents with doc_url and doc_title - """ - documents: list[ReferencedDocument] = [] - # Use a set to track unique documents by (doc_url, doc_title) tuple - seen_docs: set[tuple[Optional[str], Optional[str]]] = set() - - # Handle None response (e.g., when agent fails) - if response is None or not response.output: - return documents - - for output_item in response.output: - item_type = getattr(output_item, "type", None) - - # 1. Parse from file_search_call results - if item_type == "file_search_call": - results = getattr(output_item, "results", []) or [] - for result in results: - # Handle both object and dict access - if isinstance(result, dict): - attributes = result.get("attributes", {}) - else: - attributes = getattr(result, "attributes", {}) - - # Try to get URL from attributes - # Look for common URL fields in attributes - doc_url = ( - attributes.get("doc_url") - or attributes.get("docs_url") - or attributes.get("url") - or attributes.get("link") - ) - doc_title = attributes.get("title") - - if doc_title or doc_url: - # Treat empty string as None for URL to satisfy Optional[AnyUrl] - final_url = doc_url if doc_url else None - if (final_url, doc_title) not in seen_docs: - documents.append( - ReferencedDocument(doc_url=final_url, doc_title=doc_title) - ) - seen_docs.add((final_url, doc_title)) - - return documents - - -def extract_token_usage_from_responses_api( - response: OpenAIResponseObject, - model: str, - provider: str, - system_prompt: str = "", # pylint: disable=unused-argument -) -> TokenCounter: - """ - Extract token usage from OpenAI Responses API response and update metrics. - - This function extracts token usage information from the Responses API response - object and updates Prometheus metrics. If usage information is not available, - it returns zero values without estimation. - - Note: When llama stack internally uses chat_completions, the usage field may be - empty or a dict. This is expected and will be populated in future llama stack versions. 
- - Args: - response: The OpenAI Response API response object - model: The model identifier for metrics labeling - provider: The provider identifier for metrics labeling - system_prompt: The system prompt used (unused, kept for compatibility) - - Returns: - TokenCounter: Token usage information with input_tokens and output_tokens - """ - token_counter = TokenCounter() - token_counter.llm_calls = 1 - - # Extract usage from the response if available - # Note: usage attribute exists at runtime but may not be in type definitions - usage = getattr(response, "usage", None) - if usage: - try: - # Handle both dict and object cases due to llama_stack inconsistency: - # - When llama_stack converts to chat_completions internally, usage is a dict - # - When using proper Responses API, usage should be an object - # TODO: Remove dict handling once llama_stack standardizes on object type # pylint: disable=fixme - if isinstance(usage, dict): - input_tokens = usage.get("input_tokens", 0) - output_tokens = usage.get("output_tokens", 0) - else: - # Object with attributes (expected final behavior) - input_tokens = getattr(usage, "input_tokens", 0) - output_tokens = getattr(usage, "output_tokens", 0) - # Only set if we got valid values - if input_tokens or output_tokens: - token_counter.input_tokens = input_tokens or 0 - token_counter.output_tokens = output_tokens or 0 - - logger.debug( - "Extracted token usage from Responses API: input=%d, output=%d", - token_counter.input_tokens, - token_counter.output_tokens, - ) - - # Update Prometheus metrics only when we have actual usage data - try: - metrics.llm_token_sent_total.labels(provider, model).inc( - token_counter.input_tokens - ) - metrics.llm_token_received_total.labels(provider, model).inc( - token_counter.output_tokens - ) - except (AttributeError, TypeError, ValueError) as e: - logger.warning("Failed to update token metrics: %s", e) - _increment_llm_call_metric(provider, model) - else: - logger.debug( - "Usage object exists but tokens are 0 or None, treating as no usage info" - ) - # Still increment the call counter - _increment_llm_call_metric(provider, model) - except (AttributeError, KeyError, TypeError) as e: - logger.warning( - "Failed to extract token usage from response.usage: %s. Usage value: %s", - e, - usage, - ) - # Still increment the call counter - _increment_llm_call_metric(provider, model) - else: - # No usage information available - this is expected when llama stack - # internally converts to chat_completions - logger.debug( - "No usage information in Responses API response, token counts will be 0" - ) - # token_counter already initialized with 0 values - # Still increment the call counter - _increment_llm_call_metric(provider, model) - - return token_counter - - -def _increment_llm_call_metric(provider: str, model: str) -> None: - """Safely increment LLM call metric.""" - try: - metrics.llm_calls_total.labels(provider, model).inc() - except (AttributeError, TypeError, ValueError) as e: - logger.warning("Failed to update LLM call metric: %s", e) - - -def get_rag_tools( - vector_store_ids: list[str], solr_params: Optional[dict[str, Any]] = None -) -> Optional[list[dict[str, Any]]]: - """ - Convert vector store IDs to tools format for Responses API. 
- - Args: - vector_store_ids: List of vector store identifiers - solr_params: Optional Solr filtering parameters - - Returns: - Optional[list[dict[str, Any]]]: List containing file_search tool configuration, - or None if no vector stores provided - """ - if not vector_store_ids: - return None - - tool_config = { - "type": "file_search", - "vector_store_ids": vector_store_ids, - "max_num_results": 10, - } - - if solr_params: - tool_config["solr"] = solr_params - - return [tool_config] - - -def get_mcp_tools( - mcp_servers: list[ModelContextProtocolServer], - token: str | None = None, - mcp_headers: dict[str, dict[str, str]] | None = None, -) -> list[dict[str, Any]]: - """ - Convert MCP servers to tools format for Responses API. - - Args: - mcp_servers: List of MCP server configurations - token: Optional authentication token for MCP server authorization - mcp_headers: Optional per-request headers for MCP servers, keyed by server URL - - Returns: - list[dict[str, Any]]: List of MCP tool definitions with server - details and optional auth headers - - The way it works is we go through all the defined mcp servers and - create a tool definitions for each of them. If MCP server definition - has a non-empty resolved_authorization_headers we create invocation - headers, following the algorithm: - 1. If the header value is 'kubernetes' the header value is a k8s token - 2. If the header value is 'client': - find the value for a given MCP server/header in mcp_headers. - if the value is not found omit this header, otherwise use found value - 3. otherwise use the value from resolved_authorization_headers directly - - This algorithm allows to: - 1. Use static global header values, provided by configuration - 2. Use user specific k8s token, which will work for the majority of kubernetes - based MCP servers - 3. 
Use user specific tokens (passed by the client) for user specific MCP headers - """ - - def _get_token_value(original: str, header: str) -> str | None: - """Convert to header value.""" - match original: - case constants.MCP_AUTH_KUBERNETES: - # use k8s token - if token is None or token == "": - return None - return f"Bearer {token}" - case constants.MCP_AUTH_CLIENT: - # use client provided token - if mcp_headers is None: - return None - c_headers = mcp_headers.get(mcp_server.name, None) - if c_headers is None: - return None - return c_headers.get(header, None) - case _: - # use provided - return original - - tools = [] - for mcp_server in mcp_servers: - # Base tool definition - tool_def = { - "type": "mcp", - "server_label": mcp_server.name, - "server_url": mcp_server.url, - "require_approval": "never", - } - - # Build headers - headers = {} - for name, value in mcp_server.resolved_authorization_headers.items(): - # for each defined header - h_value = _get_token_value(value, name) - # only add the header if we got value - if h_value is not None: - headers[name] = h_value - - # Skip server if auth headers were configured but not all could be resolved - if mcp_server.authorization_headers and len(headers) != len( - mcp_server.authorization_headers - ): - logger.warning( - "Skipping MCP server %s: required %d auth headers but only resolved %d", - mcp_server.name, - len(mcp_server.authorization_headers), - len(headers), - ) - continue - - if len(headers) > 0: - # add headers to tool definition - tool_def["headers"] = headers # type: ignore[index] - # collect tools info - tools.append(tool_def) - return tools - - -async def prepare_tools_for_responses_api( - client: AsyncLlamaStackClient, - query_request: QueryRequest, - token: str, - config: AppConfig, - *, - mcp_headers: Optional[dict[str, dict[str, str]]] = None, - skip_rag_tools: bool = False, -) -> Optional[list[dict[str, Any]]]: - """ - Prepare tools for Responses API including RAG and MCP tools. - - This function retrieves vector stores and combines them with MCP - server tools to create a unified toolgroups list for the Responses API. 
- - Args: - client: The Llama Stack client instance - query_request: The user's query request - token: Authentication token for MCP tools - config: Configuration object containing MCP server settings - mcp_headers: Per-request headers for MCP servers - skip_rag_tools: If True, skip adding RAG tools (used when doing direct vector querying) - - Returns: - Optional[list[dict[str, Any]]]: List of tool configurations for the - Responses API, or None if no_tools is True or no tools are available - """ - if query_request.no_tools: - return None - - toolgroups = [] - - # Add RAG tools if not skipped - if not skip_rag_tools: - # Get vector stores for RAG tools - use specified ones or fetch all - if query_request.vector_store_ids: - vector_store_ids = query_request.vector_store_ids - logger.info("Using specified vector_store_ids: %s", vector_store_ids) - else: - vector_store_ids = [ - vector_store.id - for vector_store in (await client.vector_stores.list()).data - ] - logger.info("Using all available vector_store_ids: %s", vector_store_ids) - - # Add RAG tools if vector stores are available - if vector_store_ids: - rag_tools = get_rag_tools(vector_store_ids) - if rag_tools: - logger.info("rag_tool are: %s", rag_tools) - toolgroups.extend(rag_tools) - else: - logger.info("No RAG tools configured") - else: - logger.info("No vector stores available for RAG tools") - else: - logger.info("Skipping RAG tools - using direct vector querying instead") - - # Add MCP server tools - mcp_tools = get_mcp_tools(config.mcp_servers, token, mcp_headers) - if mcp_tools: - toolgroups.extend(mcp_tools) - logger.debug( - "Configured %d MCP tools: %s", - len(mcp_tools), - [tool.get("server_label", "unknown") for tool in mcp_tools], - ) - # Convert empty list to None for consistency with existing behavior - if not toolgroups: - return None - - return toolgroups diff --git a/src/app/endpoints/streaming_query.py b/src/app/endpoints/streaming_query.py index 2b12f14c3..789ee4c18 100644 --- a/src/app/endpoints/streaming_query.py +++ b/src/app/endpoints/streaming_query.py @@ -1,65 +1,86 @@ -"""Handler for REST API call to provide answer to streaming query.""" # pylint: disable=too-many-lines,too-many-locals,W0511 +"""Streaming query handler using Responses API (v2).""" -import ast -import json import logging -import re -import uuid -from collections.abc import Callable -from datetime import UTC, datetime -from typing import ( - Any, - Iterator, - Optional, -) +from typing import Annotated, Any, AsyncIterator, Optional, cast -from fastapi import APIRouter, Request +from fastapi import APIRouter, Depends, Request from fastapi.responses import StreamingResponse -from llama_stack_client import ( - APIConnectionError, - RateLimitError, # type: ignore +from llama_stack_api.openai_responses import ( + OpenAIResponseObject, + OpenAIResponseObjectStream, + OpenAIResponseObjectStreamResponseCompleted, + OpenAIResponseObjectStreamResponseFailed, + OpenAIResponseObjectStreamResponseOutputItemDone, + OpenAIResponseObjectStreamResponseOutputTextDelta, + OpenAIResponseObjectStreamResponseOutputTextDone, ) -from llama_stack_client.types.shared.interleaved_content_item import TextContentItem -from openai._exceptions import APIStatusError +from llama_stack_client import AsyncLlamaStackClient -import metrics +from app.endpoints.query_old import ( + is_transcripts_enabled, + persist_user_conversation_details, + validate_attachments_metadata, +) from app.endpoints.query import ( - evaluate_model_hints, - select_model_and_provider_id, - 
validate_conversation_ownership, + _build_tool_call_summary, + extract_token_usage_from_responses_api, + get_topic_summary, + parse_referenced_documents_from_responses_api, + prepare_tools_for_responses_api, +) +from app.endpoints.streaming_query_old import ( + LLM_TOKEN_EVENT, + LLM_TOOL_CALL_EVENT, + LLM_TOOL_RESULT_EVENT, + format_stream_data, + stream_end_event, + stream_event, + stream_start_event, + streaming_query_endpoint_handler_base, ) +from authentication import get_auth_dependency from authentication.interface import AuthTuple -from authorization.azure_token_manager import AzureEntraIDManager -from client import AsyncLlamaStackClientHolder +from authorization.middleware import authorize from configuration import configuration -from constants import DEFAULT_RAG_TOOL, MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT +from constants import ( + MEDIA_TYPE_JSON, +) +from models.config import Action from models.context import ResponseGeneratorContext -from models.database.conversations import UserConversation from models.requests import QueryRequest from models.responses import ( - AbstractErrorResponse, ForbiddenResponse, InternalServerErrorResponse, NotFoundResponse, - PromptTooLongResponse, QuotaExceededResponse, + ReferencedDocument, ServiceUnavailableResponse, StreamingQueryResponse, UnauthorizedResponse, UnprocessableEntityResponse, ) from utils.endpoints import ( - ReferencedDocument, - check_configuration_loaded, - validate_model_provider_override, + cleanup_after_streaming, + get_system_prompt, +) +from utils.query import create_violation_stream +from utils.quota import consume_tokens, get_available_quotas +from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id +from utils.mcp_headers import mcp_headers_dependency +from utils.shields import ( + append_turn_to_conversation, + run_shield_moderation, ) from utils.token_counter import TokenCounter -from utils.types import content_to_str +from utils.transcripts import store_transcript +from utils.types import RAGChunk, TurnSummary +from utils.vector_search import perform_vector_search, format_rag_context_for_injection logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["streaming_query"]) +router = APIRouter(tags=["streaming_query_v1"]) +auth_dependency = get_auth_dependency() -streaming_query_responses: dict[int | str, dict[str, Any]] = { +streaming_query_v2_responses: dict[int | str, dict[str, Any]] = { 200: StreamingQueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( examples=["missing header", "missing token"] @@ -70,7 +91,7 @@ 404: NotFoundResponse.openapi_response( examples=["conversation", "model", "provider"] ), - 413: PromptTooLongResponse.openapi_response(), + # 413: PromptTooLongResponse.openapi_response(), 422: UnprocessableEntityResponse.openapi_response(), 429: QuotaExceededResponse.openapi_response(), 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), @@ -78,649 +99,410 @@ } -METADATA_PATTERN = re.compile(r"\nMetadata: (\{.+})\n") - -# OLS-compatible event types -LLM_TOKEN_EVENT = "token" -LLM_TOOL_CALL_EVENT = "tool_call" -LLM_TOOL_RESULT_EVENT = "tool_result" -LLM_VALIDATION_EVENT = "validation" - - -def format_stream_data(d: dict) -> str: - """ - Format a dictionary as a Server-Sent Events (SSE) data string. - - Parameters: - d (dict): The data to be formatted as an SSE event. - - Returns: - str: The formatted SSE data string. 
- """ - data = json.dumps(d) - return f"data: {data}\n\n" - - -def stream_start_event(conversation_id: str) -> str: - """ - Yield the start of the data stream. - - Format a Server-Sent Events (SSE) start event containing the - conversation ID. - - Parameters: - conversation_id (str): Unique identifier for the - conversation. - - Returns: - str: SSE-formatted string representing the start event. +def create_responses_response_generator( # pylint: disable=too-many-locals,too-many-statements + context: ResponseGeneratorContext, + doc_ids_from_chunks: Optional[list[ReferencedDocument]] = None, +) -> Any: """ - return format_stream_data( - { - "event": "start", - "data": { - "conversation_id": conversation_id, - }, - } - ) - + Create a response generator function for Responses API streaming. -def stream_end_event( - metadata_map: dict, - token_usage: TokenCounter, - available_quotas: dict[str, int], - referenced_documents: list[ReferencedDocument], - media_type: str = MEDIA_TYPE_JSON, -) -> str: - """ - Yield the end of the data stream. - - Format and return the end event for a streaming response, - including referenced document metadata and token usage information. - - Parameters: - metadata_map (dict): A mapping containing metadata about - referenced documents. - summary (TurnSummary): Summary of the conversation turn. - token_usage (TokenCounter): Token usage information. - media_type (str): The media type for the response format. - - Returns: - str: A Server-Sent Events (SSE) formatted string - representing the end of the data stream. - """ - if media_type == MEDIA_TYPE_TEXT: - ref_docs_string = "\n".join( - f'{v["title"]}: {v["docs_url"]}' - for v in filter( - lambda v: ("docs_url" in v) and ("title" in v), - metadata_map.values(), - ) - ) - return f"\n\n---\n\n{ref_docs_string}" if ref_docs_string else "" - - # Convert ReferencedDocument objects to dicts for JSON serialization - # Use mode="json" to ensure AnyUrl is serialized to string (not just model_dump()) - referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents] - - return format_stream_data( - { - "event": "end", - "data": { - "referenced_documents": referenced_docs_dict, - "truncated": None, # TODO(jboos): implement truncated - "input_tokens": token_usage.input_tokens, - "output_tokens": token_usage.output_tokens, - }, - "available_quotas": available_quotas, - } - ) - - -def stream_event(data: dict, event_type: str, media_type: str) -> str: - """Build an item to yield based on media type. + This factory function returns an async generator that processes streaming + responses from the Responses API and yields Server-Sent Events (SSE). Args: - data: The data to yield. - event_type: The type of event (e.g. token, tool request, tool execution). - media_type: Media type of the response (e.g. text or JSON). + context: Context object containing all necessary parameters for response generation + doc_ids_from_chunks: Referenced documents extracted from vector DB chunks Returns: - str: The formatted string or JSON to yield. 
- """ - if media_type == MEDIA_TYPE_TEXT: - if event_type == LLM_TOKEN_EVENT: - return data["token"] - if event_type == LLM_TOOL_CALL_EVENT: - return f"\nTool call: {json.dumps(data)}\n" - if event_type == LLM_TOOL_RESULT_EVENT: - return f"\nTool result: {json.dumps(data)}\n" - logger.error("Unknown event type: %s", event_type) - return "" - return format_stream_data( - { - "event": event_type, - "data": data, - } - ) - - -# ----------------------------------- -# Error handling -# ----------------------------------- -def _handle_error_event( - chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: + An async generator function that yields SSE-formatted strings """ - Yield error event. - Yield a formatted Server-Sent Events (SSE) error event - containing the error message from a streaming chunk. - - Parameters: - chunk_id (int): The unique identifier for the current - streaming chunk. - media_type (str): The media type for the response format. - """ - if media_type == MEDIA_TYPE_TEXT: - yield f"Error: {chunk.error['message']}" - else: - yield format_stream_data( - { - "event": "error", - "data": { - "id": chunk_id, - "token": chunk.error["message"], - }, - } + async def response_generator( # pylint: disable=too-many-branches,too-many-statements + turn_response: AsyncIterator[OpenAIResponseObjectStream], + ) -> AsyncIterator[str]: + """ + Generate SSE formatted streaming response. + + Asynchronously generates a stream of Server-Sent Events + (SSE) representing incremental responses from a + language model turn. + + Yields start, token, tool call, turn completion, and + end events as SSE-formatted strings. Collects the + complete response for transcript storage if enabled. + """ + chunk_id = 0 + summary = TurnSummary( + llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] ) + # Determine media type for response formatting + media_type = context.query_request.media_type or MEDIA_TYPE_JSON -def prompt_too_long_error(error: Exception, media_type: str) -> str: - """Return error representation for long prompts. - - Args: - error: The exception raised for long prompts. - media_type: Media type of the response (e.g. text or JSON). - - Returns: - str: The error message formatted for the media type. - """ - logger.error("Prompt is too long: %s", error) - if media_type == MEDIA_TYPE_TEXT: - return f"Prompt is too long: {error}" - return format_stream_data( - { - "event": "error", - "data": { - "status_code": 413, - "response": "Prompt is too long", - "cause": str(error), - }, - } - ) - - -def generic_llm_error(error: Exception, media_type: str) -> str: - """Return error representation for generic LLM errors. - - Args: - error: The exception raised during processing. - media_type: Media type of the response (e.g. text or JSON). - - Returns: - str: The error message formatted for the media type. - """ - logger.error("Error while obtaining answer for user question") - logger.exception(error) - - if media_type == MEDIA_TYPE_TEXT: - return f"Error: {str(error)}" - return format_stream_data( - { - "event": "error", - "data": { - "response": "Internal server error", - "cause": str(error), - }, - } - ) - - -def stream_http_error(error: AbstractErrorResponse) -> Iterator[str]: - """ - Yield an SSE-formatted error response for generic LLM or API errors. - - Args: - error: An AbstractErrorResponse instance representing the error. - - Yields: - str: A Server-Sent Events (SSE) formatted error message containing - the serialized error details. 
- """ - logger.error("Error while obtaining answer for user question") - logger.exception(error) + # Accumulators for Responses API + text_parts: list[str] = [] + emitted_turn_complete = False - yield format_stream_data({"event": "error", "data": {**error.detail.model_dump()}}) + # Use the conversation_id from context (either provided or newly created) + conv_id = context.conversation_id + # Track the latest response object from response.completed event + latest_response_object: Optional[Any] = None -# ----------------------------------- -# Turn handling -# ----------------------------------- -def _handle_turn_start_event( - _chunk_id: int, - media_type: str = MEDIA_TYPE_JSON, - conversation_id: Optional[str] = None, -) -> Iterator[str]: - """ - Yield turn start event. + # RAG chunks + rag_chunks: list[RAGChunk] = [] - Yield a Server-Sent Event (SSE) start event indicating the - start of a new conversation turn. + logger.debug("Starting streaming response (Responses API) processing") - Parameters: - chunk_id (int): The unique identifier for the current - chunk. + async for chunk in turn_response: + event_type = getattr(chunk, "type", None) + logger.debug("Processing chunk %d, type: %s", chunk_id, event_type) - Yields: - str: SSE-formatted start event with conversation_id. - """ - # Use provided conversation_id or generate one if not available - if conversation_id is None: - conversation_id = str(uuid.uuid4()) - - if media_type == MEDIA_TYPE_TEXT: - yield ( - f"data: {json.dumps({'event': 'start', 'data': {'conversation_id': conversation_id}})}\n\n" # pylint: disable=line-too-long - ) - else: - yield format_stream_data( - { - "event": "start", - "data": {"conversation_id": conversation_id}, - } - ) + # Emit start event when response is created + if event_type == "response.created": + yield stream_start_event(conv_id) - -def _handle_turn_complete_event( - chunk: Any, _chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield turn complete event. - - Yields a Server-Sent Event (SSE) indicating the completion of a - conversation turn, including the full output message content. - - Parameters: - chunk_id (int): The unique identifier for the current - chunk. - - Yields: - str: SSE-formatted string containing the turn completion - event and output message content. - """ - full_response = content_to_str(chunk.event.payload.turn.output_message.content) - - if media_type == MEDIA_TYPE_TEXT: - yield ( - f"data: {json.dumps({'event': 'turn_complete', 'data': {'token': full_response}})}\n\n" - ) - else: - yield format_stream_data( - { - "event": "turn_complete", - "data": {"token": full_response}, - } - ) - - -# ----------------------------------- -# Shield handling -# ----------------------------------- -def _handle_shield_event( - chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield shield event. - - Processes a shield event chunk and yields a formatted SSE token - event indicating shield validation results. - - Yields a "No Violation" token if no violation is detected, or a - violation message if a shield violation occurs. Increments - validation error metrics when violations are present. 
- """ - if chunk.event.payload.event_type == "step_complete": - violation = chunk.event.payload.step_details.violation - if not violation: - yield stream_event( - data={ - "id": chunk_id, - "token": "No Violation", - }, - event_type=LLM_VALIDATION_EVENT, - media_type=media_type, - ) - else: - # Metric for LLM validation errors - metrics.llm_calls_validation_errors_total.inc() - violation = ( - f"Violation: {violation.user_message} (Metadata: {violation.metadata})" - ) - yield stream_event( - data={ - "id": chunk_id, - "token": violation, - }, - event_type=LLM_VALIDATION_EVENT, - media_type=media_type, - ) - - -# ----------------------------------- -# Tool Execution handling -# ----------------------------------- -# pylint: disable=R1702,R0912 -def _handle_tool_execution_event( - chunk: Any, chunk_id: int, metadata_map: dict, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield tool call event. - - Processes tool execution events from a streaming chunk and - yields formatted Server-Sent Events (SSE) strings. - - Handles both tool call initiation and completion, including - tool call arguments, responses, and summaries. Extracts and - updates document metadata from knowledge search tool responses - when present. - - Parameters: - chunk_id (int): Unique identifier for the current streaming - chunk. metadata_map (dict): Dictionary to be updated with - document metadata extracted from tool responses. - - Yields: - str: SSE-formatted event strings representing tool call - events and responses. - """ - if chunk.event.payload.event_type == "step_start": - yield stream_event( - data={ - "id": chunk_id, - "token": "", - }, - event_type=LLM_TOOL_CALL_EVENT, - media_type=media_type, - ) - - elif chunk.event.payload.event_type == "step_complete": - for t in chunk.event.payload.step_details.tool_calls: - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": t.tool_name, - "arguments": t.arguments, - }, - }, - event_type=LLM_TOOL_CALL_EVENT, - media_type=media_type, - ) - - for r in chunk.event.payload.step_details.tool_responses: - if r.tool_name == "query_from_memory": - inserted_context = content_to_str(r.content) - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "response": f"Fetched {len(inserted_context)} bytes from memory", + # Text streaming + if event_type == "response.output_text.delta": + delta_chunk = cast( + OpenAIResponseObjectStreamResponseOutputTextDelta, chunk + ) + if delta_chunk.delta: + text_parts.append(delta_chunk.delta) + yield stream_event( + { + "id": chunk_id, + "token": delta_chunk.delta, }, - }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, + LLM_TOKEN_EVENT, + media_type, + ) + chunk_id += 1 + + # Final text of the output (capture, but emit at response.completed) + elif event_type == "response.output_text.done": + text_done_chunk = cast( + OpenAIResponseObjectStreamResponseOutputTextDone, chunk ) + if text_done_chunk.text: + summary.llm_response = text_done_chunk.text - elif r.tool_name == DEFAULT_RAG_TOOL and r.content: - summary = "" - for i, text_content_item in enumerate(r.content): - if isinstance(text_content_item, TextContentItem): - if i == 0: - summary = text_content_item.text - newline_pos = summary.find("\n") - if newline_pos > 0: - summary = summary[:newline_pos] - for match in METADATA_PATTERN.findall(text_content_item.text): - try: - meta = ast.literal_eval(match) - if "document_id" in meta: - metadata_map[meta["document_id"]] = meta - except Exception: # pylint: 
disable=broad-except - logger.debug( - "An exception was thrown in processing %s", - match, - ) - + # Content part started - emit an empty token to kick off UI streaming + elif event_type == "response.content_part.added": yield stream_event( - data={ + { "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "summary": summary, - }, + "token": "", }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, + LLM_TOKEN_EVENT, + media_type, ) + chunk_id += 1 - else: - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "response": content_to_str(r.content), + # Process tool calls and results are emitted together when output items are done + # TODO(asimurka): support emitting tool calls and results separately when ready + elif event_type == "response.output_item.done": + output_item_done_chunk = cast( + OpenAIResponseObjectStreamResponseOutputItemDone, chunk + ) + if output_item_done_chunk.item.type == "message": + continue + tool_call, tool_result = _build_tool_call_summary( + output_item_done_chunk.item, rag_chunks + ) + if tool_call: + summary.tool_calls.append(tool_call) + yield stream_event( + tool_call.model_dump(), + LLM_TOOL_CALL_EVENT, + media_type, + ) + if tool_result: + summary.tool_results.append(tool_result) + yield stream_event( + tool_result.model_dump(), + LLM_TOOL_RESULT_EVENT, + media_type, + ) + + # Completed response - capture final text and response object + elif event_type == "response.completed": + # Capture the response object for token usage extraction + completed_chunk = cast( + OpenAIResponseObjectStreamResponseCompleted, chunk + ) + latest_response_object = completed_chunk.response + + if not emitted_turn_complete: + final_message = summary.llm_response or "".join(text_parts) + if not final_message: + final_message = "No response from the model" + summary.llm_response = final_message + yield stream_event( + { + "id": chunk_id, + "token": final_message, }, - }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, + "turn_complete", + media_type, + ) + chunk_id += 1 + emitted_turn_complete = True + + # Incomplete response - emit error because LLS does not + # support incomplete responses "incomplete_detail" attribute yet + elif event_type == "response.incomplete": + error_response = InternalServerErrorResponse.query_failed( + "An unexpected error occurred while processing the request." ) + logger.error("Error while obtaining answer for user question") + yield format_stream_data( + {"event": "error", "data": {**error_response.detail.model_dump()}} + ) + return + + # Failed response - emit error with custom cause from error message + elif event_type == "response.failed": + failed_chunk = cast(OpenAIResponseObjectStreamResponseFailed, chunk) + latest_response_object = failed_chunk.response + error_message = ( + failed_chunk.response.error.message + if failed_chunk.response.error + else "An unexpected error occurred while processing the request." 
+ ) + error_response = InternalServerErrorResponse.query_failed(error_message) + logger.error("Error while obtaining answer for user question") + yield format_stream_data( + {"event": "error", "data": {**error_response.detail.model_dump()}} + ) + return + logger.debug( + "Streaming complete - Tool calls: %d, Response chars: %d", + len(summary.tool_calls), + len(summary.llm_response), + ) -# ----------------------------------- -# Catch-all for everything else -# ----------------------------------- -def _handle_heartbeat_event( - chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield a heartbeat event. - - Yield a heartbeat event as a Server-Sent Event (SSE) for the - given chunk ID. + # Extract token usage from the response object + token_usage = ( + extract_token_usage_from_responses_api( + latest_response_object, context.model_id, context.provider_id + ) + if latest_response_object is not None + else TokenCounter() + ) + consume_tokens( + configuration.quota_limiters, + configuration.token_usage_history, + context.user_id, + input_tokens=token_usage.input_tokens, + output_tokens=token_usage.output_tokens, + model_id=context.model_id, + provider_id=context.provider_id, + ) + response_referenced_documents = parse_referenced_documents_from_responses_api( + cast(OpenAIResponseObject, latest_response_object) + ) + # Combine doc_ids_from_chunks with response_referenced_documents + all_referenced_documents = ( + doc_ids_from_chunks or [] + ) + response_referenced_documents + available_quotas = get_available_quotas( + configuration.quota_limiters, context.user_id + ) + yield stream_end_event( + context.metadata_map, + token_usage, + available_quotas, + all_referenced_documents, + media_type, + ) - Parameters: - chunk_id (int): The identifier for the current streaming - chunk. + # Perform cleanup tasks (database and cache operations)) + await cleanup_after_streaming( + user_id=context.user_id, + conversation_id=conv_id, + model_id=context.model_id, + provider_id=context.provider_id, + llama_stack_model_id=context.llama_stack_model_id, + query_request=context.query_request, + summary=summary, + metadata_map=context.metadata_map, + started_at=context.started_at, + client=context.client, + config=configuration, + skip_userid_check=context.skip_userid_check, + get_topic_summary_func=get_topic_summary, + is_transcripts_enabled_func=is_transcripts_enabled, + store_transcript_func=store_transcript, + persist_user_conversation_details_func=persist_user_conversation_details, + rag_chunks=[rag_chunk.model_dump() for rag_chunk in rag_chunks], + ) - Yields: - str: SSE-formatted heartbeat event string. 
- """ - yield stream_event( - data={ - "id": chunk_id, - "token": "heartbeat", - }, - event_type=LLM_TOKEN_EVENT, - media_type=media_type, - ) + return response_generator -async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments +@router.post( + "/streaming_query", + response_class=StreamingResponse, + responses=streaming_query_v2_responses, + summary="Streaming Query Endpoint Handler V1", +) +@authorize(Action.STREAMING_QUERY) +async def streaming_query_endpoint_handler_v2( # pylint: disable=too-many-locals request: Request, query_request: QueryRequest, - auth: AuthTuple, - mcp_headers: dict[str, dict[str, str]], - retrieve_response_func: Callable[..., Any], - create_response_generator_func: Callable[..., Any], + auth: Annotated[AuthTuple, Depends(auth_dependency)], + mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), ) -> StreamingResponse: """ - Handle streaming query endpoints with common logic. - - This base handler contains all the common logic for streaming query endpoints - and accepts functions for API-specific behavior (Agent API vs Responses API). + Handle request to the /streaming_query endpoint using Responses API. - Args: - request: The FastAPI request object - query_request: The query request from the user - auth: Authentication tuple (user_id, username, skip_check, token) - mcp_headers: MCP headers for tool integrations - retrieve_response_func: Function to retrieve the streaming response - create_response_generator_func: Function factory that creates the response generator + Returns a streaming response using Server-Sent Events (SSE) format with + content type text/event-stream. Returns: - StreamingResponse: An HTTP streaming response yielding SSE-formatted events + StreamingResponse: An HTTP streaming response yielding + SSE-formatted events for the query lifecycle with content type + text/event-stream. Raises: - HTTPException: Returns HTTP 500 if unable to connect to Llama Stack + HTTPException: + - 401: Unauthorized - Missing or invalid credentials + - 403: Forbidden - Insufficient permissions or model override not allowed + - 404: Not Found - Conversation, model, or provider not found + - 422: Unprocessable Entity - Request validation failed + - 429: Too Many Requests - Quota limit exceeded + - 500: Internal Server Error - Configuration not loaded or other server errors + - 503: Service Unavailable - Unable to connect to Llama Stack backend """ - # Nothing interesting in the request - _ = request + return await streaming_query_endpoint_handler_base( + request=request, + query_request=query_request, + auth=auth, + mcp_headers=mcp_headers, + retrieve_response_func=retrieve_response, + create_response_generator_func=create_responses_response_generator, + ) - check_configuration_loaded(configuration) - started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - # Enforce RBAC: optionally disallow overriding model/provider in requests - validate_model_provider_override(query_request, request.state.authorized_actions) +async def retrieve_response( # pylint: disable=too-many-locals + client: AsyncLlamaStackClient, + model_id: str, + query_request: QueryRequest, + token: str, + mcp_headers: Optional[dict[str, dict[str, str]]] = None, +) -> tuple[AsyncIterator[OpenAIResponseObjectStream], str, list[ReferencedDocument]]: + """ + Retrieve response from LLMs and agents. 
- # log Llama Stack configuration - logger.info("Llama stack config: %s", configuration.llama_stack_configuration) + Asynchronously retrieves a streaming response and conversation + ID from the Llama Stack agent for a given user query. - user_id, _user_name, _skip_userid_check, token = auth + This function configures shields, system prompt, and tool usage + based on the request and environment. It prepares the agent with + appropriate headers and toolgroups, validates attachments if + present, and initiates a streaming turn with the user's query + and any provided documents. - user_conversation: Optional[UserConversation] = None - if query_request.conversation_id: - user_conversation = validate_conversation_ownership( - user_id=user_id, conversation_id=query_request.conversation_id - ) + Parameters: + model_id (str): Identifier of the model to use for the query. + query_request (QueryRequest): The user's query and associated metadata. + token (str): Authentication token for downstream services. + mcp_headers (dict[str, dict[str, str]], optional): + Multi-cluster proxy headers for tool integrations. - if user_conversation is None: - logger.warning( - "User %s attempted to query conversation %s they don't own", - user_id, - query_request.conversation_id, - ) - forbidden_error = ForbiddenResponse.conversation( - action="read", - resource_id=query_request.conversation_id, - user_id=user_id, - ) - return StreamingResponse( - stream_http_error(forbidden_error), - media_type="text/event-stream", - status_code=forbidden_error.status_code, - ) + Returns: + tuple: A tuple containing the streaming response object, + the conversation ID, and the list of referenced documents from vector DB chunks. + """ + # use system prompt from request or default one + system_prompt = get_system_prompt(query_request, configuration) + logger.debug("Using system prompt: %s", system_prompt) + + # TODO(lucasagomes): redact attachments content before sending to LLM + # if attachments are provided, validate them + if query_request.attachments: + validate_attachments_metadata(query_request.attachments) + + # Prepare tools for responses API - skip RAG tools since we're doing direct vector query + toolgroups = await prepare_tools_for_responses_api( + client, + query_request, + token, + configuration, + mcp_headers=mcp_headers, + skip_rag_tools=True, + ) - try: - # try to get Llama Stack client - client = AsyncLlamaStackClientHolder().get_client() - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - await client.models.list(), - *evaluate_model_hints( - user_conversation=user_conversation, query_request=query_request - ), - ) + # Extract RAG chunks from vector DB query response BEFORE calling responses API + _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( + client, query_request, configuration + ) - if ( - provider_id == "azure" - and AzureEntraIDManager().is_entra_id_configured - and AzureEntraIDManager().is_token_expired - and AzureEntraIDManager().refresh_token() - ): - if AsyncLlamaStackClientHolder().is_library_client: - client = await AsyncLlamaStackClientHolder().reload_library_client() - else: - azure_config = next( - p.config - for p in await client.providers.list() - if p.provider_type == "remote::azure" - ) - client = AsyncLlamaStackClientHolder().update_provider_data( - { - "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(), - "azure_api_base": str(azure_config.get("api_base")), - } - ) + # Format RAG context for injection into user message + 
rag_context = format_rag_context_for_injection(rag_chunks) + + # Prepare input for Responses API + # Convert attachments to text and concatenate with query + input_text = query_request.query + if query_request.attachments: + for attachment in query_request.attachments: + input_text += ( + f"\n\n[Attachment: {attachment.attachment_type}]\n" + f"{attachment.content}" + ) - response, conversation_id = await retrieve_response_func( - client, - llama_stack_model_id, - query_request, - token, - mcp_headers=mcp_headers, - ) + # Add RAG context to input text + input_text += rag_context - metadata_map: dict[str, dict[str, Any]] = {} - - # Create context object for response generator - context = ResponseGeneratorContext( - conversation_id=conversation_id, - user_id=user_id, - skip_userid_check=_skip_userid_check, - model_id=model_id, - provider_id=provider_id, - llama_stack_model_id=llama_stack_model_id, - query_request=query_request, - started_at=started_at, - client=client, - metadata_map=metadata_map, + # Handle conversation ID for Responses API + # Create conversation upfront if not provided + conversation_id = query_request.conversation_id + if conversation_id: + # Conversation ID was provided - convert to llama-stack format + logger.debug("Using existing conversation ID: %s", conversation_id) + llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id) + else: + # No conversation_id provided - create a new conversation first + logger.debug("No conversation_id provided, creating new conversation") + conversation = await client.conversations.create(metadata={}) + llama_stack_conv_id = conversation.id + # Store the normalized version for later use + conversation_id = normalize_conversation_id(llama_stack_conv_id) + logger.info( + "Created new conversation with ID: %s (normalized: %s)", + llama_stack_conv_id, + conversation_id, ) - # Create the response generator using the provided factory function - response_generator = create_response_generator_func(context) - - # Update metrics for the LLM call - metrics.llm_calls_total.labels(provider_id, model_id).inc() - - # Determine media type for response - # Note: The HTTP Content-Type header is always text/event-stream for SSE, - # but the media_type parameter controls how the content is formatted - return StreamingResponse( - response_generator(response), media_type="text/event-stream" - ) - except APIConnectionError as e: - metrics.llm_calls_failures_total.inc() - logger.error("Unable to connect to Llama Stack: %s", e) - error_response = ServiceUnavailableResponse( - backend_name="Llama Stack", - cause=str(e), + # Run shield moderation before calling LLM + moderation_result = await run_shield_moderation(client, input_text) + if moderation_result.blocked: + violation_message = moderation_result.message or "" + await append_turn_to_conversation( + client, llama_stack_conv_id, input_text, violation_message ) - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type="text/event-stream", - ) - except RateLimitError as e: - used_model = getattr(e, "model", "") - if used_model: - error_response = QuotaExceededResponse.model(used_model) - else: - error_response = QuotaExceededResponse( - response="The quota has been exceeded", cause=str(e) - ) - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type="text/event-stream", - ) - except APIStatusError as e: - metrics.llm_calls_failures_total.inc() - logger.error("API status 
error: %s", e) - error_response = InternalServerErrorResponse.generic() - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type=query_request.media_type or MEDIA_TYPE_JSON, + return ( + create_violation_stream(violation_message, moderation_result.shield_model), + normalize_conversation_id(conversation_id), ) + + create_params: dict[str, Any] = { + "input": input_text, + "model": model_id, + "instructions": system_prompt, + "stream": True, + "store": True, + "tools": toolgroups, + "conversation": llama_stack_conv_id, + } + + response = await client.responses.create(**create_params) + response_stream = cast(AsyncIterator[OpenAIResponseObjectStream], response) + + return ( + response_stream, + normalize_conversation_id(conversation_id), + doc_ids_from_chunks, + ) diff --git a/src/app/endpoints/streaming_query_old.py b/src/app/endpoints/streaming_query_old.py new file mode 100644 index 000000000..005949ea1 --- /dev/null +++ b/src/app/endpoints/streaming_query_old.py @@ -0,0 +1,726 @@ +"""Handler for REST API call to provide answer to streaming query.""" # pylint: disable=too-many-lines,too-many-locals,W0511 + +import ast +import json +import logging +import re +import uuid +from collections.abc import Callable +from datetime import UTC, datetime +from typing import ( + Any, + Iterator, + Optional, +) + +from fastapi import APIRouter, Request +from fastapi.responses import StreamingResponse +from llama_stack_client import ( + APIConnectionError, + RateLimitError, # type: ignore +) +from llama_stack_client.types.shared.interleaved_content_item import TextContentItem +from openai._exceptions import APIStatusError + +import metrics +from app.endpoints.query_old import ( + evaluate_model_hints, + select_model_and_provider_id, + validate_conversation_ownership, +) +from authentication.interface import AuthTuple +from authorization.azure_token_manager import AzureEntraIDManager +from client import AsyncLlamaStackClientHolder +from configuration import configuration +from constants import DEFAULT_RAG_TOOL, MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT +from models.context import ResponseGeneratorContext +from models.database.conversations import UserConversation +from models.requests import QueryRequest +from models.responses import ( + AbstractErrorResponse, + ForbiddenResponse, + InternalServerErrorResponse, + NotFoundResponse, + PromptTooLongResponse, + QuotaExceededResponse, + ServiceUnavailableResponse, + StreamingQueryResponse, + UnauthorizedResponse, + UnprocessableEntityResponse, +) +from utils.endpoints import ( + ReferencedDocument, + check_configuration_loaded, + validate_model_provider_override, +) +from utils.token_counter import TokenCounter +from utils.types import content_to_str + +logger = logging.getLogger("app.endpoints.handlers") +router = APIRouter(tags=["streaming_query"]) + +streaming_query_responses: dict[int | str, dict[str, Any]] = { + 200: StreamingQueryResponse.openapi_response(), + 401: UnauthorizedResponse.openapi_response( + examples=["missing header", "missing token"] + ), + 403: ForbiddenResponse.openapi_response( + examples=["conversation read", "endpoint", "model override"] + ), + 404: NotFoundResponse.openapi_response( + examples=["conversation", "model", "provider"] + ), + 413: PromptTooLongResponse.openapi_response(), + 422: UnprocessableEntityResponse.openapi_response(), + 429: QuotaExceededResponse.openapi_response(), + 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), + 503: 
ServiceUnavailableResponse.openapi_response(),
+}
+
+
+METADATA_PATTERN = re.compile(r"\nMetadata: (\{.+})\n")
+
+# OLS-compatible event types
+LLM_TOKEN_EVENT = "token"
+LLM_TOOL_CALL_EVENT = "tool_call"
+LLM_TOOL_RESULT_EVENT = "tool_result"
+LLM_VALIDATION_EVENT = "validation"
+
+
+def format_stream_data(d: dict) -> str:
+    """
+    Format a dictionary as a Server-Sent Events (SSE) data string.
+
+    Parameters:
+        d (dict): The data to be formatted as an SSE event.
+
+    Returns:
+        str: The formatted SSE data string.
+    """
+    data = json.dumps(d)
+    return f"data: {data}\n\n"
+
+
+def stream_start_event(conversation_id: str) -> str:
+    """
+    Yield the start of the data stream.
+
+    Format a Server-Sent Events (SSE) start event containing the
+    conversation ID.
+
+    Parameters:
+        conversation_id (str): Unique identifier for the
+        conversation.
+
+    Returns:
+        str: SSE-formatted string representing the start event.
+    """
+    return format_stream_data(
+        {
+            "event": "start",
+            "data": {
+                "conversation_id": conversation_id,
+            },
+        }
+    )
+
+
+def stream_end_event(
+    metadata_map: dict,
+    token_usage: TokenCounter,
+    available_quotas: dict[str, int],
+    referenced_documents: list[ReferencedDocument],
+    media_type: str = MEDIA_TYPE_JSON,
+) -> str:
+    """
+    Yield the end of the data stream.
+
+    Format and return the end event for a streaming response,
+    including referenced document metadata and token usage information.
+
+    Parameters:
+        metadata_map (dict): A mapping containing metadata about
+        referenced documents.
+        token_usage (TokenCounter): Token usage information.
+        available_quotas (dict[str, int]): Remaining quota per limiter.
+        referenced_documents (list[ReferencedDocument]): Documents
+        referenced in the response.
+        media_type (str): The media type for the response format.
+
+    Returns:
+        str: A Server-Sent Events (SSE) formatted string
+        representing the end of the data stream.
+    """
+    if media_type == MEDIA_TYPE_TEXT:
+        ref_docs_string = "\n".join(
+            f'{v["title"]}: {v["docs_url"]}'
+            for v in filter(
+                lambda v: ("docs_url" in v) and ("title" in v),
+                metadata_map.values(),
+            )
+        )
+        return f"\n\n---\n\n{ref_docs_string}" if ref_docs_string else ""
+
+    # Convert ReferencedDocument objects to dicts for JSON serialization
+    # Use mode="json" to ensure AnyUrl is serialized to string (not just model_dump())
+    referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents]
+
+    return format_stream_data(
+        {
+            "event": "end",
+            "data": {
+                "referenced_documents": referenced_docs_dict,
+                "truncated": None,  # TODO(jboos): implement truncated
+                "input_tokens": token_usage.input_tokens,
+                "output_tokens": token_usage.output_tokens,
+            },
+            "available_quotas": available_quotas,
+        }
+    )
+
+
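For orientation, a worked example of the terminal `end` frame that `stream_end_event` produces in the JSON case. The keys mirror the code above; the `ReferencedDocument` field names and all values are illustrative assumptions, not taken from the patch:

```python
import json

# Illustrative terminal frame, mirroring stream_end_event's JSON branch.
# Keys match the function above; document fields and numbers are made up.
end_frame = {
    "event": "end",
    "data": {
        "referenced_documents": [
            {"doc_title": "Installing the operator", "doc_url": "https://example.com/docs"}
        ],
        "truncated": None,
        "input_tokens": 123,
        "output_tokens": 456,
    },
    "available_quotas": {"user_quota": 9877},
}
print(f"data: {json.dumps(end_frame)}\n\n")
```

The `data: ...\n\n` wrapper is exactly the SSE framing that `format_stream_data` applies to every event.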
+def stream_event(data: dict, event_type: str, media_type: str) -> str:
+    """Build an item to yield based on media type.
+
+    Args:
+        data: The data to yield.
+        event_type: The type of event (e.g. token, tool request, tool execution).
+        media_type: Media type of the response (e.g. text or JSON).
+
+    Returns:
+        str: The formatted string or JSON to yield.
+    """
+    if media_type == MEDIA_TYPE_TEXT:
+        if event_type == LLM_TOKEN_EVENT:
+            return data["token"]
+        if event_type == LLM_TOOL_CALL_EVENT:
+            return f"\nTool call: {json.dumps(data)}\n"
+        if event_type == LLM_TOOL_RESULT_EVENT:
+            return f"\nTool result: {json.dumps(data)}\n"
+        logger.error("Unknown event type: %s", event_type)
+        return ""
+    return format_stream_data(
+        {
+            "event": event_type,
+            "data": data,
+        }
+    )
+
+
+# -----------------------------------
+# Error handling
+# -----------------------------------
+def _handle_error_event(
+    chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON
+) -> Iterator[str]:
+    """
+    Yield error event.
+
+    Yield a formatted Server-Sent Events (SSE) error event
+    containing the error message from a streaming chunk.
+
+    Parameters:
+        chunk (Any): The streaming chunk carrying the error message.
+        chunk_id (int): The unique identifier for the current
+        streaming chunk.
+        media_type (str): The media type for the response format.
+    """
+    if media_type == MEDIA_TYPE_TEXT:
+        yield f"Error: {chunk.error['message']}"
+    else:
+        yield format_stream_data(
+            {
+                "event": "error",
+                "data": {
+                    "id": chunk_id,
+                    "token": chunk.error["message"],
+                },
+            }
+        )
+
+
+def prompt_too_long_error(error: Exception, media_type: str) -> str:
+    """Return error representation for long prompts.
+
+    Args:
+        error: The exception raised for long prompts.
+        media_type: Media type of the response (e.g. text or JSON).
+
+    Returns:
+        str: The error message formatted for the media type.
+    """
+    logger.error("Prompt is too long: %s", error)
+    if media_type == MEDIA_TYPE_TEXT:
+        return f"Prompt is too long: {error}"
+    return format_stream_data(
+        {
+            "event": "error",
+            "data": {
+                "status_code": 413,
+                "response": "Prompt is too long",
+                "cause": str(error),
+            },
+        }
+    )
+
+
+def generic_llm_error(error: Exception, media_type: str) -> str:
+    """Return error representation for generic LLM errors.
+
+    Args:
+        error: The exception raised during processing.
+        media_type: Media type of the response (e.g. text or JSON).
+
+    Returns:
+        str: The error message formatted for the media type.
+    """
+    logger.error("Error while obtaining answer for user question")
+    logger.exception(error)
+
+    if media_type == MEDIA_TYPE_TEXT:
+        return f"Error: {str(error)}"
+    return format_stream_data(
+        {
+            "event": "error",
+            "data": {
+                "response": "Internal server error",
+                "cause": str(error),
+            },
+        }
+    )
+
+
+def stream_http_error(error: AbstractErrorResponse) -> Iterator[str]:
+    """
+    Yield an SSE-formatted error response for generic LLM or API errors.
+
+    Args:
+        error: An AbstractErrorResponse instance representing the error.
+
+    Yields:
+        str: A Server-Sent Events (SSE) formatted error message containing
+        the serialized error details.
+    """
+    logger.error("Error while obtaining answer for user question")
+    logger.exception(error)
+
+    yield format_stream_data({"event": "error", "data": {**error.detail.model_dump()}})
+
+
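To make the two output modes of `stream_event` concrete, a small usage sketch. It assumes the definitions above (`stream_event`, `LLM_TOKEN_EVENT`, and the media-type constants from `src/constants.py`) are in scope; the payload values are illustrative:

```python
# Token event rendered both ways.
payload = {"id": 0, "token": "Hello"}

# JSON mode wraps the payload in an SSE data record:
#   data: {"event": "token", "data": {"id": 0, "token": "Hello"}}\n\n
print(stream_event(payload, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON), end="")

# Plain-text mode emits just the raw token text:
#   Hello
print(stream_event(payload, LLM_TOKEN_EVENT, MEDIA_TYPE_TEXT))
```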
+# -----------------------------------
+# Turn handling
+# -----------------------------------
+def _handle_turn_start_event(
+    _chunk_id: int,
+    media_type: str = MEDIA_TYPE_JSON,
+    conversation_id: Optional[str] = None,
+) -> Iterator[str]:
+    """
+    Yield turn start event.
+
+    Yield a Server-Sent Event (SSE) start event indicating the
+    start of a new conversation turn.
+
+    Parameters:
+        _chunk_id (int): The unique identifier for the current
+        chunk (unused).
+        media_type (str): The media type for the response format.
+        conversation_id (str, optional): Conversation ID for the start
+        event; a new UUID is generated when not provided.
+
+    Yields:
+        str: SSE-formatted start event with conversation_id.
+    """
+    # Use provided conversation_id or generate one if not available
+    if conversation_id is None:
+        conversation_id = str(uuid.uuid4())
+
+    if media_type == MEDIA_TYPE_TEXT:
+        yield (
+            f"data: {json.dumps({'event': 'start', 'data': {'conversation_id': conversation_id}})}\n\n"  # pylint: disable=line-too-long
+        )
+    else:
+        yield format_stream_data(
+            {
+                "event": "start",
+                "data": {"conversation_id": conversation_id},
+            }
+        )
+
+
+def _handle_turn_complete_event(
+    chunk: Any, _chunk_id: int, media_type: str = MEDIA_TYPE_JSON
+) -> Iterator[str]:
+    """
+    Yield turn complete event.
+
+    Yields a Server-Sent Event (SSE) indicating the completion of a
+    conversation turn, including the full output message content.
+
+    Parameters:
+        chunk (Any): The streaming chunk containing the completed turn.
+        _chunk_id (int): The unique identifier for the current
+        chunk (unused).
+
+    Yields:
+        str: SSE-formatted string containing the turn completion
+        event and output message content.
+    """
+    full_response = content_to_str(chunk.event.payload.turn.output_message.content)
+
+    if media_type == MEDIA_TYPE_TEXT:
+        yield (
+            f"data: {json.dumps({'event': 'turn_complete', 'data': {'token': full_response}})}\n\n"
+        )
+    else:
+        yield format_stream_data(
+            {
+                "event": "turn_complete",
+                "data": {"token": full_response},
+            }
+        )
+
+
+# -----------------------------------
+# Shield handling
+# -----------------------------------
+def _handle_shield_event(
+    chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON
+) -> Iterator[str]:
+    """
+    Yield shield event.
+
+    Processes a shield event chunk and yields a formatted SSE token
+    event indicating shield validation results.
+
+    Yields a "No Violation" token if no violation is detected, or a
+    violation message if a shield violation occurs. Increments
+    validation error metrics when violations are present.
+    """
+    if chunk.event.payload.event_type == "step_complete":
+        violation = chunk.event.payload.step_details.violation
+        if not violation:
+            yield stream_event(
+                data={
+                    "id": chunk_id,
+                    "token": "No Violation",
+                },
+                event_type=LLM_VALIDATION_EVENT,
+                media_type=media_type,
+            )
+        else:
+            # Metric for LLM validation errors
+            metrics.llm_calls_validation_errors_total.inc()
+            violation = (
+                f"Violation: {violation.user_message} (Metadata: {violation.metadata})"
+            )
+            yield stream_event(
+                data={
+                    "id": chunk_id,
+                    "token": violation,
+                },
+                event_type=LLM_VALIDATION_EVENT,
+                media_type=media_type,
+            )
+
+
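The tool-execution handler that follows recovers document metadata by scanning knowledge-search output for `Metadata: {...}` lines with `METADATA_PATTERN` and `ast.literal_eval`. A self-contained sketch of that extraction; the sample text and its fields are invented:

```python
import ast
import re

METADATA_PATTERN = re.compile(r"\nMetadata: (\{.+})\n")

sample = (
    "Result 1\nMetadata: {'document_id': 'doc-1', 'title': 'Install guide'}\n"
    "Result 2\nMetadata: {'document_id': 'doc-2', 'title': 'Upgrade notes'}\n"
)

metadata_map: dict[str, dict] = {}
for match in METADATA_PATTERN.findall(sample):
    try:
        # The metadata is a Python-literal dict, not JSON, hence literal_eval.
        meta = ast.literal_eval(match)
        if "document_id" in meta:
            metadata_map[meta["document_id"]] = meta
    except (ValueError, SyntaxError):
        pass  # skip anything that does not parse as a literal

print(sorted(metadata_map))  # -> ['doc-1', 'doc-2']
```

This is the same `metadata_map` that `stream_end_event` later consults when building the referenced-documents list for text-mode responses.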
+# -----------------------------------
+# Tool Execution handling
+# -----------------------------------
+# pylint: disable=R1702,R0912
+def _handle_tool_execution_event(
+    chunk: Any, chunk_id: int, metadata_map: dict, media_type: str = MEDIA_TYPE_JSON
+) -> Iterator[str]:
+    """
+    Yield tool call event.
+
+    Processes tool execution events from a streaming chunk and
+    yields formatted Server-Sent Events (SSE) strings.
+
+    Handles both tool call initiation and completion, including
+    tool call arguments, responses, and summaries. Extracts and
+    updates document metadata from knowledge search tool responses
+    when present.
+
+    Parameters:
+        chunk_id (int): Unique identifier for the current streaming
+        chunk.
+        metadata_map (dict): Dictionary to be updated with
+        document metadata extracted from tool responses.
+
+    Yields:
+        str: SSE-formatted event strings representing tool call
+        events and responses.
+    """
+    if chunk.event.payload.event_type == "step_start":
+        yield stream_event(
+            data={
+                "id": chunk_id,
+                "token": "",
+            },
+            event_type=LLM_TOOL_CALL_EVENT,
+            media_type=media_type,
+        )
+
+    elif chunk.event.payload.event_type == "step_complete":
+        for t in chunk.event.payload.step_details.tool_calls:
+            yield stream_event(
+                data={
+                    "id": chunk_id,
+                    "token": {
+                        "tool_name": t.tool_name,
+                        "arguments": t.arguments,
+                    },
+                },
+                event_type=LLM_TOOL_CALL_EVENT,
+                media_type=media_type,
+            )
+
+        for r in chunk.event.payload.step_details.tool_responses:
+            if r.tool_name == "query_from_memory":
+                inserted_context = content_to_str(r.content)
+                yield stream_event(
+                    data={
+                        "id": chunk_id,
+                        "token": {
+                            "tool_name": r.tool_name,
+                            "response": f"Fetched {len(inserted_context)} bytes from memory",
+                        },
+                    },
+                    event_type=LLM_TOOL_RESULT_EVENT,
+                    media_type=media_type,
+                )
+
+            elif r.tool_name == DEFAULT_RAG_TOOL and r.content:
+                summary = ""
+                for i, text_content_item in enumerate(r.content):
+                    if isinstance(text_content_item, TextContentItem):
+                        if i == 0:
+                            summary = text_content_item.text
+                            newline_pos = summary.find("\n")
+                            if newline_pos > 0:
+                                summary = summary[:newline_pos]
+                        for match in METADATA_PATTERN.findall(text_content_item.text):
+                            try:
+                                meta = ast.literal_eval(match)
+                                if "document_id" in meta:
+                                    metadata_map[meta["document_id"]] = meta
+                            except Exception:  # pylint: disable=broad-except
+                                logger.debug(
+                                    "An exception was thrown in processing %s",
+                                    match,
+                                )
+
+                yield stream_event(
+                    data={
+                        "id": chunk_id,
+                        "token": {
+                            "tool_name": r.tool_name,
+                            "summary": summary,
+                        },
+                    },
+                    event_type=LLM_TOOL_RESULT_EVENT,
+                    media_type=media_type,
+                )
+
+            else:
+                yield stream_event(
+                    data={
+                        "id": chunk_id,
+                        "token": {
+                            "tool_name": r.tool_name,
+                            "response": content_to_str(r.content),
+                        },
+                    },
+                    event_type=LLM_TOOL_RESULT_EVENT,
+                    media_type=media_type,
+                )
+
+
+# -----------------------------------
+# Catch-all for everything else
+# -----------------------------------
+def _handle_heartbeat_event(
+    chunk_id: int, media_type: str = MEDIA_TYPE_JSON
+) -> Iterator[str]:
+    """
+    Yield a heartbeat event.
+
+    Yield a heartbeat event as a Server-Sent Event (SSE) for the
+    given chunk ID.
+
+    Parameters:
+        chunk_id (int): The identifier for the current streaming
+        chunk.
+
+    Yields:
+        str: SSE-formatted heartbeat event string.
+    """
+    yield stream_event(
+        data={
+            "id": chunk_id,
+            "token": "heartbeat",
+        },
+        event_type=LLM_TOKEN_EVENT,
+        media_type=media_type,
+    )
+
+
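The base handler defined next takes the API-specific behavior as plain callables, which is how the Agent-API and Responses-API endpoints share one code path. A minimal sketch of this injection pattern, with toy stand-ins for the real retrieve/generator functions:

```python
import asyncio
from typing import Any, AsyncIterator, Callable

async def handler_base(
    retrieve_response_func: Callable[..., Any],
    create_response_generator_func: Callable[..., Any],
) -> list[str]:
    # Shared plumbing lives here; the two callables supply the
    # Agent-API or Responses-API specific behavior.
    response = await retrieve_response_func()
    generator = create_response_generator_func()
    return [event async for event in generator(response)]

# Toy implementations standing in for the real variants:
async def fake_retrieve() -> list[str]:
    return ["tok-1", "tok-2"]

def make_generator() -> Callable[[list[str]], AsyncIterator[str]]:
    async def generator(response: list[str]) -> AsyncIterator[str]:
        for token in response:
            yield f"data: {token}\n\n"  # SSE-style framing
    return generator

print(asyncio.run(handler_base(fake_retrieve, make_generator)))
```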
+async def streaming_query_endpoint_handler_base(  # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments
+    request: Request,
+    query_request: QueryRequest,
+    auth: AuthTuple,
+    mcp_headers: dict[str, dict[str, str]],
+    retrieve_response_func: Callable[..., Any],
+    create_response_generator_func: Callable[..., Any],
+) -> StreamingResponse:
+    """
+    Handle streaming query endpoints with common logic.
+
+    This base handler contains all the common logic for streaming query endpoints
+    and accepts functions for API-specific behavior (Agent API vs Responses API).
+
+    Args:
+        request: The FastAPI request object
+        query_request: The query request from the user
+        auth: Authentication tuple (user_id, username, skip_check, token)
+        mcp_headers: MCP headers for tool integrations
+        retrieve_response_func: Function to retrieve the streaming response
+        create_response_generator_func: Function factory that creates the response generator
+
+    Returns:
+        StreamingResponse: An HTTP streaming response yielding SSE-formatted events
+
+    Raises:
+        HTTPException: Not raised directly; failures such as an unreachable
+        Llama Stack are streamed back as SSE error events (HTTP 503)
+    """
+    # Nothing interesting in the request
+    _ = request
+
+    check_configuration_loaded(configuration)
+    started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    # Enforce RBAC: optionally disallow overriding model/provider in requests
+    validate_model_provider_override(query_request, request.state.authorized_actions)
+
+    # log Llama Stack configuration
+    logger.info("Llama stack config: %s", configuration.llama_stack_configuration)
+
+    user_id, _user_name, _skip_userid_check, token = auth
+
+    user_conversation: Optional[UserConversation] = None
+    if query_request.conversation_id:
+        user_conversation = validate_conversation_ownership(
+            user_id=user_id, conversation_id=query_request.conversation_id
+        )
+
+        if user_conversation is None:
+            logger.warning(
+                "User %s attempted to query conversation %s they don't own",
+                user_id,
+                query_request.conversation_id,
+            )
+            forbidden_error = ForbiddenResponse.conversation(
+                action="read",
+                resource_id=query_request.conversation_id,
+                user_id=user_id,
+            )
+            return StreamingResponse(
+                stream_http_error(forbidden_error),
+                media_type="text/event-stream",
+                status_code=forbidden_error.status_code,
+            )
+
+    try:
+        # try to get Llama Stack client
+        client = AsyncLlamaStackClientHolder().get_client()
+        llama_stack_model_id, model_id, provider_id = select_model_and_provider_id(
+            await client.models.list(),
+            *evaluate_model_hints(
+                user_conversation=user_conversation, query_request=query_request
+            ),
+        )
+
+        if (
+            provider_id == "azure"
+            and AzureEntraIDManager().is_entra_id_configured
+            and AzureEntraIDManager().is_token_expired
+            and AzureEntraIDManager().refresh_token()
+        ):
+            if AsyncLlamaStackClientHolder().is_library_client:
+                client = await AsyncLlamaStackClientHolder().reload_library_client()
+            else:
+                azure_config = next(
+                    p.config
+                    for p in await client.providers.list()
+                    if p.provider_type == "remote::azure"
+                )
+                client = AsyncLlamaStackClientHolder().update_provider_data(
+                    {
+                        "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(),
+                        "azure_api_base": str(azure_config.get("api_base")),
+                    }
+                )
+
+        response, conversation_id = await retrieve_response_func(
+            client,
+            llama_stack_model_id,
+            query_request,
+            token,
+            mcp_headers=mcp_headers,
+        )
+
+        metadata_map: dict[str, dict[str, Any]] = {}
+
+        # Create context object for response generator
+        context = ResponseGeneratorContext(
+            conversation_id=conversation_id,
+            user_id=user_id,
+            skip_userid_check=_skip_userid_check,
+            model_id=model_id,
+            provider_id=provider_id,
+            llama_stack_model_id=llama_stack_model_id,
+            query_request=query_request,
+            started_at=started_at,
+            client=client,
+            metadata_map=metadata_map,
+        )
+
+        # Create the response generator using the provided factory function
+        response_generator = create_response_generator_func(context)
+
+        # Update metrics for the LLM call
+        metrics.llm_calls_total.labels(provider_id, model_id).inc()
+
+        # Determine media type for response
+        # Note: The HTTP Content-Type header
is always text/event-stream for SSE, + # but the media_type parameter controls how the content is formatted + return StreamingResponse( + response_generator(response), media_type="text/event-stream" + ) + except APIConnectionError as e: + metrics.llm_calls_failures_total.inc() + logger.error("Unable to connect to Llama Stack: %s", e) + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + return StreamingResponse( + stream_http_error(error_response), + status_code=error_response.status_code, + media_type="text/event-stream", + ) + except RateLimitError as e: + used_model = getattr(e, "model", "") + if used_model: + error_response = QuotaExceededResponse.model(used_model) + else: + error_response = QuotaExceededResponse( + response="The quota has been exceeded", cause=str(e) + ) + return StreamingResponse( + stream_http_error(error_response), + status_code=error_response.status_code, + media_type="text/event-stream", + ) + except APIStatusError as e: + metrics.llm_calls_failures_total.inc() + logger.error("API status error: %s", e) + error_response = InternalServerErrorResponse.generic() + return StreamingResponse( + stream_http_error(error_response), + status_code=error_response.status_code, + media_type=query_request.media_type or MEDIA_TYPE_JSON, + ) diff --git a/src/app/endpoints/streaming_query_v2.py b/src/app/endpoints/streaming_query_v2.py deleted file mode 100644 index ee7465617..000000000 --- a/src/app/endpoints/streaming_query_v2.py +++ /dev/null @@ -1,508 +0,0 @@ -"""Streaming query handler using Responses API (v2).""" - -import logging -from typing import Annotated, Any, AsyncIterator, Optional, cast - -from fastapi import APIRouter, Depends, Request -from fastapi.responses import StreamingResponse -from llama_stack_api.openai_responses import ( - OpenAIResponseObject, - OpenAIResponseObjectStream, - OpenAIResponseObjectStreamResponseCompleted, - OpenAIResponseObjectStreamResponseFailed, - OpenAIResponseObjectStreamResponseOutputItemDone, - OpenAIResponseObjectStreamResponseOutputTextDelta, - OpenAIResponseObjectStreamResponseOutputTextDone, -) -from llama_stack_client import AsyncLlamaStackClient - -from app.endpoints.query import ( - is_transcripts_enabled, - persist_user_conversation_details, - validate_attachments_metadata, -) -from app.endpoints.query_v2 import ( - _build_tool_call_summary, - extract_token_usage_from_responses_api, - get_topic_summary, - parse_referenced_documents_from_responses_api, - prepare_tools_for_responses_api, -) -from app.endpoints.streaming_query import ( - LLM_TOKEN_EVENT, - LLM_TOOL_CALL_EVENT, - LLM_TOOL_RESULT_EVENT, - format_stream_data, - stream_end_event, - stream_event, - stream_start_event, - streaming_query_endpoint_handler_base, -) -from authentication import get_auth_dependency -from authentication.interface import AuthTuple -from authorization.middleware import authorize -from configuration import configuration -from constants import ( - MEDIA_TYPE_JSON, -) -from models.config import Action -from models.context import ResponseGeneratorContext -from models.requests import QueryRequest -from models.responses import ( - ForbiddenResponse, - InternalServerErrorResponse, - NotFoundResponse, - QuotaExceededResponse, - ReferencedDocument, - ServiceUnavailableResponse, - StreamingQueryResponse, - UnauthorizedResponse, - UnprocessableEntityResponse, -) -from utils.endpoints import ( - cleanup_after_streaming, - get_system_prompt, -) -from utils.query import create_violation_stream -from utils.quota 
import consume_tokens, get_available_quotas -from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id -from utils.mcp_headers import mcp_headers_dependency -from utils.shields import ( - append_turn_to_conversation, - run_shield_moderation, -) -from utils.token_counter import TokenCounter -from utils.transcripts import store_transcript -from utils.types import RAGChunk, TurnSummary -from utils.vector_search import perform_vector_search, format_rag_context_for_injection - -logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["streaming_query_v1"]) -auth_dependency = get_auth_dependency() - -streaming_query_v2_responses: dict[int | str, dict[str, Any]] = { - 200: StreamingQueryResponse.openapi_response(), - 401: UnauthorizedResponse.openapi_response( - examples=["missing header", "missing token"] - ), - 403: ForbiddenResponse.openapi_response( - examples=["conversation read", "endpoint", "model override"] - ), - 404: NotFoundResponse.openapi_response( - examples=["conversation", "model", "provider"] - ), - # 413: PromptTooLongResponse.openapi_response(), - 422: UnprocessableEntityResponse.openapi_response(), - 429: QuotaExceededResponse.openapi_response(), - 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), - 503: ServiceUnavailableResponse.openapi_response(), -} - - -def create_responses_response_generator( # pylint: disable=too-many-locals,too-many-statements - context: ResponseGeneratorContext, - doc_ids_from_chunks: Optional[list[ReferencedDocument]] = None, -) -> Any: - """ - Create a response generator function for Responses API streaming. - - This factory function returns an async generator that processes streaming - responses from the Responses API and yields Server-Sent Events (SSE). - - Args: - context: Context object containing all necessary parameters for response generation - doc_ids_from_chunks: Referenced documents extracted from vector DB chunks - - Returns: - An async generator function that yields SSE-formatted strings - """ - - async def response_generator( # pylint: disable=too-many-branches,too-many-statements - turn_response: AsyncIterator[OpenAIResponseObjectStream], - ) -> AsyncIterator[str]: - """ - Generate SSE formatted streaming response. - - Asynchronously generates a stream of Server-Sent Events - (SSE) representing incremental responses from a - language model turn. - - Yields start, token, tool call, turn completion, and - end events as SSE-formatted strings. Collects the - complete response for transcript storage if enabled. 
- """ - chunk_id = 0 - summary = TurnSummary( - llm_response="", tool_calls=[], tool_results=[], rag_chunks=[] - ) - - # Determine media type for response formatting - media_type = context.query_request.media_type or MEDIA_TYPE_JSON - - # Accumulators for Responses API - text_parts: list[str] = [] - emitted_turn_complete = False - - # Use the conversation_id from context (either provided or newly created) - conv_id = context.conversation_id - - # Track the latest response object from response.completed event - latest_response_object: Optional[Any] = None - - # RAG chunks - rag_chunks: list[RAGChunk] = [] - - logger.debug("Starting streaming response (Responses API) processing") - - async for chunk in turn_response: - event_type = getattr(chunk, "type", None) - logger.debug("Processing chunk %d, type: %s", chunk_id, event_type) - - # Emit start event when response is created - if event_type == "response.created": - yield stream_start_event(conv_id) - - # Text streaming - if event_type == "response.output_text.delta": - delta_chunk = cast( - OpenAIResponseObjectStreamResponseOutputTextDelta, chunk - ) - if delta_chunk.delta: - text_parts.append(delta_chunk.delta) - yield stream_event( - { - "id": chunk_id, - "token": delta_chunk.delta, - }, - LLM_TOKEN_EVENT, - media_type, - ) - chunk_id += 1 - - # Final text of the output (capture, but emit at response.completed) - elif event_type == "response.output_text.done": - text_done_chunk = cast( - OpenAIResponseObjectStreamResponseOutputTextDone, chunk - ) - if text_done_chunk.text: - summary.llm_response = text_done_chunk.text - - # Content part started - emit an empty token to kick off UI streaming - elif event_type == "response.content_part.added": - yield stream_event( - { - "id": chunk_id, - "token": "", - }, - LLM_TOKEN_EVENT, - media_type, - ) - chunk_id += 1 - - # Process tool calls and results are emitted together when output items are done - # TODO(asimurka): support emitting tool calls and results separately when ready - elif event_type == "response.output_item.done": - output_item_done_chunk = cast( - OpenAIResponseObjectStreamResponseOutputItemDone, chunk - ) - if output_item_done_chunk.item.type == "message": - continue - tool_call, tool_result = _build_tool_call_summary( - output_item_done_chunk.item, rag_chunks - ) - if tool_call: - summary.tool_calls.append(tool_call) - yield stream_event( - tool_call.model_dump(), - LLM_TOOL_CALL_EVENT, - media_type, - ) - if tool_result: - summary.tool_results.append(tool_result) - yield stream_event( - tool_result.model_dump(), - LLM_TOOL_RESULT_EVENT, - media_type, - ) - - # Completed response - capture final text and response object - elif event_type == "response.completed": - # Capture the response object for token usage extraction - completed_chunk = cast( - OpenAIResponseObjectStreamResponseCompleted, chunk - ) - latest_response_object = completed_chunk.response - - if not emitted_turn_complete: - final_message = summary.llm_response or "".join(text_parts) - if not final_message: - final_message = "No response from the model" - summary.llm_response = final_message - yield stream_event( - { - "id": chunk_id, - "token": final_message, - }, - "turn_complete", - media_type, - ) - chunk_id += 1 - emitted_turn_complete = True - - # Incomplete response - emit error because LLS does not - # support incomplete responses "incomplete_detail" attribute yet - elif event_type == "response.incomplete": - error_response = InternalServerErrorResponse.query_failed( - "An unexpected error occurred while 
processing the request." - ) - logger.error("Error while obtaining answer for user question") - yield format_stream_data( - {"event": "error", "data": {**error_response.detail.model_dump()}} - ) - return - - # Failed response - emit error with custom cause from error message - elif event_type == "response.failed": - failed_chunk = cast(OpenAIResponseObjectStreamResponseFailed, chunk) - latest_response_object = failed_chunk.response - error_message = ( - failed_chunk.response.error.message - if failed_chunk.response.error - else "An unexpected error occurred while processing the request." - ) - error_response = InternalServerErrorResponse.query_failed(error_message) - logger.error("Error while obtaining answer for user question") - yield format_stream_data( - {"event": "error", "data": {**error_response.detail.model_dump()}} - ) - return - - logger.debug( - "Streaming complete - Tool calls: %d, Response chars: %d", - len(summary.tool_calls), - len(summary.llm_response), - ) - - # Extract token usage from the response object - token_usage = ( - extract_token_usage_from_responses_api( - latest_response_object, context.model_id, context.provider_id - ) - if latest_response_object is not None - else TokenCounter() - ) - consume_tokens( - configuration.quota_limiters, - configuration.token_usage_history, - context.user_id, - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - model_id=context.model_id, - provider_id=context.provider_id, - ) - response_referenced_documents = parse_referenced_documents_from_responses_api( - cast(OpenAIResponseObject, latest_response_object) - ) - # Combine doc_ids_from_chunks with response_referenced_documents - all_referenced_documents = ( - doc_ids_from_chunks or [] - ) + response_referenced_documents - available_quotas = get_available_quotas( - configuration.quota_limiters, context.user_id - ) - yield stream_end_event( - context.metadata_map, - token_usage, - available_quotas, - all_referenced_documents, - media_type, - ) - - # Perform cleanup tasks (database and cache operations)) - await cleanup_after_streaming( - user_id=context.user_id, - conversation_id=conv_id, - model_id=context.model_id, - provider_id=context.provider_id, - llama_stack_model_id=context.llama_stack_model_id, - query_request=context.query_request, - summary=summary, - metadata_map=context.metadata_map, - started_at=context.started_at, - client=context.client, - config=configuration, - skip_userid_check=context.skip_userid_check, - get_topic_summary_func=get_topic_summary, - is_transcripts_enabled_func=is_transcripts_enabled, - store_transcript_func=store_transcript, - persist_user_conversation_details_func=persist_user_conversation_details, - rag_chunks=[rag_chunk.model_dump() for rag_chunk in rag_chunks], - ) - - return response_generator - - -@router.post( - "/streaming_query", - response_class=StreamingResponse, - responses=streaming_query_v2_responses, - summary="Streaming Query Endpoint Handler V1", -) -@authorize(Action.STREAMING_QUERY) -async def streaming_query_endpoint_handler_v2( # pylint: disable=too-many-locals - request: Request, - query_request: QueryRequest, - auth: Annotated[AuthTuple, Depends(auth_dependency)], - mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), -) -> StreamingResponse: - """ - Handle request to the /streaming_query endpoint using Responses API. - - Returns a streaming response using Server-Sent Events (SSE) format with - content type text/event-stream. 
-
-
-@router.post(
-    "/streaming_query",
-    response_class=StreamingResponse,
-    responses=streaming_query_v2_responses,
-    summary="Streaming Query Endpoint Handler V1",
-)
-@authorize(Action.STREAMING_QUERY)
-async def streaming_query_endpoint_handler_v2(  # pylint: disable=too-many-locals
-    request: Request,
-    query_request: QueryRequest,
-    auth: Annotated[AuthTuple, Depends(auth_dependency)],
-    mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency),
-) -> StreamingResponse:
-    """
-    Handle request to the /streaming_query endpoint using the Responses API.
-
-    Returns a streaming response in Server-Sent Events (SSE) format with
-    content type text/event-stream.
-
-    Returns:
-        StreamingResponse: An HTTP streaming response yielding
-        SSE-formatted events for the query lifecycle with content type
-        text/event-stream.
-
-    Raises:
-        HTTPException:
-            - 401: Unauthorized - Missing or invalid credentials
-            - 403: Forbidden - Insufficient permissions or model override not allowed
-            - 404: Not Found - Conversation, model, or provider not found
-            - 422: Unprocessable Entity - Request validation failed
-            - 429: Too Many Requests - Quota limit exceeded
-            - 500: Internal Server Error - Configuration not loaded or other server errors
-            - 503: Service Unavailable - Unable to connect to Llama Stack backend
-    """
-    return await streaming_query_endpoint_handler_base(
-        request=request,
-        query_request=query_request,
-        auth=auth,
-        mcp_headers=mcp_headers,
-        retrieve_response_func=retrieve_response,
-        create_response_generator_func=create_responses_response_generator,
-    )
-
-
-async def retrieve_response(  # pylint: disable=too-many-locals
-    client: AsyncLlamaStackClient,
-    model_id: str,
-    query_request: QueryRequest,
-    token: str,
-    mcp_headers: Optional[dict[str, dict[str, str]]] = None,
-) -> tuple[AsyncIterator[OpenAIResponseObjectStream], str, list[ReferencedDocument]]:
-    """
-    Retrieve a streaming response from the model via the Responses API.
-
-    Asynchronously retrieves a streaming response, a conversation ID, and the
-    referenced documents from the Llama Stack Responses API for a given user query.
-
-    This function configures shields, system prompt, and tool usage
-    based on the request and environment. It prepares the request with
-    appropriate headers and tools, validates attachments if present,
-    and initiates a streaming response with the user's query and any
-    provided documents.
-
-    Parameters:
-        model_id (str): Identifier of the model to use for the query.
-        query_request (QueryRequest): The user's query and associated metadata.
-        token (str): Authentication token for downstream services.
-        mcp_headers (dict[str, dict[str, str]], optional):
-            Model Context Protocol (MCP) headers for tool integrations.
-
-    Returns:
-        tuple: A tuple containing the streaming response object,
-        the conversation ID, and the list of referenced documents from vector DB chunks.
- """ - # use system prompt from request or default one - system_prompt = get_system_prompt(query_request, configuration) - logger.debug("Using system prompt: %s", system_prompt) - - # TODO(lucasagomes): redact attachments content before sending to LLM - # if attachments are provided, validate them - if query_request.attachments: - validate_attachments_metadata(query_request.attachments) - - # Prepare tools for responses API - skip RAG tools since we're doing direct vector query - toolgroups = await prepare_tools_for_responses_api( - client, - query_request, - token, - configuration, - mcp_headers=mcp_headers, - skip_rag_tools=True, - ) - - # Extract RAG chunks from vector DB query response BEFORE calling responses API - _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( - client, query_request, configuration - ) - - # Format RAG context for injection into user message - rag_context = format_rag_context_for_injection(rag_chunks) - - # Prepare input for Responses API - # Convert attachments to text and concatenate with query - input_text = query_request.query - if query_request.attachments: - for attachment in query_request.attachments: - input_text += ( - f"\n\n[Attachment: {attachment.attachment_type}]\n" - f"{attachment.content}" - ) - - # Add RAG context to input text - input_text += rag_context - - # Handle conversation ID for Responses API - # Create conversation upfront if not provided - conversation_id = query_request.conversation_id - if conversation_id: - # Conversation ID was provided - convert to llama-stack format - logger.debug("Using existing conversation ID: %s", conversation_id) - llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id) - else: - # No conversation_id provided - create a new conversation first - logger.debug("No conversation_id provided, creating new conversation") - conversation = await client.conversations.create(metadata={}) - llama_stack_conv_id = conversation.id - # Store the normalized version for later use - conversation_id = normalize_conversation_id(llama_stack_conv_id) - logger.info( - "Created new conversation with ID: %s (normalized: %s)", - llama_stack_conv_id, - conversation_id, - ) - - # Run shield moderation before calling LLM - moderation_result = await run_shield_moderation(client, input_text) - if moderation_result.blocked: - violation_message = moderation_result.message or "" - await append_turn_to_conversation( - client, llama_stack_conv_id, input_text, violation_message - ) - return ( - create_violation_stream(violation_message, moderation_result.shield_model), - normalize_conversation_id(conversation_id), - ) - - create_params: dict[str, Any] = { - "input": input_text, - "model": model_id, - "instructions": system_prompt, - "stream": True, - "store": True, - "tools": toolgroups, - "conversation": llama_stack_conv_id, - } - - response = await client.responses.create(**create_params) - response_stream = cast(AsyncIterator[OpenAIResponseObjectStream], response) - - return ( - response_stream, - normalize_conversation_id(conversation_id), - doc_ids_from_chunks, - ) diff --git a/src/app/routers.py b/src/app/routers.py index b8e1d9af9..44e7ff5d3 100644 --- a/src/app/routers.py +++ b/src/app/routers.py @@ -5,6 +5,7 @@ from app.endpoints import ( info, models, + query, shields, providers, rags, @@ -12,7 +13,7 @@ health, config, feedback, - streaming_query_v2, + streaming_query, authorized, conversations_v2, conversations_v3, @@ -20,8 +21,6 @@ tools, mcp_auth, # V2 endpoints for Response API support - query_v2, - 
diff --git a/src/app/routers.py b/src/app/routers.py
index b8e1d9af9..44e7ff5d3 100644
--- a/src/app/routers.py
+++ b/src/app/routers.py
@@ -5,6 +5,7 @@
 from app.endpoints import (
     info,
     models,
+    query,
     shields,
     providers,
     rags,
@@ -12,7 +13,7 @@
     health,
     config,
     feedback,
-    streaming_query_v2,
+    streaming_query,
     authorized,
     conversations_v2,
     conversations_v3,
@@ -20,8 +21,6 @@
     tools,
     mcp_auth,
     # V2 endpoints for Response API support
-    query_v2,
-
     # RHEL Lightspeed rlsapi v1 compatibility
     rlsapi_v1,
     # A2A (Agent-to-Agent) protocol support
     a2a,
@@ -50,8 +49,8 @@ def include_routers(app: FastAPI) -> None:
     app.include_router(providers.router, prefix="/v1")
     app.include_router(rags.router, prefix="/v1")
     # V1 endpoints now use the V2 (Responses API) implementations
-    app.include_router(query_v2.router, prefix="/v1")
-    app.include_router(streaming_query_v2.router, prefix="/v1")
+    app.include_router(query.router, prefix="/v1")
+    app.include_router(streaming_query.router, prefix="/v1")
     app.include_router(config.router, prefix="/v1")
     app.include_router(feedback.router, prefix="/v1")
     # V1 conversations endpoint now uses V3 implementation (conversations is deprecated)
diff --git a/tests/integration/endpoints/test_query_v2_integration.py b/tests/integration/endpoints/test_query_v2_integration.py
index 6bd292361..56abee2a5 100644
--- a/tests/integration/endpoints/test_query_v2_integration.py
+++ b/tests/integration/endpoints/test_query_v2_integration.py
@@ -16,8 +16,8 @@
 from sqlalchemy.orm import Session, sessionmaker
 
 import app.database
-import app.endpoints.query
-from app.endpoints.query_v2 import query_endpoint_handler_v2
+import app.endpoints.query_old
+from app.endpoints.query import query_endpoint_handler_v2
 from authentication.interface import AuthTuple
 from configuration import AppConfig
 from models.cache_entry import CacheEntry
@@ -861,7 +861,7 @@
     _ = mock_llama_stack_client
     _ = patch_db_session
 
-    cache_spy = mocker.spy(app.endpoints.query, "store_conversation_into_cache")
+    cache_spy = mocker.spy(app.endpoints.query_old, "store_conversation_into_cache")
 
     query_request = QueryRequest(query="What is Ansible?")
 
@@ -1153,8 +1153,8 @@
 
     mock_llama_stack_client.responses.create.return_value = mock_response
 
-    mock_consume = mocker.spy(app.endpoints.query, "consume_tokens")
-    _ = mocker.spy(app.endpoints.query, "get_available_quotas")
+    mock_consume = mocker.spy(app.endpoints.query_old, "consume_tokens")
+    _ = mocker.spy(app.endpoints.query_old, "get_available_quotas")
 
     query_request = QueryRequest(query="What is Ansible?")
 
diff --git a/tests/unit/app/endpoints/test_query.py b/tests/unit/app/endpoints/test_query.py
index 459c991aa..2aff61eff 100644
--- a/tests/unit/app/endpoints/test_query.py
+++ b/tests/unit/app/endpoints/test_query.py
@@ -1,26 +1,25 @@
-"""Unit tests for the /query REST API endpoint."""
-
-# pylint: disable=redefined-outer-name
-# pylint: disable=too-many-lines
-# pylint: disable=ungrouped-imports
+# pylint: disable=redefined-outer-name, import-error,too-many-locals,too-many-lines
+# pyright: reportCallIssue=false
+"""Unit tests for the /query (v2) REST API endpoint using Responses API."""
 
+from pathlib import Path
 from typing import Any
 
 import pytest
 from fastapi import HTTPException, Request, status
+import httpx
+from llama_stack_client import APIConnectionError, RateLimitError
 from pytest_mock import MockerFixture
 
 from app.endpoints.query import (
-    evaluate_model_hints,
-    is_transcripts_enabled,
-    select_model_and_provider_id,
-    validate_attachments_metadata,
+    get_mcp_tools,
+    get_rag_tools,
+    query_endpoint_handler_v2,
+    retrieve_response,
 )
-from configuration import AppConfig
-from models.config import Action
-from models.database.conversations import UserConversation
+from models.config import ModelContextProtocolServer
 from models.requests import Attachment, QueryRequest
-from 
utils.token_counter import TokenCounter +from utils.types import ShieldModerationResult # User ID must be proper UUID MOCK_AUTH = ( @@ -33,454 +32,1008 @@ @pytest.fixture def dummy_request() -> Request: - """Dummy request fixture for testing. - - Create a minimal FastAPI Request with test-ready authorization state. + """Create a dummy FastAPI Request object for testing. - The returned Request has a minimal HTTP scope and a - `state.authorized_actions` attribute initialized to a set containing all - members of the `Action` enum, suitable for use in unit tests that require - an authenticated request context. + Create a minimal FastAPI Request object suitable for unit tests. Returns: - req (Request): FastAPI Request with `state.authorized_actions` set to `set(Action)`. + request (fastapi.Request): A Request constructed with a bare HTTP scope + (type "http") for use in tests. """ - req = Request( - scope={ - "type": "http", - } - ) - - req.state.authorized_actions = set(Action) + req = Request(scope={"type": "http"}) return req -def mock_metrics(mocker: MockerFixture) -> None: - """Helper function to mock metrics operations for query endpoints. - - Configure the provided pytest-mock `mocker` to stub token metrics and - related metrics counters used by query endpoint tests. +def test_get_rag_tools() -> None: + """Test get_rag_tools returns None for empty list and correct tool format for vector stores.""" + assert get_rag_tools([]) is None + + tools = get_rag_tools(["db1", "db2"]) + assert isinstance(tools, list) + assert tools[0]["type"] == "file_search" + assert tools[0]["vector_store_ids"] == ["db1", "db2"] + assert tools[0]["max_num_results"] == 10 + assert "solr" not in tools[0] + + # Test with Solr parameters + solr_params = {"fq": ["product:*openshift*", "product_version:*4.16*"]} + tools_with_solr = get_rag_tools(["db1", "db2"], solr_params) + assert isinstance(tools_with_solr, list) + assert tools_with_solr[0]["type"] == "file_search" + assert tools_with_solr[0]["vector_store_ids"] == ["db1", "db2"] + assert tools_with_solr[0]["max_num_results"] == 10 + assert tools_with_solr[0]["solr"] == solr_params + + +def test_get_mcp_tools_with_and_without_token() -> None: + """Test get_mcp_tools with resolved_authorization_headers.""" + # Servers without authorization headers + servers_no_auth = [ + ModelContextProtocolServer(name="fs", url="http://localhost:3000"), + ModelContextProtocolServer(name="git", url="https://git.example.com/mcp"), + ] - Patches the token metrics extraction helper and the LLM metrics counters so - tests can run without emitting real metrics. 
- """ - mocker.patch( - "app.endpoints.query.extract_and_update_token_metrics", - return_value=TokenCounter(), - ) - # Mock the metrics that are called inside extract_and_update_token_metrics - mocker.patch("metrics.llm_token_sent_total") - mocker.patch("metrics.llm_token_received_total") - mocker.patch("metrics.llm_calls_total") + tools_no_auth = get_mcp_tools(servers_no_auth, token=None) + assert len(tools_no_auth) == 2 + assert tools_no_auth[0]["type"] == "mcp" + assert tools_no_auth[0]["server_label"] == "fs" + assert tools_no_auth[0]["server_url"] == "http://localhost:3000" + assert "headers" not in tools_no_auth[0] + + # Servers with kubernetes auth + servers_k8s = [ + ModelContextProtocolServer( + name="k8s-server", + url="http://localhost:3000", + authorization_headers={"Authorization": "kubernetes"}, + ), + ] + tools_k8s = get_mcp_tools(servers_k8s, token="user-k8s-token") + assert len(tools_k8s) == 1 + assert tools_k8s[0]["headers"] == {"Authorization": "Bearer user-k8s-token"} + + +def test_get_mcp_tools_with_mcp_headers() -> None: + """Test get_mcp_tools with client-provided headers.""" + # Server with client auth + servers = [ + ModelContextProtocolServer( + name="fs", + url="http://localhost:3000", + authorization_headers={"Authorization": "client", "X-Custom": "client"}, + ), + ] + # Test with mcp_headers provided + mcp_headers = { + "fs": { + "Authorization": "client-provided-token", + "X-Custom": "custom-value", + } + } + tools = get_mcp_tools(servers, token=None, mcp_headers=mcp_headers) + assert len(tools) == 1 + assert tools[0]["headers"] == { + "Authorization": "client-provided-token", + "X-Custom": "custom-value", + } -def mock_database_operations(mocker: MockerFixture) -> None: - """Helper function to mock database operations for query endpoints. + # Test with mcp_headers=None (server should be skipped since auth is required but unavailable) + tools_no_headers = get_mcp_tools(servers, token=None, mcp_headers=None) + assert len(tools_no_headers) == 0 # Server skipped due to missing required auth - Patch common database operations used by query endpoint tests. - This applies test-time patches so that conversation ownership checks - succeed, persistence of conversation details is stubbed out, and - `get_session` returns a context-manager mock whose - `query(...).filter_by(...).first()` returns `None`. +def test_get_mcp_tools_with_static_headers(tmp_path: Path) -> None: + """Test get_mcp_tools with static headers from config files.""" + # Create a secret file + secret_file = tmp_path / "token.txt" + secret_file.write_text("static-secret-token") - Parameters: - mocker (MockerFixture): The pytest-mock fixture used to apply patches. 
- """ - mocker.patch( - "app.endpoints.query.validate_conversation_ownership", return_value=True - ) - mocker.patch("app.endpoints.query.persist_user_conversation_details") + servers = [ + ModelContextProtocolServer( + name="server1", + url="http://localhost:3000", + authorization_headers={"Authorization": str(secret_file)}, + ), + ] - # Mock the database session and query - mock_session = mocker.Mock() - mock_session.query.return_value.filter_by.return_value.first.return_value = None - mock_session.__enter__ = mocker.Mock(return_value=mock_session) - mock_session.__exit__ = mocker.Mock(return_value=None) - mocker.patch("app.endpoints.query.get_session", return_value=mock_session) + tools = get_mcp_tools(servers, token=None) + assert len(tools) == 1 + assert tools[0]["headers"] == {"Authorization": "static-secret-token"} + + +def test_get_mcp_tools_with_mixed_headers(tmp_path: Path) -> None: + """Test get_mcp_tools with mixed header types.""" + # Create a secret file + secret_file = tmp_path / "api-key.txt" + secret_file.write_text("secret-api-key") + + servers = [ + ModelContextProtocolServer( + name="mixed-server", + url="http://localhost:3000", + authorization_headers={ + "Authorization": "kubernetes", + "X-API-Key": str(secret_file), + "X-Custom": "client", + }, + ), + ] + mcp_headers = { + "mixed-server": { + "X-Custom": "client-custom-value", + } + } -@pytest.fixture(name="setup_configuration") -def setup_configuration_fixture() -> AppConfig: - """Set up configuration for tests. + tools = get_mcp_tools(servers, token="k8s-token", mcp_headers=mcp_headers) + assert len(tools) == 1 + assert tools[0]["headers"] == { + "Authorization": "Bearer k8s-token", + "X-API-Key": "secret-api-key", + "X-Custom": "client-custom-value", + } - Create a reusable application configuration tailored for unit tests. - The returned AppConfig is initialized from a fixed dictionary that sets: - - a lightweight service configuration (localhost, port 8080, minimal workers, logging enabled), - - a test Llama Stack configuration (test API key and URL, not used as a library client), - - user data collection with transcripts disabled, - - an empty MCP servers list, - - a noop conversation cache. +def test_get_mcp_tools_skips_server_with_missing_auth() -> None: + """Test that servers with required but unavailable auth headers are skipped.""" + servers = [ + # Server with kubernetes auth but no token provided + ModelContextProtocolServer( + name="missing-k8s-auth", + url="http://localhost:3001", + authorization_headers={"Authorization": "kubernetes"}, + ), + # Server with client auth but no MCP-HEADERS provided + ModelContextProtocolServer( + name="missing-client-auth", + url="http://localhost:3002", + authorization_headers={"X-Token": "client"}, + ), + # Server with partial auth (2 headers required, only 1 available) + ModelContextProtocolServer( + name="partial-auth", + url="http://localhost:3003", + authorization_headers={ + "Authorization": "kubernetes", + "X-Custom": "client", + }, + ), + ] - Returns: - AppConfig: an initialized configuration instance suitable for test fixtures. 
- """ - config_dict: dict[Any, Any] = { - "name": "test", - "service": { - "host": "localhost", - "port": 8080, - "auth_enabled": False, - "workers": 1, - "color_log": True, - "access_log": True, - }, - "llama_stack": { - "api_key": "test-key", - "url": "http://test.com:1234", - "use_as_library_client": False, - }, - "user_data_collection": { - "transcripts_enabled": False, - }, - "mcp_servers": [], - "customization": None, - "conversation_cache": { - "type": "noop", - }, - } - cfg = AppConfig() - cfg.init_from_dict(config_dict) - return cfg + # No token, no mcp_headers + tools = get_mcp_tools(servers, token=None, mcp_headers=None) + # All servers should be skipped + assert len(tools) == 0 -def test_is_transcripts_enabled( - setup_configuration: AppConfig, mocker: MockerFixture -) -> None: - """Test that is_transcripts_enabled returns True when transcripts is not disabled.""" - # Override the transcripts_enabled setting - mocker.patch.object( - setup_configuration.user_data_collection_configuration, - "transcripts_enabled", - True, - ) - mocker.patch("app.endpoints.query.configuration", setup_configuration) +def test_get_mcp_tools_includes_server_without_auth() -> None: + """Test that servers without auth config are always included.""" + servers = [ + # Server with no auth requirements + ModelContextProtocolServer( + name="public-server", + url="http://localhost:3000", + authorization_headers={}, + ), + ] - assert is_transcripts_enabled() is True, "Transcripts should be enabled" + # Should work even without token or headers + tools = get_mcp_tools(servers, token=None, mcp_headers=None) + assert len(tools) == 1 + assert tools[0]["server_label"] == "public-server" + assert "headers" not in tools[0] -def test_is_transcripts_disabled( - setup_configuration: AppConfig, mocker: MockerFixture -) -> None: - """Test that is_transcripts_enabled returns False when transcripts is disabled.""" - # Use default transcripts_enabled=False from setup - mocker.patch("app.endpoints.query.configuration", setup_configuration) +@pytest.mark.asyncio +async def test_retrieve_response_no_tools_bypasses_tools(mocker: MockerFixture) -> None: + """Test that no_tools=True bypasses tool configuration and passes None to responses API.""" + mock_client = mocker.Mock() + # responses.create returns a synthetic OpenAI-like response + response_obj = mocker.Mock() + response_obj.id = "resp-1" + response_obj.output = [] + response_obj.usage = None # No usage info + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + # vector_stores.list should not matter when no_tools=True, but keep it valid + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + # Ensure system prompt resolution does not require real config + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello", no_tools=True) + summary, conv_id, referenced_docs, token_usage = await retrieve_response( 
+ mock_client, "model-x", qr, token="tkn" + ) - assert is_transcripts_enabled() is False, "Transcripts should be disabled" + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "" + assert referenced_docs == [] + assert token_usage.input_tokens == 0 # No usage info, so 0 + assert token_usage.output_tokens == 0 + # tools must be passed as None + kwargs = mock_client.responses.create.call_args.kwargs + assert kwargs["tools"] is None + assert kwargs["model"] == "model-x" + assert kwargs["instructions"] == "PROMPT" -def test_select_model_and_provider_id_from_request(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function.""" - mocker.patch( - "metrics.utils.configuration.inference.default_provider", - "default_provider", - ) +@pytest.mark.asyncio +async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=too-many-locals + mocker: MockerFixture, +) -> None: + """Test that retrieve_response correctly builds RAG and MCP tools from configuration.""" + mock_client = mocker.Mock() + response_obj = mocker.Mock() + response_obj.id = "resp-2" + response_obj.output = [] + response_obj.usage = None + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [mocker.Mock(id="dbA")] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + # Mock vector_io.query for direct vector querying + mock_query_response = mocker.Mock() + mock_query_response.chunks = [] + mock_query_response.scores = [] + mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + + # Mock shield moderation + mock_moderation_result = mocker.Mock() + mock_moderation_result.blocked = False mocker.patch( - "metrics.utils.configuration.inference.default_model", - "default_model", + "app.endpoints.query_v2.run_shield_moderation", + return_value=mock_moderation_result, ) - model_list = [ - mocker.Mock( - id="provider1/model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="provider2/model2", - custom_metadata={"model_type": "llm", "provider_id": "provider2"}, - ), - mocker.Mock( - id="default_provider/default_model", - custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, + mock_cfg = mocker.Mock() + mock_cfg.mcp_servers = [ + ModelContextProtocolServer( + name="fs", + url="http://localhost:3000", + authorization_headers={"Authorization": "kubernetes"}, ), ] + mocker.patch("app.endpoints.query_v2.configuration", mock_cfg) - # Create a query request with model and provider specified - query_request = QueryRequest( - query="What is OpenStack?", model="model2", provider="provider2" + qr = QueryRequest(query="hello") + _summary, conv_id, referenced_docs, token_usage = await retrieve_response( + mock_client, "model-y", qr, token="mytoken" ) - # Assert the model and provider from request take precedence from the configuration one - llama_stack_model_id, 
model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider - ) + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert referenced_docs == [] + assert token_usage.input_tokens == 0 # No usage info, so 0 + assert token_usage.output_tokens == 0 - assert llama_stack_model_id == "provider2/model2" - assert model_id == "model2" - assert provider_id == "provider2" + kwargs = mock_client.responses.create.call_args.kwargs + tools = kwargs["tools"] + assert isinstance(tools, list) + # Expect only MCP tools since RAG tools are skipped when doing direct vector querying + tool_types = {t.get("type") for t in tools} + assert tool_types == {"mcp"} + mcp_tool = next(t for t in tools if t["type"] == "mcp") + assert mcp_tool["server_label"] == "fs" + assert mcp_tool["headers"] == {"Authorization": "Bearer mytoken"} -def test_select_model_and_provider_id_from_configuration(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function.""" - mocker.patch( - "metrics.utils.configuration.inference.default_provider", - "default_provider", +@pytest.mark.asyncio +async def test_retrieve_response_parses_output_and_tool_calls( + mocker: MockerFixture, +) -> None: + """Test that retrieve_response correctly parses output content and tool calls from response.""" + mock_client = mocker.Mock() + + # Build output with content variants and tool calls + part1 = mocker.Mock(text="Hello ") + part1.annotations = [] # Ensure annotations is a list to avoid iteration error + part2 = mocker.Mock(text="world") + part2.annotations = [] + + output_item_1 = mocker.Mock() + output_item_1.type = "message" + output_item_1.role = "assistant" + output_item_1.content = [part1, part2] + + output_item_2 = mocker.Mock() + output_item_2.type = "message" + output_item_2.role = "assistant" + output_item_2.content = "!" 
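+    # NOTE: the Responses API can deliver assistant message content either as a
+    # list of text parts or as a plain string; retrieve_response is expected to
+    # accept both shapes and concatenate them ("Hello " + "world" + "!").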
+ + # Tool call as a separate output item (Responses API format) + tool_call_item = mocker.Mock() + tool_call_item.type = "function_call" + tool_call_item.id = "tc-1" + tool_call_item.call_id = "tc-1" + tool_call_item.name = "do_something" + tool_call_item.arguments = '{"x": 1}' + tool_call_item.status = None # Explicitly set to avoid Mock auto-creation + + response_obj = mocker.Mock() + response_obj.id = "resp-3" + response_obj.output = [output_item_1, output_item_2, tool_call_item] + response_obj.usage = None + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, referenced_docs, token_usage = await retrieve_response( + mock_client, "model-z", qr, token="tkn" ) - mocker.patch( - "metrics.utils.configuration.inference.default_model", - "default_model", + + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Hello world!" + assert len(summary.tool_calls) == 1 + assert summary.tool_calls[0].id == "tc-1" + assert summary.tool_calls[0].name == "do_something" + assert summary.tool_calls[0].args == {"x": 1} + assert referenced_docs == [] + assert token_usage.input_tokens == 0 # No usage info, so 0 + assert token_usage.output_tokens == 0 + + +@pytest.mark.asyncio +async def test_retrieve_response_with_usage_info(mocker: MockerFixture) -> None: + """Test that token usage is extracted when provided by the API as an object.""" + mock_client = mocker.Mock() + + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + output_item.content = "Test response" + output_item.tool_calls = [] + + # Mock usage information as object + mock_usage = mocker.Mock() + mock_usage.input_tokens = 150 + mock_usage.output_tokens = 75 + + response_obj = mocker.Mock() + response_obj.id = "resp-with-usage" + response_obj.output = [output_item] + response_obj.usage = mock_usage + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, 
_referenced_docs, token_usage = await retrieve_response( + mock_client, "model-usage", qr, token="tkn", provider_id="test-provider" ) - model_list = [ - mocker.Mock( - id="provider1/model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="default_provider/default_model", - custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, - ), - ] + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Test response" + assert token_usage.input_tokens == 150 + assert token_usage.output_tokens == 75 + assert token_usage.llm_calls == 1 - # Create a query request without model and provider specified - query_request = QueryRequest( - query="What is OpenStack?", + +@pytest.mark.asyncio +async def test_retrieve_response_with_usage_dict(mocker: MockerFixture) -> None: + """Test that token usage is extracted when provided by the API as a dict.""" + mock_client = mocker.Mock() + + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + output_item.content = "Test response dict" + output_item.tool_calls = [] + + # Mock usage information as dict (like llama stack does) + response_obj = mocker.Mock() + response_obj.id = "resp-with-usage-dict" + response_obj.output = [output_item] + response_obj.usage = {"input_tokens": 200, "output_tokens": 100} + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, _referenced_docs, token_usage = await retrieve_response( + mock_client, "model-usage-dict", qr, token="tkn", provider_id="test-provider" ) - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Test response dict" + assert token_usage.input_tokens == 200 + assert token_usage.output_tokens == 100 + assert token_usage.llm_calls == 1 + + +@pytest.mark.asyncio +async def test_retrieve_response_with_empty_usage_dict(mocker: MockerFixture) -> None: + """Test that empty usage dict is handled gracefully.""" + mock_client = mocker.Mock() + + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + output_item.content = "Test response empty usage" + output_item.tool_calls = [] + + # Mock usage information as empty dict (tokens are 0 or missing) + response_obj = mocker.Mock() + response_obj.id = "resp-empty-usage" + response_obj.output = [output_item] + response_obj.usage = {} # Empty dict + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = 
mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, _referenced_docs, token_usage = await retrieve_response( + mock_client, "model-empty-usage", qr, token="tkn", provider_id="test-provider" ) - # Assert that the default model and provider from the configuration are returned - assert llama_stack_model_id == "default_provider/default_model" - assert model_id == "default_model" - assert provider_id == "default_provider" + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Test response empty usage" + assert token_usage.input_tokens == 0 + assert token_usage.output_tokens == 0 + assert token_usage.llm_calls == 1 # Always 1, even when no token usage data -def test_select_model_and_provider_id_first_from_list(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function when no model is specified.""" - model_list = [ - mocker.Mock( - id="not_llm_type", - custom_metadata={"model_type": "embedding", "provider_id": "provider1"}, - ), - mocker.Mock( - id="first_model", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="second_model", - custom_metadata={"model_type": "llm", "provider_id": "provider2"}, - ), - ] +@pytest.mark.asyncio +async def test_retrieve_response_validates_attachments(mocker: MockerFixture) -> None: + """Test that retrieve_response validates attachments and includes them in the input string.""" + mock_client = mocker.Mock() + response_obj = mocker.Mock() + response_obj.id = "resp-4" + response_obj.output = [] + response_obj.usage = None + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + validate_spy = mocker.patch( + "app.endpoints.query_v2.validate_attachments_metadata", return_value=None + ) - query_request = QueryRequest(query="What is OpenStack?") + attachments = [ + Attachment(attachment_type="log", content_type="text/plain", content="x"), + ] - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider + qr = QueryRequest(query="hello", attachments=attachments) + _summary, _cid, 
_ref_docs, _token_usage = await retrieve_response( + mock_client, "model-a", qr, token="tkn" ) - # Assert return the first available LLM model when no model/provider is - # specified in the request or in the configuration - assert llama_stack_model_id == "first_model" - assert model_id == "first_model" - assert provider_id == "provider1" + validate_spy.assert_called_once() + # Verify that attachments are included in the input + kwargs = mock_client.responses.create.call_args.kwargs + assert "input" in kwargs + # Input should be a string containing both query and attachment + assert isinstance(kwargs["input"], str) + assert "hello" in kwargs["input"] + assert "[Attachment: log]" in kwargs["input"] + assert "x" in kwargs["input"] + +@pytest.mark.asyncio +async def test_query_endpoint_handler_v2_success( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test successful query endpoint handler execution with proper response structure.""" + # Mock configuration to avoid configuration not loaded errors + mock_config = mocker.Mock() + mock_config.llama_stack_configuration = mocker.Mock() + mock_config.quota_limiters = [] + mocker.patch("app.endpoints.query_v2.configuration", mock_config) -def test_select_model_and_provider_id_invalid_model(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function with an invalid model.""" mock_client = mocker.Mock() - mock_client.models.list.return_value = [ - mocker.Mock( - id="model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - ] + mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) + mocker.patch( + "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client + ) + mocker.patch("app.endpoints.query.evaluate_model_hints", return_value=(None, None)) + mocker.patch( + "app.endpoints.query.select_model_and_provider_id", + return_value=("llama/m", "m", "p"), + ) - query_request = QueryRequest( - query="What is OpenStack?", model="invalid_model", provider="provider1" + summary = mocker.Mock( + llm_response="ANSWER", tool_calls=[], tool_results=[], rag_chunks=[] + ) + token_usage = mocker.Mock(input_tokens=10, output_tokens=20) + # Use a valid SUID for conversation_id + test_conversation_id = "00000000-0000-0000-0000-000000000001" + mocker.patch( + "app.endpoints.query_v2.retrieve_response", + return_value=(summary, test_conversation_id, [], token_usage), + ) + mocker.patch("app.endpoints.query_v2.get_topic_summary", return_value="Topic") + mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False) + mocker.patch("app.endpoints.query.persist_user_conversation_details") + mocker.patch("utils.endpoints.store_conversation_into_cache") + mocker.patch("app.endpoints.query.get_session") + + # Add missing mocks for quota functions + mocker.patch("utils.quota.check_tokens_available") + mocker.patch("utils.quota.consume_tokens") + mocker.patch("utils.quota.get_available_quotas", return_value={}) + + # Mock the request state + dummy_request.state.authorized_actions = [] + + res = await query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="hi"), + auth=MOCK_AUTH, + mcp_headers={}, ) - with pytest.raises(HTTPException) as exc_info: - select_model_and_provider_id( - mock_client.models.list(), query_request.model, query_request.provider + assert res.conversation_id == test_conversation_id + assert res.response == "ANSWER" + + +@pytest.mark.asyncio +async def test_query_endpoint_handler_v2_api_connection_error( + mocker: 
MockerFixture, dummy_request: Request
+) -> None:
+    """Test that query endpoint handler properly handles and reports API connection errors."""
+    # Mock configuration to avoid "configuration not loaded" errors
+    mock_config = mocker.Mock()
+    mock_config.llama_stack_configuration = mocker.Mock()
+    mocker.patch("app.endpoints.query_v2.configuration", mock_config)
+
+    def _raise(*_args: Any, **_kwargs: Any) -> None:
+        """Always raise APIConnectionError to simulate a connection failure.
+
+        Args:
+            *_args: Variable length argument list (ignored).
+            **_kwargs: Arbitrary keyword arguments (ignored).
+
+        Raises:
+            APIConnectionError: Always raised, carrying a minimal Request object.
+        """
+        request = Request(scope={"type": "http"})
+        raise APIConnectionError(request=request)  # type: ignore
+
+    mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise)
+
+    fail_metric = mocker.patch("metrics.llm_calls_failures_total")
+
+    with pytest.raises(HTTPException) as exc:
+        await query_endpoint_handler_v2(
+            request=dummy_request,
+            query_request=QueryRequest(query="hi"),
+            auth=("user123", "", False, "token-abc"),
+            mcp_headers={},
+        )
+
+    assert exc.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
+    detail = exc.value.detail
+    assert isinstance(detail, dict)
+    assert detail["response"] == "Unable to connect to Llama Stack"  # type: ignore[index]
+    fail_metric.inc.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_query_endpoint_quota_exceeded(
+    mocker: MockerFixture, dummy_request: Request
+) -> None:
+    """Test that query endpoint raises HTTP 429 when model quota is exceeded."""
+    query_request = QueryRequest(
+        query="What is OpenStack?",
+        provider="openai",
+        model="gpt-4o-mini",
+        attachments=[],
+    )  # type: ignore
+    mock_client = mocker.AsyncMock()
+    mock_client.models.list = mocker.AsyncMock(return_value=[])
+    mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test"))
+    mock_client.responses.create.side_effect = RateLimitError(
+        "Rate limit exceeded for model gpt-4o-mini",
+        response=mock_response,
+        body=None,
+    )
+    # Mock conversation creation (needed for query_v2)
+    mock_conversation = mocker.Mock()
+    mock_conversation.id = "conv_abc123"
+    mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation)
+    mocker.patch(
+        "app.endpoints.query.select_model_and_provider_id",
+        return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"),
+    )
+    mocker.patch("app.endpoints.query.validate_model_provider_override")
+    mocker.patch(
+        "client.AsyncLlamaStackClientHolder.get_client",
+        return_value=mock_client,
+    )
+    mocker.patch("app.endpoints.query.check_tokens_available")
+    mocker.patch("app.endpoints.query.get_session")
+    mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False)
+    mocker.patch(
+        "app.endpoints.query_v2.run_shield_moderation",
+        return_value=ShieldModerationResult(blocked=False),
+    )
+    mocker.patch(
+        "app.endpoints.query_v2.prepare_tools_for_responses_api", 
return_value=None + ) with pytest.raises(HTTPException) as exc_info: - select_model_and_provider_id( - mock_client.models.list(), query_request.model, query_request.provider + await query_endpoint_handler_v2( + dummy_request, query_request=query_request, auth=MOCK_AUTH ) - - assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND + assert exc_info.value.status_code == status.HTTP_429_TOO_MANY_REQUESTS detail = exc_info.value.detail assert isinstance(detail, dict) - assert detail["response"] == "Model not found" - # The cause may vary, but should indicate no model found - assert "Model" in detail["cause"] + assert detail["response"] == "The quota has been exceeded" # type: ignore + assert "gpt-4o-mini" in detail["cause"] # type: ignore -def test_validate_attachments_metadata() -> None: - """Test the validate_attachments_metadata function.""" - attachments = [ - Attachment( - attachment_type="log", - content_type="text/plain", - content="this is attachment", - ), - Attachment( - attachment_type="configuration", - content_type="application/yaml", - content="kind: Pod\n metadata:\n name: private-reg", - ), - ] - - # If no exception is raised, the test passes - validate_attachments_metadata(attachments) - +@pytest.mark.asyncio +async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: + """Test that shield moderation runs and passes when content is safe.""" + mock_client = mocker.Mock() -def test_validate_attachments_metadata_invalid_type() -> None: - """Test the validate_attachments_metadata function with invalid attachment type.""" - attachments = [ - Attachment( - attachment_type="invalid_type", - content_type="text/plain", - content="this is attachment", - ), - ] + # Create mock shield with provider_resource_id + mock_shield = mocker.Mock() + mock_shield.identifier = "content-safety-shield" + mock_shield.provider_resource_id = "moderation-model" + mock_client.shields.list = mocker.AsyncMock(return_value=[mock_shield]) + + # Create mock model matching the shield's provider_resource_id + mock_model = mocker.Mock() + mock_model.id = "moderation-model" + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + + # Mock moderations.create to return safe (not flagged) content + mock_moderation_result = mocker.Mock() + mock_moderation_result.flagged = False + mock_moderation_response = mocker.Mock() + mock_moderation_response.results = [mock_moderation_result] + mock_client.moderations.create = mocker.AsyncMock( + return_value=mock_moderation_response + ) - with pytest.raises(HTTPException) as exc_info: - validate_attachments_metadata(attachments) - assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + output_item.content = "Safe response" + + response_obj = mocker.Mock() + response_obj.id = "resp-shields" + response_obj.output = [output_item] + response_obj.usage = None + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + 
mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( + mock_client, "model-shields", qr, token="tkn", provider_id="test-provider" + ) - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Invalid attribute value" - assert "Invalid attatchment type invalid_type" in detail["cause"] + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Safe response" + # Verify that moderation was called with the user's query + mock_client.moderations.create.assert_called_once_with( + input="hello", model="moderation-model" + ) + # Verify that responses.create was called (moderation passed) + mock_client.responses.create.assert_called_once() -def test_validate_attachments_metadata_invalid_content_type() -> None: - """Test the validate_attachments_metadata function with invalid attachment type.""" - attachments = [ - Attachment( - attachment_type="log", - content_type="text/invalid_content_type", - content="this is attachment", - ), - ] - with pytest.raises(HTTPException) as exc_info: - validate_attachments_metadata(attachments) - assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT +@pytest.mark.asyncio +async def test_retrieve_response_with_no_shields_available( + mocker: MockerFixture, +) -> None: + """Test that LLM is called when no shields are configured.""" + mock_client = mocker.Mock() - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Invalid attribute value" - assert ( - "Invalid attatchment content type text/invalid_content_type" in detail["cause"] + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + output_item.content = "Response without shields" + + response_obj = mocker.Mock() + response_obj.id = "resp-no-shields" + response_obj.output = [output_item] + response_obj.usage = None + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="hello") + summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( + mock_client, "model-no-shields", qr, token="tkn", provider_id="test-provider" ) + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Response without shields" -def test_no_tools_parameter_backward_compatibility() -> None: - """Test that default behavior is unchanged when no_tools parameter is not specified.""" - # This test ensures that existing code that doesn't specify no_tools continues to work - query_request = QueryRequest(query="What is OpenStack?") + # Verify that responses.create was called + 
mock_client.responses.create.assert_called_once() - # Verify default value - assert query_request.no_tools is False - # Test that QueryRequest can be created without no_tools parameter - query_request_minimal = QueryRequest(query="Simple query") - assert query_request_minimal.no_tools is False +@pytest.mark.asyncio +async def test_retrieve_response_detects_shield_violation( + mocker: MockerFixture, +) -> None: + """Test that shield moderation blocks content and returns early.""" + mock_client = mocker.Mock() + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_client.conversations.items.create = mocker.AsyncMock(return_value=None) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) -@pytest.mark.parametrize( - "user_conversation,request_values,expected_values", - [ - # No user conversation, no request values - ( - None, - (None, None), - # Expect no values to be used - (None, None), - ), - # No user conversation, request values provided - ( - None, - ("foo", "bar"), - # Expect request values to be used - ("foo", "bar"), - ), - # User conversation exists, no request values - ( - UserConversation( - id="conv1", - user_id="user1", - last_used_provider="foo", - last_used_model="bar", - message_count=1, - ), - ( - None, - None, - ), - # Expect conversation values to be used - ( - "foo", - "bar", - ), - ), - # Request matches user conversation - ( - UserConversation( - id="conv1", - user_id="user1", - last_used_provider="foo", - last_used_model="bar", - message_count=1, - ), - ( - "foo", - "bar", - ), - # Expect request values to be used - ( - "foo", - "bar", - ), + mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + # Mock run_shield_moderation to return blocked + mocker.patch( + "app.endpoints.query_v2.run_shield_moderation", + return_value=ShieldModerationResult( + blocked=True, message="Content violates safety policy" ), - ], - ids=[ - "No user conversation, no request values", - "No user conversation, request values provided", - "User conversation exists, no request values", - "Request matches user conversation", - ], -) -def test_evaluate_model_hints( - user_conversation: list, - request_values: list, - expected_values: list, -) -> None: - """Test evaluate_model_hints function with various scenarios.""" - # Unpack fixtures - request_provider, request_model = request_values - expected_provider, expected_model = expected_values + ) - query_request = QueryRequest( - query="What is love?", - provider=request_provider, - model=request_model, - ) # pylint: disable=missing-kwoa + qr = QueryRequest(query="dangerous query") + summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( + mock_client, "model-violation", qr, token="tkn", provider_id="test-provider" + ) + + assert conv_id == "abc123def456" # Normalized (without conv_ prefix) + assert summary.llm_response == "Content violates safety policy" + + # Verify that responses.create was NOT called (blocked by moderation) + mock_client.responses.create.assert_not_called() + + +def _create_message_output_with_citations(mocker: MockerFixture) -> Any: + """Create mock message output item with content annotations (citations).""" + # 1. 
Output item with message content annotations (citations) + output_item = mocker.Mock() + output_item.type = "message" + output_item.role = "assistant" + + # Mock content with annotations + content_part = mocker.Mock() + content_part.type = "output_text" + content_part.text = "Here is a citation." + + annotation1 = mocker.Mock() + annotation1.type = "url_citation" + annotation1.url = "http://example.com/doc1" + annotation1.title = "Doc 1" + + annotation2 = mocker.Mock() + annotation2.type = "file_citation" + annotation2.filename = "file1.txt" + annotation2.url = None + annotation2.title = None + + content_part.annotations = [annotation1, annotation2] + output_item.content = [content_part] + return output_item + + +def _create_file_search_output(mocker: MockerFixture) -> Any: + """Create mock file search tool call output with results.""" + # 2. Output item with file search tool call results + output_item = mocker.Mock() + output_item.type = "file_search_call" + output_item.id = "file-search-1" + output_item.queries = ( + [] + ) # Ensure queries is a list to avoid iteration error in tool summary + output_item.status = "completed" + # Create mock result objects with proper attributes matching real llama-stack response + result_1 = mocker.Mock() + result_1.filename = "file2.pdf" + result_1.attributes = { + "docs_url": "http://example.com/doc2", + "title": "Title 1", + "document_id": "doc-123", + } + result_1.text = "Sample text from file2.pdf" + result_1.score = 0.95 + result_1.file_id = "file-123" + result_1.model_dump = mocker.Mock( + return_value={ + "filename": "file2.pdf", + "attributes": { + "docs_url": "http://example.com/doc2", + "title": "Title 1", + "document_id": "doc-123", + }, + "text": "Sample text from file2.pdf", + "score": 0.95, + "file_id": "file-123", + } + ) + + result_2 = mocker.Mock() + result_2.filename = "file3.docx" + result_2.attributes = { + "docs_url": "http://example.com/doc3", + "title": "Title 2", + "document_id": "doc-456", + } + result_2.text = "Sample text from file3.docx" + result_2.score = 0.85 + result_2.file_id = "file-456" + result_2.model_dump = mocker.Mock( + return_value={ + "filename": "file3.docx", + "attributes": { + "docs_url": "http://example.com/doc3", + "title": "Title 2", + "document_id": "doc-456", + }, + "text": "Sample text from file3.docx", + "score": 0.85, + "file_id": "file-456", + } + ) - model_id, provider_id = evaluate_model_hints(user_conversation, query_request) + output_item.results = [result_1, result_2] + return output_item + + +@pytest.mark.asyncio +async def test_retrieve_response_parses_referenced_documents( + mocker: MockerFixture, +) -> None: + """Test that retrieve_response correctly parses referenced documents from response.""" + mock_client = mocker.AsyncMock() + + # Create output items using helper functions + output_item_1 = _create_message_output_with_citations(mocker) + output_item_2 = _create_file_search_output(mocker) + + response_obj = mocker.Mock() + response_obj.id = "resp-docs" + response_obj.output = [output_item_1, output_item_2] + response_obj.usage = None + + mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + 
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") + mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) + + qr = QueryRequest(query="query with docs") + _summary, _conv_id, referenced_docs, _token_usage = await retrieve_response( + mock_client, "model-docs", qr, token="tkn", provider_id="test-provider" + ) - assert provider_id == expected_provider - assert model_id == expected_model + # Referenced documents are now extracted only from file_search_call attributes + assert len(referenced_docs) == 2 + + # Verify Title 1 (File search result with URL) + doc1 = next((d for d in referenced_docs if d.doc_title == "Title 1"), None) + assert doc1 + assert doc1.doc_title == "Title 1" + assert str(doc1.doc_url) == "http://example.com/doc2" + + # Verify Title 2 (File search result with URL) + doc2 = next((d for d in referenced_docs if d.doc_title == "Title 2"), None) + assert doc2 + assert doc2.doc_title == "Title 2" + assert str(doc2.doc_url) == "http://example.com/doc3" + + # Verify RAG chunks were extracted from file_search_call results + assert len(_summary.rag_chunks) == 2 + assert _summary.rag_chunks[0].content == "Sample text from file2.pdf" + assert _summary.rag_chunks[0].source == "file2.pdf" + assert _summary.rag_chunks[0].score == 0.95 + assert _summary.rag_chunks[1].content == "Sample text from file3.docx" + assert _summary.rag_chunks[1].source == "file3.docx" + assert _summary.rag_chunks[1].score == 0.85 diff --git a/tests/unit/app/endpoints/test_query_old.py b/tests/unit/app/endpoints/test_query_old.py new file mode 100644 index 000000000..e9eaa4eaa --- /dev/null +++ b/tests/unit/app/endpoints/test_query_old.py @@ -0,0 +1,486 @@ +"""Unit tests for the /query REST API endpoint.""" + +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-lines +# pylint: disable=ungrouped-imports + +from typing import Any + +import pytest +from fastapi import HTTPException, Request, status +from pytest_mock import MockerFixture + +from app.endpoints.query_old import ( + evaluate_model_hints, + is_transcripts_enabled, + select_model_and_provider_id, + validate_attachments_metadata, +) +from configuration import AppConfig +from models.config import Action +from models.database.conversations import UserConversation +from models.requests import Attachment, QueryRequest +from utils.token_counter import TokenCounter + +# User ID must be proper UUID +MOCK_AUTH = ( + "00000001-0001-0001-0001-000000000001", + "mock_username", + False, + "mock_token", +) + + +@pytest.fixture +def dummy_request() -> Request: + """Dummy request fixture for testing. + + Create a minimal FastAPI Request with test-ready authorization state. + + The returned Request has a minimal HTTP scope and a + `state.authorized_actions` attribute initialized to a set containing all + members of the `Action` enum, suitable for use in unit tests that require + an authenticated request context. + + Returns: + req (Request): FastAPI Request with `state.authorized_actions` set to `set(Action)`. + """ + req = Request( + scope={ + "type": "http", + } + ) + + req.state.authorized_actions = set(Action) + return req + + +def mock_metrics(mocker: MockerFixture) -> None: + """Helper function to mock metrics operations for query endpoints. + + Configure the provided pytest-mock `mocker` to stub token metrics and + related metrics counters used by query endpoint tests. + + Patches the token metrics extraction helper and the LLM metrics counters so + tests can run without emitting real metrics. 
+ """ + mocker.patch( + "app.endpoints.query.extract_and_update_token_metrics", + return_value=TokenCounter(), + ) + # Mock the metrics that are called inside extract_and_update_token_metrics + mocker.patch("metrics.llm_token_sent_total") + mocker.patch("metrics.llm_token_received_total") + mocker.patch("metrics.llm_calls_total") + + +def mock_database_operations(mocker: MockerFixture) -> None: + """Helper function to mock database operations for query endpoints. + + Patch common database operations used by query endpoint tests. + + This applies test-time patches so that conversation ownership checks + succeed, persistence of conversation details is stubbed out, and + `get_session` returns a context-manager mock whose + `query(...).filter_by(...).first()` returns `None`. + + Parameters: + mocker (MockerFixture): The pytest-mock fixture used to apply patches. + """ + mocker.patch( + "app.endpoints.query.validate_conversation_ownership", return_value=True + ) + mocker.patch("app.endpoints.query.persist_user_conversation_details") + + # Mock the database session and query + mock_session = mocker.Mock() + mock_session.query.return_value.filter_by.return_value.first.return_value = None + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + mocker.patch("app.endpoints.query.get_session", return_value=mock_session) + + +@pytest.fixture(name="setup_configuration") +def setup_configuration_fixture() -> AppConfig: + """Set up configuration for tests. + + Create a reusable application configuration tailored for unit tests. + + The returned AppConfig is initialized from a fixed dictionary that sets: + - a lightweight service configuration (localhost, port 8080, minimal workers, logging enabled), + - a test Llama Stack configuration (test API key and URL, not used as a library client), + - user data collection with transcripts disabled, + - an empty MCP servers list, + - a noop conversation cache. + + Returns: + AppConfig: an initialized configuration instance suitable for test fixtures. 
+ """ + config_dict: dict[Any, Any] = { + "name": "test", + "service": { + "host": "localhost", + "port": 8080, + "auth_enabled": False, + "workers": 1, + "color_log": True, + "access_log": True, + }, + "llama_stack": { + "api_key": "test-key", + "url": "http://test.com:1234", + "use_as_library_client": False, + }, + "user_data_collection": { + "transcripts_enabled": False, + }, + "mcp_servers": [], + "customization": None, + "conversation_cache": { + "type": "noop", + }, + } + cfg = AppConfig() + cfg.init_from_dict(config_dict) + return cfg + + +def test_is_transcripts_enabled( + setup_configuration: AppConfig, mocker: MockerFixture +) -> None: + """Test that is_transcripts_enabled returns True when transcripts is not disabled.""" + # Override the transcripts_enabled setting + mocker.patch.object( + setup_configuration.user_data_collection_configuration, + "transcripts_enabled", + True, + ) + mocker.patch("app.endpoints.query.configuration", setup_configuration) + + assert is_transcripts_enabled() is True, "Transcripts should be enabled" + + +def test_is_transcripts_disabled( + setup_configuration: AppConfig, mocker: MockerFixture +) -> None: + """Test that is_transcripts_enabled returns False when transcripts is disabled.""" + # Use default transcripts_enabled=False from setup + mocker.patch("app.endpoints.query.configuration", setup_configuration) + + assert is_transcripts_enabled() is False, "Transcripts should be disabled" + + +def test_select_model_and_provider_id_from_request(mocker: MockerFixture) -> None: + """Test the select_model_and_provider_id function.""" + mocker.patch( + "metrics.utils.configuration.inference.default_provider", + "default_provider", + ) + mocker.patch( + "metrics.utils.configuration.inference.default_model", + "default_model", + ) + + model_list = [ + mocker.Mock( + id="provider1/model1", + custom_metadata={"model_type": "llm", "provider_id": "provider1"}, + ), + mocker.Mock( + id="provider2/model2", + custom_metadata={"model_type": "llm", "provider_id": "provider2"}, + ), + mocker.Mock( + id="default_provider/default_model", + custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, + ), + ] + + # Create a query request with model and provider specified + query_request = QueryRequest( + query="What is OpenStack?", model="model2", provider="provider2" + ) + + # Assert the model and provider from request take precedence from the configuration one + llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( + model_list, query_request.model, query_request.provider + ) + + assert llama_stack_model_id == "provider2/model2" + assert model_id == "model2" + assert provider_id == "provider2" + + +def test_select_model_and_provider_id_from_configuration(mocker: MockerFixture) -> None: + """Test the select_model_and_provider_id function.""" + mocker.patch( + "metrics.utils.configuration.inference.default_provider", + "default_provider", + ) + mocker.patch( + "metrics.utils.configuration.inference.default_model", + "default_model", + ) + + model_list = [ + mocker.Mock( + id="provider1/model1", + custom_metadata={"model_type": "llm", "provider_id": "provider1"}, + ), + mocker.Mock( + id="default_provider/default_model", + custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, + ), + ] + + # Create a query request without model and provider specified + query_request = QueryRequest( + query="What is OpenStack?", + ) + + llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( + model_list, 
query_request.model, query_request.provider + ) + + # Assert that the default model and provider from the configuration are returned + assert llama_stack_model_id == "default_provider/default_model" + assert model_id == "default_model" + assert provider_id == "default_provider" + + +def test_select_model_and_provider_id_first_from_list(mocker: MockerFixture) -> None: + """Test the select_model_and_provider_id function when no model is specified.""" + model_list = [ + mocker.Mock( + id="not_llm_type", + custom_metadata={"model_type": "embedding", "provider_id": "provider1"}, + ), + mocker.Mock( + id="first_model", + custom_metadata={"model_type": "llm", "provider_id": "provider1"}, + ), + mocker.Mock( + id="second_model", + custom_metadata={"model_type": "llm", "provider_id": "provider2"}, + ), + ] + + query_request = QueryRequest(query="What is OpenStack?") + + llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( + model_list, query_request.model, query_request.provider + ) + + # Assert return the first available LLM model when no model/provider is + # specified in the request or in the configuration + assert llama_stack_model_id == "first_model" + assert model_id == "first_model" + assert provider_id == "provider1" + + +def test_select_model_and_provider_id_invalid_model(mocker: MockerFixture) -> None: + """Test the select_model_and_provider_id function with an invalid model.""" + mock_client = mocker.Mock() + mock_client.models.list.return_value = [ + mocker.Mock( + id="model1", + custom_metadata={"model_type": "llm", "provider_id": "provider1"}, + ), + ] + + query_request = QueryRequest( + query="What is OpenStack?", model="invalid_model", provider="provider1" + ) + + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + mock_client.models.list(), query_request.model, query_request.provider + ) + + assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail["response"] == "Model not found" + assert "invalid_model" in detail["cause"] + + +def test_select_model_and_provider_id_no_available_models( + mocker: MockerFixture, +) -> None: + """Test the select_model_and_provider_id function with no available models.""" + mock_client = mocker.Mock() + # empty list of models + mock_client.models.list.return_value = [] + + query_request = QueryRequest(query="What is OpenStack?", model=None, provider=None) + + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + mock_client.models.list(), query_request.model, query_request.provider + ) + + assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail["response"] == "Model not found" + # The cause may vary, but should indicate no model found + assert "Model" in detail["cause"] + + +def test_validate_attachments_metadata() -> None: + """Test the validate_attachments_metadata function.""" + attachments = [ + Attachment( + attachment_type="log", + content_type="text/plain", + content="this is attachment", + ), + Attachment( + attachment_type="configuration", + content_type="application/yaml", + content="kind: Pod\n metadata:\n name: private-reg", + ), + ] + + # If no exception is raised, the test passes + validate_attachments_metadata(attachments) + + +def test_validate_attachments_metadata_invalid_type() -> None: + """Test the validate_attachments_metadata function with invalid attachment type.""" + 
+    attachments = [
+        Attachment(
+            attachment_type="invalid_type",
+            content_type="text/plain",
+            content="this is attachment",
+        ),
+    ]
+
+    with pytest.raises(HTTPException) as exc_info:
+        validate_attachments_metadata(attachments)
+    assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT
+
+    detail = exc_info.value.detail
+    assert isinstance(detail, dict)
+    assert detail["response"] == "Invalid attribute value"
+    assert "Invalid attatchment type invalid_type" in detail["cause"]
+
+
+def test_validate_attachments_metadata_invalid_content_type() -> None:
+    """Test the validate_attachments_metadata function with an invalid content type."""
+    attachments = [
+        Attachment(
+            attachment_type="log",
+            content_type="text/invalid_content_type",
+            content="this is attachment",
+        ),
+    ]
+
+    with pytest.raises(HTTPException) as exc_info:
+        validate_attachments_metadata(attachments)
+    assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT
+
+    detail = exc_info.value.detail
+    assert isinstance(detail, dict)
+    assert detail["response"] == "Invalid attribute value"
+    assert (
+        "Invalid attatchment content type text/invalid_content_type" in detail["cause"]
+    )
+
+
+def test_no_tools_parameter_backward_compatibility() -> None:
+    """Test that default behavior is unchanged when no_tools parameter is not specified."""
+    # This test ensures that existing code that doesn't specify no_tools continues to work
+    query_request = QueryRequest(query="What is OpenStack?")
+
+    # Verify default value
+    assert query_request.no_tools is False
+
+    # Test that QueryRequest can be created without no_tools parameter
+    query_request_minimal = QueryRequest(query="Simple query")
+    assert query_request_minimal.no_tools is False
+
+
+@pytest.mark.parametrize(
+    "user_conversation,request_values,expected_values",
+    [
+        # No user conversation, no request values
+        (
+            None,
+            (None, None),
+            # Expect no values to be used
+            (None, None),
+        ),
+        # No user conversation, request values provided
+        (
+            None,
+            ("foo", "bar"),
+            # Expect request values to be used
+            ("foo", "bar"),
+        ),
+        # User conversation exists, no request values
+        (
+            UserConversation(
+                id="conv1",
+                user_id="user1",
+                last_used_provider="foo",
+                last_used_model="bar",
+                message_count=1,
+            ),
+            (
+                None,
+                None,
+            ),
+            # Expect conversation values to be used
+            (
+                "foo",
+                "bar",
+            ),
+        ),
+        # Request matches user conversation
+        (
+            UserConversation(
+                id="conv1",
+                user_id="user1",
+                last_used_provider="foo",
+                last_used_model="bar",
+                message_count=1,
+            ),
+            (
+                "foo",
+                "bar",
+            ),
+            # Expect request values to be used
+            (
+                "foo",
+                "bar",
+            ),
+        ),
+    ],
+    ids=[
+        "No user conversation, no request values",
+        "No user conversation, request values provided",
+        "User conversation exists, no request values",
+        "Request matches user conversation",
+    ],
+)
+def test_evaluate_model_hints(
+    user_conversation: UserConversation | None,
+    request_values: tuple[str | None, str | None],
+    expected_values: tuple[str | None, str | None],
+) -> None:
+    """Test evaluate_model_hints function with various scenarios."""
+    # Unpack the parametrized values
+    request_provider, request_model = request_values
+    expected_provider, expected_model = expected_values
+
+    query_request = QueryRequest(
+        query="What is love?",
+        provider=request_provider,
+        model=request_model,
+    )  # pylint: disable=missing-kwoa
+
+    model_id, provider_id = evaluate_model_hints(user_conversation, query_request)
+
+    assert provider_id == expected_provider
+    assert model_id == expected_model
diff --git a/tests/unit/app/endpoints/test_query_v2.py 
b/tests/unit/app/endpoints/test_query_v2.py deleted file mode 100644 index 6fd6f3720..000000000 --- a/tests/unit/app/endpoints/test_query_v2.py +++ /dev/null @@ -1,1039 +0,0 @@ -# pylint: disable=redefined-outer-name, import-error,too-many-locals,too-many-lines -# pyright: reportCallIssue=false -"""Unit tests for the /query (v2) REST API endpoint using Responses API.""" - -from pathlib import Path -from typing import Any - -import pytest -from fastapi import HTTPException, Request, status -import httpx -from llama_stack_client import APIConnectionError, RateLimitError -from pytest_mock import MockerFixture - -from app.endpoints.query_v2 import ( - get_mcp_tools, - get_rag_tools, - query_endpoint_handler_v2, - retrieve_response, -) -from models.config import ModelContextProtocolServer -from models.requests import Attachment, QueryRequest -from utils.types import ShieldModerationResult - -# User ID must be proper UUID -MOCK_AUTH = ( - "00000001-0001-0001-0001-000000000001", - "mock_username", - False, - "mock_token", -) - - -@pytest.fixture -def dummy_request() -> Request: - """Create a dummy FastAPI Request object for testing. - - Create a minimal FastAPI Request object suitable for unit tests. - - Returns: - request (fastapi.Request): A Request constructed with a bare HTTP scope - (type "http") for use in tests. - """ - req = Request(scope={"type": "http"}) - return req - - -def test_get_rag_tools() -> None: - """Test get_rag_tools returns None for empty list and correct tool format for vector stores.""" - assert get_rag_tools([]) is None - - tools = get_rag_tools(["db1", "db2"]) - assert isinstance(tools, list) - assert tools[0]["type"] == "file_search" - assert tools[0]["vector_store_ids"] == ["db1", "db2"] - assert tools[0]["max_num_results"] == 10 - assert "solr" not in tools[0] - - # Test with Solr parameters - solr_params = {"fq": ["product:*openshift*", "product_version:*4.16*"]} - tools_with_solr = get_rag_tools(["db1", "db2"], solr_params) - assert isinstance(tools_with_solr, list) - assert tools_with_solr[0]["type"] == "file_search" - assert tools_with_solr[0]["vector_store_ids"] == ["db1", "db2"] - assert tools_with_solr[0]["max_num_results"] == 10 - assert tools_with_solr[0]["solr"] == solr_params - - -def test_get_mcp_tools_with_and_without_token() -> None: - """Test get_mcp_tools with resolved_authorization_headers.""" - # Servers without authorization headers - servers_no_auth = [ - ModelContextProtocolServer(name="fs", url="http://localhost:3000"), - ModelContextProtocolServer(name="git", url="https://git.example.com/mcp"), - ] - - tools_no_auth = get_mcp_tools(servers_no_auth, token=None) - assert len(tools_no_auth) == 2 - assert tools_no_auth[0]["type"] == "mcp" - assert tools_no_auth[0]["server_label"] == "fs" - assert tools_no_auth[0]["server_url"] == "http://localhost:3000" - assert "headers" not in tools_no_auth[0] - - # Servers with kubernetes auth - servers_k8s = [ - ModelContextProtocolServer( - name="k8s-server", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - tools_k8s = get_mcp_tools(servers_k8s, token="user-k8s-token") - assert len(tools_k8s) == 1 - assert tools_k8s[0]["headers"] == {"Authorization": "Bearer user-k8s-token"} - - -def test_get_mcp_tools_with_mcp_headers() -> None: - """Test get_mcp_tools with client-provided headers.""" - # Server with client auth - servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "client", 
"X-Custom": "client"}, - ), - ] - - # Test with mcp_headers provided - mcp_headers = { - "fs": { - "Authorization": "client-provided-token", - "X-Custom": "custom-value", - } - } - tools = get_mcp_tools(servers, token=None, mcp_headers=mcp_headers) - assert len(tools) == 1 - assert tools[0]["headers"] == { - "Authorization": "client-provided-token", - "X-Custom": "custom-value", - } - - # Test with mcp_headers=None (server should be skipped since auth is required but unavailable) - tools_no_headers = get_mcp_tools(servers, token=None, mcp_headers=None) - assert len(tools_no_headers) == 0 # Server skipped due to missing required auth - - -def test_get_mcp_tools_with_static_headers(tmp_path: Path) -> None: - """Test get_mcp_tools with static headers from config files.""" - # Create a secret file - secret_file = tmp_path / "token.txt" - secret_file.write_text("static-secret-token") - - servers = [ - ModelContextProtocolServer( - name="server1", - url="http://localhost:3000", - authorization_headers={"Authorization": str(secret_file)}, - ), - ] - - tools = get_mcp_tools(servers, token=None) - assert len(tools) == 1 - assert tools[0]["headers"] == {"Authorization": "static-secret-token"} - - -def test_get_mcp_tools_with_mixed_headers(tmp_path: Path) -> None: - """Test get_mcp_tools with mixed header types.""" - # Create a secret file - secret_file = tmp_path / "api-key.txt" - secret_file.write_text("secret-api-key") - - servers = [ - ModelContextProtocolServer( - name="mixed-server", - url="http://localhost:3000", - authorization_headers={ - "Authorization": "kubernetes", - "X-API-Key": str(secret_file), - "X-Custom": "client", - }, - ), - ] - - mcp_headers = { - "mixed-server": { - "X-Custom": "client-custom-value", - } - } - - tools = get_mcp_tools(servers, token="k8s-token", mcp_headers=mcp_headers) - assert len(tools) == 1 - assert tools[0]["headers"] == { - "Authorization": "Bearer k8s-token", - "X-API-Key": "secret-api-key", - "X-Custom": "client-custom-value", - } - - -def test_get_mcp_tools_skips_server_with_missing_auth() -> None: - """Test that servers with required but unavailable auth headers are skipped.""" - servers = [ - # Server with kubernetes auth but no token provided - ModelContextProtocolServer( - name="missing-k8s-auth", - url="http://localhost:3001", - authorization_headers={"Authorization": "kubernetes"}, - ), - # Server with client auth but no MCP-HEADERS provided - ModelContextProtocolServer( - name="missing-client-auth", - url="http://localhost:3002", - authorization_headers={"X-Token": "client"}, - ), - # Server with partial auth (2 headers required, only 1 available) - ModelContextProtocolServer( - name="partial-auth", - url="http://localhost:3003", - authorization_headers={ - "Authorization": "kubernetes", - "X-Custom": "client", - }, - ), - ] - - # No token, no mcp_headers - tools = get_mcp_tools(servers, token=None, mcp_headers=None) - # All servers should be skipped - assert len(tools) == 0 - - -def test_get_mcp_tools_includes_server_without_auth() -> None: - """Test that servers without auth config are always included.""" - servers = [ - # Server with no auth requirements - ModelContextProtocolServer( - name="public-server", - url="http://localhost:3000", - authorization_headers={}, - ), - ] - - # Should work even without token or headers - tools = get_mcp_tools(servers, token=None, mcp_headers=None) - assert len(tools) == 1 - assert tools[0]["server_label"] == "public-server" - assert "headers" not in tools[0] - - -@pytest.mark.asyncio -async def 
test_retrieve_response_no_tools_bypasses_tools(mocker: MockerFixture) -> None: - """Test that no_tools=True bypasses tool configuration and passes None to responses API.""" - mock_client = mocker.Mock() - # responses.create returns a synthetic OpenAI-like response - response_obj = mocker.Mock() - response_obj.id = "resp-1" - response_obj.output = [] - response_obj.usage = None # No usage info - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # vector_stores.list should not matter when no_tools=True, but keep it valid - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - # Ensure system prompt resolution does not require real config - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello", no_tools=True) - summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-x", qr, token="tkn" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "" - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - # tools must be passed as None - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["tools"] is None - assert kwargs["model"] == "model-x" - assert kwargs["instructions"] == "PROMPT" - - -@pytest.mark.asyncio -async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=too-many-locals - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly builds RAG and MCP tools from configuration.""" - mock_client = mocker.Mock() - response_obj = mocker.Mock() - response_obj.id = "resp-2" - response_obj.output = [] - response_obj.usage = None - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [mocker.Mock(id="dbA")] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_query_response.scores = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - - # Mock shield moderation - mock_moderation_result = mocker.Mock() - mock_moderation_result.blocked = False - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - 
return_value=mock_moderation_result, - ) - - mock_cfg = mocker.Mock() - mock_cfg.mcp_servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - mocker.patch("app.endpoints.query_v2.configuration", mock_cfg) - - qr = QueryRequest(query="hello") - _summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-y", qr, token="mytoken" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - - kwargs = mock_client.responses.create.call_args.kwargs - tools = kwargs["tools"] - assert isinstance(tools, list) - # Expect only MCP tools since RAG tools are skipped when doing direct vector querying - tool_types = {t.get("type") for t in tools} - assert tool_types == {"mcp"} - mcp_tool = next(t for t in tools if t["type"] == "mcp") - assert mcp_tool["server_label"] == "fs" - assert mcp_tool["headers"] == {"Authorization": "Bearer mytoken"} - - -@pytest.mark.asyncio -async def test_retrieve_response_parses_output_and_tool_calls( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly parses output content and tool calls from response.""" - mock_client = mocker.Mock() - - # Build output with content variants and tool calls - part1 = mocker.Mock(text="Hello ") - part1.annotations = [] # Ensure annotations is a list to avoid iteration error - part2 = mocker.Mock(text="world") - part2.annotations = [] - - output_item_1 = mocker.Mock() - output_item_1.type = "message" - output_item_1.role = "assistant" - output_item_1.content = [part1, part2] - - output_item_2 = mocker.Mock() - output_item_2.type = "message" - output_item_2.role = "assistant" - output_item_2.content = "!" - - # Tool call as a separate output item (Responses API format) - tool_call_item = mocker.Mock() - tool_call_item.type = "function_call" - tool_call_item.id = "tc-1" - tool_call_item.call_id = "tc-1" - tool_call_item.name = "do_something" - tool_call_item.arguments = '{"x": 1}' - tool_call_item.status = None # Explicitly set to avoid Mock auto-creation - - response_obj = mocker.Mock() - response_obj.id = "resp-3" - response_obj.output = [output_item_1, output_item_2, tool_call_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-z", qr, token="tkn" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Hello world!" 
- assert len(summary.tool_calls) == 1 - assert summary.tool_calls[0].id == "tc-1" - assert summary.tool_calls[0].name == "do_something" - assert summary.tool_calls[0].args == {"x": 1} - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_usage_info(mocker: MockerFixture) -> None: - """Test that token usage is extracted when provided by the API as an object.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response" - output_item.tool_calls = [] - - # Mock usage information as object - mock_usage = mocker.Mock() - mock_usage.input_tokens = 150 - mock_usage.output_tokens = 75 - - response_obj = mocker.Mock() - response_obj.id = "resp-with-usage" - response_obj.output = [output_item] - response_obj.usage = mock_usage - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-usage", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response" - assert token_usage.input_tokens == 150 - assert token_usage.output_tokens == 75 - assert token_usage.llm_calls == 1 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_usage_dict(mocker: MockerFixture) -> None: - """Test that token usage is extracted when provided by the API as a dict.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response dict" - output_item.tool_calls = [] - - # Mock usage information as dict (like llama stack does) - response_obj = mocker.Mock() - response_obj.id = "resp-with-usage-dict" - response_obj.output = [output_item] - response_obj.usage = {"input_tokens": 200, "output_tokens": 100} - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - 
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-usage-dict", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response dict" - assert token_usage.input_tokens == 200 - assert token_usage.output_tokens == 100 - assert token_usage.llm_calls == 1 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_empty_usage_dict(mocker: MockerFixture) -> None: - """Test that empty usage dict is handled gracefully.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response empty usage" - output_item.tool_calls = [] - - # Mock usage information as empty dict (tokens are 0 or missing) - response_obj = mocker.Mock() - response_obj.id = "resp-empty-usage" - response_obj.output = [output_item] - response_obj.usage = {} # Empty dict - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-empty-usage", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response empty usage" - assert token_usage.input_tokens == 0 - assert token_usage.output_tokens == 0 - assert token_usage.llm_calls == 1 # Always 1, even when no token usage data - - -@pytest.mark.asyncio -async def test_retrieve_response_validates_attachments(mocker: MockerFixture) -> None: - """Test that retrieve_response validates attachments and includes them in the input string.""" - mock_client = mocker.Mock() - response_obj = mocker.Mock() - response_obj.id = "resp-4" - response_obj.output = [] - response_obj.usage = None - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - 
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - validate_spy = mocker.patch( - "app.endpoints.query_v2.validate_attachments_metadata", return_value=None - ) - - attachments = [ - Attachment(attachment_type="log", content_type="text/plain", content="x"), - ] - - qr = QueryRequest(query="hello", attachments=attachments) - _summary, _cid, _ref_docs, _token_usage = await retrieve_response( - mock_client, "model-a", qr, token="tkn" - ) - - validate_spy.assert_called_once() - # Verify that attachments are included in the input - kwargs = mock_client.responses.create.call_args.kwargs - assert "input" in kwargs - # Input should be a string containing both query and attachment - assert isinstance(kwargs["input"], str) - assert "hello" in kwargs["input"] - assert "[Attachment: log]" in kwargs["input"] - assert "x" in kwargs["input"] - - -@pytest.mark.asyncio -async def test_query_endpoint_handler_v2_success( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test successful query endpoint handler execution with proper response structure.""" - # Mock configuration to avoid configuration not loaded errors - mock_config = mocker.Mock() - mock_config.llama_stack_configuration = mocker.Mock() - mock_config.quota_limiters = [] - mocker.patch("app.endpoints.query_v2.configuration", mock_config) - - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch("app.endpoints.query.evaluate_model_hints", return_value=(None, None)) - mocker.patch( - "app.endpoints.query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - summary = mocker.Mock( - llm_response="ANSWER", tool_calls=[], tool_results=[], rag_chunks=[] - ) - token_usage = mocker.Mock(input_tokens=10, output_tokens=20) - # Use a valid SUID for conversation_id - test_conversation_id = "00000000-0000-0000-0000-000000000001" - mocker.patch( - "app.endpoints.query_v2.retrieve_response", - return_value=(summary, test_conversation_id, [], token_usage), - ) - mocker.patch("app.endpoints.query_v2.get_topic_summary", return_value="Topic") - mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False) - mocker.patch("app.endpoints.query.persist_user_conversation_details") - mocker.patch("utils.endpoints.store_conversation_into_cache") - mocker.patch("app.endpoints.query.get_session") - - # Add missing mocks for quota functions - mocker.patch("utils.quota.check_tokens_available") - mocker.patch("utils.quota.consume_tokens") - mocker.patch("utils.quota.get_available_quotas", return_value={}) - - # Mock the request state - dummy_request.state.authorized_actions = [] - - res = await query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=MOCK_AUTH, - mcp_headers={}, - ) - - assert res.conversation_id == test_conversation_id - assert res.response == "ANSWER" - - -@pytest.mark.asyncio -async def test_query_endpoint_handler_v2_api_connection_error( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that query endpoint handler properly handles and reports API connection errors.""" - # Mock configuration to avoid configuration not loaded errors - mock_config = mocker.Mock() - mock_config.llama_stack_configuration = mocker.Mock() - mocker.patch("app.endpoints.query_v2.configuration", 
mock_config) - - def _raise(*_args: Any, **_kwargs: Any) -> Exception: - """Raises a custom APIConnectionError exception. - - Args: - *_args: Variable length argument list. - **_kwargs: Arbitrary keyword arguments. - - Returns: - None - - Raises: - APIConnectionError: Always raises this exception with a Request object. - """ - request = Request(scope={"type": "http"}) - raise APIConnectionError(request=request) # type: ignore - - mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise) - - fail_metric = mocker.patch("metrics.llm_calls_failures_total") - - with pytest.raises(HTTPException) as exc: - await query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", False, "token-abc"), - mcp_headers={}, - ) - - assert exc.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - detail = exc.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore[index] - fail_metric.inc.assert_called_once() - - -@pytest.mark.asyncio -async def test_query_endpoint_quota_exceeded( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that query endpoint raises HTTP 429 when model quota is exceeded.""" - query_request = QueryRequest( - query="What is OpenStack?", - provider="openai", - model="gpt-4o-mini", - attachments=[], - ) # type: ignore - mock_client = mocker.AsyncMock() - mock_client.models.list = mocker.AsyncMock(return_value=[]) - mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test")) - mock_client.responses.create.side_effect = RateLimitError( - "Rate limit exceeded for model gpt-4o-mini", - response=mock_response, - body=None, - ) - # Mock conversation creation (needed for query_v2) - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mocker.patch( - "app.endpoints.query.select_model_and_provider_id", - return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"), - ) - mocker.patch("app.endpoints.query.validate_model_provider_override") - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", - return_value=mock_client, - ) - mocker.patch("app.endpoints.query.check_tokens_available") - mocker.patch("app.endpoints.query.get_session") - mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False) - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - return_value=ShieldModerationResult(blocked=False), - ) - mocker.patch( - "app.endpoints.query_v2.prepare_tools_for_responses_api", return_value=None - ) - - with pytest.raises(HTTPException) as exc_info: - await query_endpoint_handler_v2( - dummy_request, query_request=query_request, auth=MOCK_AUTH - ) - assert exc_info.value.status_code == status.HTTP_429_TOO_MANY_REQUESTS - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "The quota has been exceeded" # type: ignore - assert "gpt-4o-mini" in detail["cause"] # type: ignore - - -@pytest.mark.asyncio -async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: - """Test that shield moderation runs and passes when content is safe.""" - mock_client = mocker.Mock() - - # Create mock shield with provider_resource_id - mock_shield = mocker.Mock() - mock_shield.identifier = "content-safety-shield" - mock_shield.provider_resource_id = "moderation-model" - mock_client.shields.list = 
mocker.AsyncMock(return_value=[mock_shield]) - - # Create mock model matching the shield's provider_resource_id - mock_model = mocker.Mock() - mock_model.id = "moderation-model" - mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) - - # Mock moderations.create to return safe (not flagged) content - mock_moderation_result = mocker.Mock() - mock_moderation_result.flagged = False - mock_moderation_response = mocker.Mock() - mock_moderation_response.results = [mock_moderation_result] - mock_client.moderations.create = mocker.AsyncMock( - return_value=mock_moderation_response - ) - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Safe response" - - response_obj = mocker.Mock() - response_obj.id = "resp-shields" - response_obj.output = [output_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-shields", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Safe response" - - # Verify that moderation was called with the user's query - mock_client.moderations.create.assert_called_once_with( - input="hello", model="moderation-model" - ) - # Verify that responses.create was called (moderation passed) - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_with_no_shields_available( - mocker: MockerFixture, -) -> None: - """Test that LLM is called when no shields are configured.""" - mock_client = mocker.Mock() - - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Response without shields" - - response_obj = mocker.Mock() - response_obj.id = "resp-no-shields" - response_obj.output = [output_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, 
"model-no-shields", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Response without shields" - - # Verify that responses.create was called - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_detects_shield_violation( - mocker: MockerFixture, -) -> None: - """Test that shield moderation blocks content and returns early.""" - mock_client = mocker.Mock() - - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_client.conversations.items.create = mocker.AsyncMock(return_value=None) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - # Mock run_shield_moderation to return blocked - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - return_value=ShieldModerationResult( - blocked=True, message="Content violates safety policy" - ), - ) - - qr = QueryRequest(query="dangerous query") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-violation", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Content violates safety policy" - - # Verify that responses.create was NOT called (blocked by moderation) - mock_client.responses.create.assert_not_called() - - -def _create_message_output_with_citations(mocker: MockerFixture) -> Any: - """Create mock message output item with content annotations (citations).""" - # 1. Output item with message content annotations (citations) - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - - # Mock content with annotations - content_part = mocker.Mock() - content_part.type = "output_text" - content_part.text = "Here is a citation." - - annotation1 = mocker.Mock() - annotation1.type = "url_citation" - annotation1.url = "http://example.com/doc1" - annotation1.title = "Doc 1" - - annotation2 = mocker.Mock() - annotation2.type = "file_citation" - annotation2.filename = "file1.txt" - annotation2.url = None - annotation2.title = None - - content_part.annotations = [annotation1, annotation2] - output_item.content = [content_part] - return output_item - - -def _create_file_search_output(mocker: MockerFixture) -> Any: - """Create mock file search tool call output with results.""" - # 2. 
Output item with file search tool call results - output_item = mocker.Mock() - output_item.type = "file_search_call" - output_item.id = "file-search-1" - output_item.queries = ( - [] - ) # Ensure queries is a list to avoid iteration error in tool summary - output_item.status = "completed" - # Create mock result objects with proper attributes matching real llama-stack response - result_1 = mocker.Mock() - result_1.filename = "file2.pdf" - result_1.attributes = { - "docs_url": "http://example.com/doc2", - "title": "Title 1", - "document_id": "doc-123", - } - result_1.text = "Sample text from file2.pdf" - result_1.score = 0.95 - result_1.file_id = "file-123" - result_1.model_dump = mocker.Mock( - return_value={ - "filename": "file2.pdf", - "attributes": { - "docs_url": "http://example.com/doc2", - "title": "Title 1", - "document_id": "doc-123", - }, - "text": "Sample text from file2.pdf", - "score": 0.95, - "file_id": "file-123", - } - ) - - result_2 = mocker.Mock() - result_2.filename = "file3.docx" - result_2.attributes = { - "docs_url": "http://example.com/doc3", - "title": "Title 2", - "document_id": "doc-456", - } - result_2.text = "Sample text from file3.docx" - result_2.score = 0.85 - result_2.file_id = "file-456" - result_2.model_dump = mocker.Mock( - return_value={ - "filename": "file3.docx", - "attributes": { - "docs_url": "http://example.com/doc3", - "title": "Title 2", - "document_id": "doc-456", - }, - "text": "Sample text from file3.docx", - "score": 0.85, - "file_id": "file-456", - } - ) - - output_item.results = [result_1, result_2] - return output_item - - -@pytest.mark.asyncio -async def test_retrieve_response_parses_referenced_documents( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly parses referenced documents from response.""" - mock_client = mocker.AsyncMock() - - # Create output items using helper functions - output_item_1 = _create_message_output_with_citations(mocker) - output_item_2 = _create_file_search_output(mocker) - - response_obj = mocker.Mock() - response_obj.id = "resp-docs" - response_obj.output = [output_item_1, output_item_2] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="query with docs") - _summary, _conv_id, referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-docs", qr, token="tkn", provider_id="test-provider" - ) - - # Referenced documents are now extracted only from file_search_call attributes - assert len(referenced_docs) == 2 - - # Verify Title 1 (File search result with URL) - doc1 = next((d for d in referenced_docs if d.doc_title == "Title 1"), None) - assert doc1 - assert doc1.doc_title == "Title 1" - assert str(doc1.doc_url) == "http://example.com/doc2" - - # Verify Title 2 (File search result with URL) - doc2 = next((d for d in referenced_docs if d.doc_title == "Title 2"), None) - assert doc2 - assert doc2.doc_title == "Title 2" - assert str(doc2.doc_url) == "http://example.com/doc3" - - # Verify 
RAG chunks were extracted from file_search_call results - assert len(_summary.rag_chunks) == 2 - assert _summary.rag_chunks[0].content == "Sample text from file2.pdf" - assert _summary.rag_chunks[0].source == "file2.pdf" - assert _summary.rag_chunks[0].score == 0.95 - assert _summary.rag_chunks[1].content == "Sample text from file3.docx" - assert _summary.rag_chunks[1].source == "file3.docx" - assert _summary.rag_chunks[1].score == 0.85 diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index a892aff5d..64b226a31 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -1,654 +1,637 @@ -"""Unit tests for the /streaming-query REST API endpoint.""" +# pylint: disable=redefined-outer-name,import-error, too-many-function-args +"""Unit tests for the /streaming_query (v2) endpoint using Responses API.""" -# pylint: disable=too-many-lines,too-many-function-args -import json -from typing import Any +from typing import Any, AsyncIterator +from unittest.mock import Mock import pytest -from pydantic import AnyUrl +from fastapi import Request, status +from fastapi.responses import StreamingResponse +import httpx +from llama_stack_client import APIConnectionError, RateLimitError from pytest_mock import MockerFixture from app.endpoints.streaming_query import ( - LLM_TOKEN_EVENT, - LLM_TOOL_CALL_EVENT, - LLM_TOOL_RESULT_EVENT, - generic_llm_error, - prompt_too_long_error, - stream_end_event, - stream_event, + retrieve_response, + streaming_query_endpoint_handler_v2, ) -from configuration import AppConfig -from constants import MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT +from models.config import Action, ModelContextProtocolServer from models.requests import QueryRequest -from models.responses import ReferencedDocument -from utils.token_counter import TokenCounter +from utils.types import ShieldModerationResult -# Note: content_delta module doesn't exist in llama-stack-client 0.3.x -# These are mock classes for backward compatibility with Agent API tests -# pylint: disable=too-few-public-methods,redefined-builtin +@pytest.fixture +def dummy_request() -> Request: + """Create a dummy FastAPI Request for testing with authorized actions. -class TextDelta: - """Mock TextDelta for Agent API tests.""" + Create a FastAPI Request configured for tests with permissive RBAC. - def __init__(self, text: str, type: str = "text"): # noqa: A002 - """ - Initialize the object with textual content and a chunk type. - - Parameters: - text (str): The textual content for this instance. - type (str): The content type or category (for example, "text"). Defaults to "text". - """ - self.text = text - self.type = type - - -class ToolCallDelta: - """Mock ToolCallDelta for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -# Note: Agent API types don't exist in llama-stack-client 0.3.x -# These are mock classes for backward compatibility with Agent API tests - - -class TurnResponseEvent: - """Mock TurnResponseEvent for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. 
-
-        Parameters:
-        **kwargs: Any
-            Attribute names and values to assign to the instance. Each key in
-            `kwargs` becomes an attribute on the created object with the
-            corresponding value.
-        """
-        for key, value in kwargs.items():
-            setattr(self, key, value)
+    Returns:
+        Request: A FastAPI Request whose `state.authorized_actions` is set to a
+        set of all `Action` members.
+    """
+    req = Request(scope={"type": "http"})
+    # Provide a permissive authorized_actions set to satisfy RBAC check
+    req.state.authorized_actions = set(Action)
+    return req
+
+
+@pytest.mark.asyncio
+async def test_retrieve_response_builds_rag_and_mcp_tools(
+    mocker: MockerFixture,
+) -> None:
+    """Test that retrieve_response passes MCP tools and skips RAG file_search tools."""
+    mock_client = mocker.Mock()
+    mock_vector_stores = mocker.Mock()
+    mock_vector_stores.data = [mocker.Mock(id="db1")]
+    mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores)
+    mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock())
+    # Mock conversations.create for new conversation creation
+    mock_conversation = mocker.Mock()
+    mock_conversation.id = "conv_abc123def456"
+    mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation)
+    # Mock shields.list and models.list for run_shield_moderation
+    mock_client.shields.list = mocker.AsyncMock(return_value=[])
+    mock_client.models.list = mocker.AsyncMock(return_value=[])
+    # Mock vector_io.query for direct vector querying
+    mock_query_response = mocker.Mock()
+    mock_query_response.chunks = []
+    mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response)

+    mocker.patch(
+        "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT"
+    )

+    mock_cfg = mocker.Mock()
+    mock_cfg.mcp_servers = [
+        ModelContextProtocolServer(
+            name="fs",
+            url="http://localhost:3000",
+            authorization_headers={"Authorization": "kubernetes"},
+        ),
+    ]
+    mocker.patch("app.endpoints.streaming_query_v2.configuration", mock_cfg)
+    mocker.patch("app.endpoints.query_v2.configuration", mock_cfg)
+
+    qr = QueryRequest(query="hello")
+    await retrieve_response(mock_client, "model-z", qr, token="tok")
+
+    kwargs = mock_client.responses.create.call_args.kwargs
+    assert kwargs["stream"] is True
+    tools = kwargs["tools"]
+    assert isinstance(tools, list)
+    types = {t.get("type") for t in tools}
+    # Since we're now skipping RAG tools and doing direct vector querying,
+    # we should only see MCP tools, not file_search tools
+    assert types == {"mcp"}
+
+
+@pytest.mark.asyncio
+async def test_retrieve_response_no_tools_passes_none(mocker: MockerFixture) -> None:
+    """Test that retrieve_response passes None for tools when no_tools=True."""
+    mock_client = mocker.Mock()
+    mock_vector_stores = mocker.Mock()
+    mock_vector_stores.data = []
+    mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores)
+    mock_client.responses.create = 
mocker.AsyncMock(return_value=mocker.Mock()) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + # Mock vector_io.query for direct vector querying + mock_query_response = mocker.Mock() + mock_query_response.chunks = [] + mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. + mocker.patch( + "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" + ) + mocker.patch( + "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) + ) - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) + qr = QueryRequest(query="hello", no_tools=True) + await retrieve_response(mock_client, "model-z", qr, token="tok") + kwargs = mock_client.responses.create.call_args.kwargs + assert kwargs["tools"] is None + assert kwargs["stream"] is True -class AgentTurnResponseStepProgressPayload: - """Mock AgentTurnResponseStepProgressPayload for Agent API tests.""" - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. +@pytest.mark.asyncio +async def test_streaming_query_endpoint_handler_v2_success_yields_events( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test that streaming_query_endpoint_handler_v2 yields correct SSE events.""" + # Skip real config checks - patch in streaming_query where the base handler is + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. 
- """ - for key, value in kwargs.items(): - setattr(self, key, value) + # Model selection plumbing + mock_client = mocker.Mock() + mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) + mocker.patch( + "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client + ) + mocker.patch( + "app.endpoints.streaming_query.evaluate_model_hints", + return_value=(None, None), + ) + mocker.patch( + "app.endpoints.streaming_query.select_model_and_provider_id", + return_value=("llama/m", "m", "p"), + ) + # Replace SSE helpers for deterministic output + mocker.patch( + "app.endpoints.streaming_query_v2.stream_start_event", + lambda conv_id: f"START:{conv_id}\n", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.stream_event", + lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.stream_end_event", + lambda _m, _t, _aq, _rd, _media: "END\n", + ) -class AgentTurnResponseTurnAwaitingInputPayload: - """Mock AgentTurnResponseTurnAwaitingInputPayload for Agent API tests.""" + # Mock the cleanup function that handles all post-streaming database/cache work + cleanup_spy = mocker.patch( + "app.endpoints.streaming_query_v2.cleanup_after_streaming", + mocker.AsyncMock(return_value=None), + ) - def __init__(self, **kwargs: Any): + # Build a fake async stream of chunks + async def fake_stream() -> AsyncIterator[Mock]: """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. + Produce a fake asynchronous stream of response events used for testing streaming endpoints. + + Yields Mock objects that emulate event frames from a + streaming responses API, including: + - a "response.created" event with a conversation id, + - content and text delta events ("response.content_part.added", + "response.output_text.delta"), + - function call events ("response.output_item.done" with completed tool call), + - a final "response.output_text.done" event and a "response.completed" event. + + Returns: + AsyncIterator[Mock]: An async iterator that yields + event-like Mock objects representing the streamed + response frames; the final yielded response contains an `output` + attribute (an empty list) to allow shield violation detection in + tests. 
""" - for key, value in kwargs.items(): - setattr(self, key, value) + yield Mock(type="response.created", response=Mock(id="conv-xyz")) + yield Mock(type="response.content_part.added") + yield Mock(type="response.output_text.delta", delta="Hello ") + yield Mock(type="response.output_text.delta", delta="world") + item_mock = Mock(type="function_call", id="item1", call_id="call1") + item_mock.name = "search" # 'name' is a special Mock param, set explicitly + item_mock.arguments = '{"q":"x"}' + yield Mock(type="response.output_item.done", item=item_mock) + yield Mock(type="response.output_text.done", text="Hello world") + # Include a response object with output attribute for shield violation detection + mock_response = Mock(output=[]) + yield Mock(type="response.completed", response=mock_response) + mocker.patch( + "app.endpoints.streaming_query_v2.retrieve_response", + return_value=(fake_stream(), "abc123def456"), + ) -class AgentTurnResponseTurnCompletePayload: - """Mock AgentTurnResponseTurnCompletePayload for Agent API tests.""" + metric = mocker.patch("metrics.llm_calls_total") - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. + resp = await streaming_query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="hi"), + auth=("user123", "", True, "token-abc"), # skip_userid_check=True + mcp_headers={}, + ) - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. + assert isinstance(resp, StreamingResponse) + metric.labels("p", "m").inc.assert_called_once() + + # Collect emitted events + events: list[str] = [] + async for chunk in resp.body_iterator: + s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) + events.append(s) + + # Validate event sequence and content + assert events[0] == "START:abc123def456\n" + # content_part.added triggers empty token + assert events[1] == "EV:token:\n" + assert events[2] == "EV:token:Hello \n" + assert events[3] == "EV:token:world\n" + # tool call delta + assert events[4].startswith("EV:tool_call:") + # turn complete and end + assert "EV:turn_complete:Hello world\n" in events + assert events[-1] == "END\n" + + # Verify cleanup function was invoked after streaming + assert cleanup_spy.call_count == 1 + # Verify cleanup was called with correct user_id and conversation_id + call_args = cleanup_spy.call_args + assert call_args.kwargs["user_id"] == "user123" + assert call_args.kwargs["conversation_id"] == "abc123def456" + assert call_args.kwargs["model_id"] == "m" + assert call_args.kwargs["provider_id"] == "p" + + +@pytest.mark.asyncio +async def test_streaming_query_endpoint_handler_v2_api_connection_error( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test that streaming_query_endpoint_handler_v2 handles API connection errors.""" + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") + + def _raise(*_a: Any, **_k: Any) -> None: """ - for key, value in kwargs.items(): - setattr(self, key, value) - + Always raises an APIConnectionError with its `request` attribute set to None. -class AgentTurnResponseTurnStartPayload: - """Mock AgentTurnResponseTurnStartPayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): + Raises: + APIConnectionError: Raised every time the function is called; the + exception's `request` is None. 
""" - Initialize the instance by setting attributes from the provided keyword arguments. + raise APIConnectionError(request=None) # type: ignore[arg-type] - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) + mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise) + fail_metric = mocker.patch("metrics.llm_calls_failures_total") -class ToolExecutionStep: - """Mock ToolExecutionStep for Agent API tests.""" + mocker.patch( + "app.endpoints.streaming_query.evaluate_model_hints", + return_value=(None, None), + ) - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. + response = await streaming_query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="hi"), + auth=("user123", "", False, "tok"), + mcp_headers={}, + ) - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) + assert isinstance(response, StreamingResponse) + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE + fail_metric.inc.assert_called_once() + + +@pytest.mark.asyncio +async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: + """Test that shield moderation runs and passes when content is safe.""" + mock_client = mocker.Mock() + + # Create mock shield with provider_resource_id + mock_shield = mocker.Mock() + mock_shield.identifier = "content-safety-shield" + mock_shield.provider_resource_id = "moderation-model" + mock_client.shields.list = mocker.AsyncMock(return_value=[mock_shield]) + + # Create mock model matching the shield's provider_resource_id + mock_model = mocker.Mock() + mock_model.id = "moderation-model" + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + + # Mock moderations.create to return safe (not flagged) content + mock_moderation_result = mocker.Mock() + mock_moderation_result.flagged = False + mock_moderation_response = mocker.Mock() + mock_moderation_response.results = [mock_moderation_result] + mock_client.moderations.create = mocker.AsyncMock( + return_value=mock_moderation_response + ) + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) -class ToolResponse: - """Mock ToolResponse for Agent API tests.""" + mocker.patch( + "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" + ) + mocker.patch( + "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) + ) - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. + qr = QueryRequest(query="hello") + await retrieve_response(mock_client, "model-shields", qr, token="tok") - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. 
Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) + # Verify that moderation was called with the user's query + mock_client.moderations.create.assert_called_once_with( + input="hello", model="moderation-model" + ) + # Verify that responses.create was called (moderation passed) + mock_client.responses.create.assert_called_once() + + +@pytest.mark.asyncio +async def test_retrieve_response_with_no_shields_available( + mocker: MockerFixture, +) -> None: + """Test that LLM is called when no shields are configured.""" + mock_client = mocker.Mock() + + # Mock shields.list and models.list for run_shield_moderation + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) + + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) + mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) + # Mock conversations.create for new conversation creation + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123def456" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mocker.patch( + "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" + ) + mocker.patch( + "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) + ) -# pylint: enable=too-few-public-methods,redefined-builtin + qr = QueryRequest(query="hello") + await retrieve_response(mock_client, "model-no-shields", qr, token="tok") -MOCK_AUTH = ( - "017adfa4-7cc6-46e4-b663-3653e1ae69df", - "mock_username", - False, - "mock_token", -) + # Verify that responses.create was called + mock_client.responses.create.assert_called_once() -def mock_database_operations(mocker: MockerFixture) -> None: - """Helper function to mock database operations for streaming query endpoints. +@pytest.mark.asyncio +async def test_streaming_response_blocked_by_shield_moderation( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test that when shield moderation blocks, a violation stream is returned.""" + # Skip real config checks + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - Configure test mocks for conversation ownership validation and post-stream - cleanup used by streaming-query tests. + # Model selection plumbing + mock_client = mocker.Mock() + mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) + mocker.patch( + "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client + ) + mocker.patch( + "app.endpoints.streaming_query.evaluate_model_hints", + return_value=(None, None), + ) + mocker.patch( + "app.endpoints.streaming_query.select_model_and_provider_id", + return_value=("llama/m", "m", "p"), + ) - Parameters: - mocker (MockerFixture): Pytest-mock fixture used to patch functions. - After calling this helper, `validate_conversation_ownership` is patched - to return `True` and `cleanup_after_streaming` is patched to an async - no-op. 
- """ + # SSE helpers + mocker.patch( + "app.endpoints.streaming_query_v2.stream_start_event", + lambda conv_id: f"START:{conv_id}\n", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.stream_event", + lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", + ) mocker.patch( - "app.endpoints.streaming_query.validate_conversation_ownership", - return_value=True, + "app.endpoints.streaming_query_v2.stream_end_event", + lambda _m, _t, _aq, _rd, _media: "END\n", ) + # Mock the cleanup function that handles all post-streaming database/cache work mocker.patch( - "app.endpoints.streaming_query.cleanup_after_streaming", + "app.endpoints.streaming_query_v2.cleanup_after_streaming", mocker.AsyncMock(return_value=None), ) - -def mock_metrics(mocker: MockerFixture) -> None: - """Helper function to mock metrics operations for streaming query endpoints.""" - # Mock the metrics that are used in the streaming query endpoints - mocker.patch("metrics.llm_token_sent_total") - mocker.patch("metrics.llm_token_received_total") - mocker.patch("metrics.llm_calls_total") - - -SAMPLE_KNOWLEDGE_SEARCH_RESULTS = [ - """knowledge_search tool found 2 chunks: -BEGIN of knowledge_search tool results. -""", - """Result 1 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc1', 'title': 'Doc1', 'document_id': 'doc-1', \ -'source': None} -""", - """Result 2 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc2', 'title': 'Doc2', 'document_id': 'doc-2', \ -'source': None} -""", - """END of knowledge_search tool results. -""", - # Following metadata contains an intentionally incorrect keyword "Title" (instead of "title") - # and it is not picked as a referenced document. - """Result 3 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc3', 'Title': 'Doc3', 'document_id': 'doc-3', \ -'source': None} -""", - """The above results were retrieved to help answer the user\'s query: "Sample Query". -Use them as supporting information only in answering this query. -""", -] - - -@pytest.fixture(autouse=True, name="setup_configuration") -def setup_configuration_fixture() -> AppConfig: - """Set up configuration for tests. - - Create and initialize an AppConfig instance preconfigured for unit tests. - - The configuration uses a local service (localhost:8080), a test Llama Stack - API key and URL, disables user transcript collection, and sets a noop - conversation cache and empty MCP servers to avoid external dependencies. - - Returns: - AppConfig: An initialized AppConfig populated with the test settings. 
- """ - config_dict = { - "name": "test", - "service": { - "host": "localhost", - "port": 8080, - "auth_enabled": False, - "workers": 1, - "color_log": True, - "access_log": True, - }, - "llama_stack": { - "api_key": "test-key", - "url": "http://test.com:1234", - "use_as_library_client": False, - }, - "user_data_collection": { - "transcripts_enabled": False, - }, - "mcp_servers": [], - "conversation_cache": { - "type": "noop", - }, - } - cfg = AppConfig() - cfg.init_from_dict(config_dict) - return cfg - - -# ============================================================================ -# OLS Compatibility Tests -# ============================================================================ - - -class TestOLSStreamEventFormatting: - """Test the stream_event function for both media types (OLS compatibility).""" - - def test_stream_event_json_token(self) -> None: - """Test token event formatting for JSON media type.""" - data = {"id": 0, "token": "Hello"} - result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) - - expected = 'data: {"event": "token", "data": {"id": 0, "token": "Hello"}}\n\n' - assert result == expected - - def test_stream_event_text_token(self) -> None: - """Test token event formatting for text media type.""" - - data = {"id": 0, "token": "Hello"} - result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_TEXT) - - assert result == "Hello" - - def test_stream_event_json_tool_call(self) -> None: - """Test tool call event formatting for JSON media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) - - expected = ( - 'data: {"event": "tool_call", "data": {"id": 0, "token": ' - '{"tool_name": "search", "arguments": {"query": "test"}}}}\n\n' - ) - assert result == expected - - def test_stream_event_text_tool_call(self) -> None: - """Test tool call event formatting for text media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_TEXT) - - expected = ( - '\nTool call: {"id": 0, "token": ' - '{"tool_name": "search", "arguments": {"query": "test"}}}\n' - ) - assert result == expected - - def test_stream_event_json_tool_result(self) -> None: - """Test tool result event formatting for JSON media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) - - expected = ( - 'data: {"event": "tool_result", "data": {"id": 0, "token": ' - '{"tool_name": "search", "response": "Found results"}}}\n\n' - ) - assert result == expected - - def test_stream_event_text_tool_result(self) -> None: - """Test tool result event formatting for text media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_TEXT) - - expected = ( - '\nTool result: {"id": 0, "token": ' - '{"tool_name": "search", "response": "Found results"}}\n' - ) - assert result == expected - - def test_stream_event_unknown_type(self) -> None: - """Test handling of unknown event types.""" - - data = {"id": 0, "token": "test"} - result = stream_event(data, "unknown_event", MEDIA_TYPE_TEXT) - - assert result == "" - - -class TestOLSStreamEndEvent: - """Test the stream_end_event function for both media types (OLS compatibility).""" - - def test_stream_end_event_json(self) -> None: 
- """Test end event formatting for JSON media type.""" - - metadata_map = { - "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, - "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" - ), - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" - ), - ] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_JSON, + # Build a fake async stream for violation response + async def fake_violation_stream() -> AsyncIterator[Mock]: + """Produce an async iterator simulating a shield violation response.""" + yield Mock( + type="response.content_part.added", + response_id="resp_shield", + item_id="msg_shield", ) - - # Parse the result to verify structure - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "end" - assert "referenced_documents" in parsed["data"] - assert len(parsed["data"]["referenced_documents"]) == 2 - assert parsed["data"]["referenced_documents"][0]["doc_title"] == "Test Doc 1" - assert ( - parsed["data"]["referenced_documents"][0]["doc_url"] - == "https://example.com/doc1" + yield Mock( + type="response.output_text.delta", delta="Content violates safety policy" ) - assert "available_quotas" in parsed - - def test_stream_end_event_text(self) -> None: - """Test end event formatting for text media type.""" - - metadata_map = { - "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, - "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" - ), - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" - ), - ] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_TEXT, - ) - - expected = ( - "\n\n---\n\nTest Doc 1: https://example.com/doc1\n" - "Test Doc 2: https://example.com/doc2" + violation_item = Mock( + type="message", + role="assistant", + content="Content violates safety policy", + refusal=None, ) - assert result == expected - - def test_stream_end_event_text_no_docs(self) -> None: - """Test end event formatting for text media type with no documents.""" - - metadata_map: dict = {} - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents: list[ReferencedDocument] = [] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_TEXT, + yield Mock( + type="response.completed", + response=Mock(id="resp_shield", output=[violation_item]), ) - assert result == "" - - -class TestOLSErrorHandling: - """Test error handling functions (OLS compatibility).""" - - def test_prompt_too_long_error_json(self) -> None: - """Test prompt too long error for JSON media type.""" - - error = Exception("Prompt exceeds maximum length") - result = 
prompt_too_long_error(error, MEDIA_TYPE_JSON) - - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - assert parsed["event"] == "error" - assert parsed["data"]["status_code"] == 413 - assert parsed["data"]["response"] == "Prompt is too long" - assert parsed["data"]["cause"] == "Prompt exceeds maximum length" + mocker.patch( + "app.endpoints.streaming_query_v2.retrieve_response", + return_value=(fake_violation_stream(), "conv123"), + ) - def test_prompt_too_long_error_text(self) -> None: - """Test prompt too long error for text media type.""" + mocker.patch("metrics.llm_calls_total") - error = Exception("Prompt exceeds maximum length") - result = prompt_too_long_error(error, MEDIA_TYPE_TEXT) + resp = await streaming_query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="dangerous query"), + auth=("user123", "", True, "token-abc"), + mcp_headers={}, + ) - assert result == "Prompt is too long: Prompt exceeds maximum length" + assert isinstance(resp, StreamingResponse) - def test_generic_llm_error_json(self) -> None: - """Test generic LLM error for JSON media type.""" + # Collect emitted events to trigger the generator + events: list[str] = [] + async for chunk in resp.body_iterator: + s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) + events.append(s) - error = Exception("Connection failed") - result = generic_llm_error(error, MEDIA_TYPE_JSON) + # Verify that the stream contains the violation message + all_events = "".join(events) + assert "Content violates safety policy" in all_events - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - assert parsed["event"] == "error" - assert parsed["data"]["response"] == "Internal server error" - assert parsed["data"]["cause"] == "Connection failed" - def test_generic_llm_error_text(self) -> None: - """Test generic LLM error for text media type.""" +@pytest.mark.asyncio +async def test_streaming_response_no_shield_violation( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test that no metric is incremented when there's no shield violation in streaming.""" + # Skip real config checks + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - error = Exception("Connection failed") - result = generic_llm_error(error, MEDIA_TYPE_TEXT) + # Model selection plumbing + mock_client = mocker.Mock() + mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) + mocker.patch( + "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client + ) + mocker.patch( + "app.endpoints.streaming_query.evaluate_model_hints", + return_value=(None, None), + ) + mocker.patch( + "app.endpoints.streaming_query.select_model_and_provider_id", + return_value=("llama/m", "m", "p"), + ) - assert result == "Error: Connection failed" + # SSE helpers + mocker.patch( + "app.endpoints.streaming_query_v2.stream_start_event", + lambda conv_id: f"START:{conv_id}\n", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.stream_event", + lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.stream_end_event", + lambda _m, _t, _aq, _rd, _media: "END\n", + ) + # Mock the cleanup function that handles all post-streaming database/cache work + mocker.patch( + "app.endpoints.streaming_query_v2.cleanup_after_streaming", + mocker.AsyncMock(return_value=None), + ) -class TestOLSCompatibilityIntegration: - """Integration tests for OLS 
compatibility.""" + # Mock the validation error metric + validation_metric = mocker.patch("metrics.llm_calls_validation_errors_total") - def test_media_type_validation(self) -> None: - """Test that media type validation works correctly.""" + # Build a fake async stream without violation + async def fake_stream_without_violation() -> AsyncIterator[Mock]: + """ + Produce a deterministic sequence of streaming response events that end with a message. + + Yields four events in order: + - `response.created` with a response id, + - `response.output_text.delta` with a text fragment, + - `response.output_text.done` with the final text, + - `response.completed` whose `response.output` contains an assistant + message where `refusal` is `None`. + + Returns: + An iterator yielding Mock objects representing the + streaming events of a successful response with no refusal. + """ + yield Mock(type="response.created", response=Mock(id="conv-safe")) + yield Mock(type="response.output_text.delta", delta="Safe ") + yield Mock(type="response.output_text.done", text="Safe response") + # Response completed without refusal + safe_item = Mock(type="message", role="assistant", refusal=None) + response_safe = Mock(id="conv-safe", output=[safe_item]) + yield Mock(type="response.completed", response=response_safe) - # Valid media types - valid_request = QueryRequest(query="test", media_type="application/json") - assert valid_request.media_type == "application/json" + mocker.patch( + "app.endpoints.streaming_query_v2.retrieve_response", + return_value=(fake_stream_without_violation(), ""), + ) - valid_request = QueryRequest(query="test", media_type="text/plain") - assert valid_request.media_type == "text/plain" + mocker.patch("metrics.llm_calls_total") - # Invalid media type should raise error - with pytest.raises(ValueError, match="media_type must be either"): - QueryRequest(query="test", media_type="invalid/type") + resp = await streaming_query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="safe query"), + auth=("user123", "", True, "token-abc"), + mcp_headers={}, + ) - def test_ols_event_structure(self) -> None: - """Test that events follow OLS structure.""" + assert isinstance(resp, StreamingResponse) - # Test token event structure - token_data = {"id": 0, "token": "Hello"} - token_event = stream_event(token_data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) + # Collect emitted events to trigger the generator + events: list[str] = [] + async for chunk in resp.body_iterator: + s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) + events.append(s) - data_part = token_event.replace("data: ", "").strip() - parsed = json.loads(data_part) + # Verify that the validation error metric was NOT incremented + validation_metric.inc.assert_not_called() - assert parsed["event"] == "token" - assert "id" in parsed["data"] - assert "token" in parsed["data"] - assert "role" not in parsed["data"] # Role field is not included - # Test tool call event structure - tool_data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - tool_event = stream_event(tool_data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) +@pytest.mark.asyncio +async def test_streaming_query_endpoint_handler_v2_quota_exceeded( + mocker: MockerFixture, dummy_request: Request +) -> None: + """Test that streaming query endpoint v2 streams HTTP 429 when model quota is exceeded.""" + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - data_part = tool_event.replace("data: ", 
"").strip() - parsed = json.loads(data_part) + mock_client = mocker.Mock() + mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) + mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test")) + mock_client.responses.create.side_effect = RateLimitError( + "Rate limit exceeded for model gpt-4o-mini", + response=mock_response, + body=None, + ) + # Mock conversation creation (needed for query_v2) + mock_conversation = mocker.Mock() + mock_conversation.id = "conv_abc123" + mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) + mock_client.vector_stores.list = mocker.AsyncMock(return_value=mocker.Mock(data=[])) + mock_client.shields.list = mocker.AsyncMock(return_value=[]) + mock_client.models.list = mocker.AsyncMock(return_value=[]) - assert parsed["event"] == "tool_call" - assert "id" in parsed["data"] - assert "role" not in parsed["data"] - assert "token" in parsed["data"] + mocker.patch( + "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client + ) + mocker.patch( + "app.endpoints.streaming_query.evaluate_model_hints", + return_value=(None, None), + ) + mocker.patch( + "app.endpoints.streaming_query.select_model_and_provider_id", + return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"), + ) + mocker.patch("app.endpoints.streaming_query.validate_model_provider_override") + mocker.patch( + "app.endpoints.streaming_query_v2.run_shield_moderation", + return_value=ShieldModerationResult(blocked=False), + ) + mocker.patch( + "app.endpoints.streaming_query_v2.prepare_tools_for_responses_api", + return_value=None, + ) + mocker.patch( + "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" + ) + mocker.patch( + "app.endpoints.streaming_query_v2.to_llama_stack_conversation_id", + return_value="conv_abc123", + ) + mocker.patch( + "app.endpoints.streaming_query_v2.normalize_conversation_id", + return_value="abc123", + ) - # Test tool result event structure - result_data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result_event = stream_event(result_data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) + response = await streaming_query_endpoint_handler_v2( + request=dummy_request, + query_request=QueryRequest(query="What is OpenStack?"), + auth=("user123", "", False, "token-abc"), + mcp_headers={}, + ) - data_part = result_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "tool_result" - assert "id" in parsed["data"] - assert "role" not in parsed["data"] - assert "token" in parsed["data"] - - def test_ols_end_event_structure(self) -> None: - """Test that end event follows OLS structure.""" - - metadata_map = { - "doc1": {"title": "Test Doc", "docs_url": "https://example.com/doc"} - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc"), doc_title="Test Doc" - ), - ] - end_event = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_JSON, - ) - data_part = end_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "end" - assert "referenced_documents" in parsed["data"] - assert "truncated" in parsed["data"] - assert "input_tokens" in parsed["data"] - assert "output_tokens" in parsed["data"] - assert "available_quotas" in 
parsed # At root level, not inside data + assert isinstance(response, StreamingResponse) + assert response.status_code == status.HTTP_429_TOO_MANY_REQUESTS + + # Read the streamed error response (SSE format) + content = b"" + async for chunk in response.body_iterator: + if isinstance(chunk, bytes): + content += chunk + elif isinstance(chunk, str): + content += chunk.encode() + else: + # Handle memoryview or other types + content += bytes(chunk) + + content_str = content.decode() + # The error is formatted as SSE: data: {"event":"error","response":"...","cause":"..."}\n\n + # Check for the error message in the content + assert "The quota has been exceeded" in content_str + assert "gpt-4o-mini" in content_str diff --git a/tests/unit/app/endpoints/test_streaming_query_old.py b/tests/unit/app/endpoints/test_streaming_query_old.py new file mode 100644 index 000000000..9552d2885 --- /dev/null +++ b/tests/unit/app/endpoints/test_streaming_query_old.py @@ -0,0 +1,654 @@ +"""Unit tests for the /streaming-query REST API endpoint.""" + +# pylint: disable=too-many-lines,too-many-function-args +import json +from typing import Any + +import pytest +from pydantic import AnyUrl +from pytest_mock import MockerFixture + +from app.endpoints.streaming_query_old import ( + LLM_TOKEN_EVENT, + LLM_TOOL_CALL_EVENT, + LLM_TOOL_RESULT_EVENT, + generic_llm_error, + prompt_too_long_error, + stream_end_event, + stream_event, +) +from configuration import AppConfig +from constants import MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT +from models.requests import QueryRequest +from models.responses import ReferencedDocument +from utils.token_counter import TokenCounter + +# Note: content_delta module doesn't exist in llama-stack-client 0.3.x +# These are mock classes for backward compatibility with Agent API tests +# pylint: disable=too-few-public-methods,redefined-builtin + + +class TextDelta: + """Mock TextDelta for Agent API tests.""" + + def __init__(self, text: str, type: str = "text"): # noqa: A002 + """ + Initialize the object with textual content and a chunk type. + + Parameters: + text (str): The textual content for this instance. + type (str): The content type or category (for example, "text"). Defaults to "text". + """ + self.text = text + self.type = type + + +class ToolCallDelta: + """Mock ToolCallDelta for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +# Note: Agent API types don't exist in llama-stack-client 0.3.x +# These are mock classes for backward compatibility with Agent API tests + + +class TurnResponseEvent: + """Mock TurnResponseEvent for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. 
+ """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseStreamChunk: + """Mock AgentTurnResponseStreamChunk for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseStepCompletePayload: + """Mock AgentTurnResponseStepCompletePayload for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseStepProgressPayload: + """Mock AgentTurnResponseStepProgressPayload for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseTurnAwaitingInputPayload: + """Mock AgentTurnResponseTurnAwaitingInputPayload for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseTurnCompletePayload: + """Mock AgentTurnResponseTurnCompletePayload for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class AgentTurnResponseTurnStartPayload: + """Mock AgentTurnResponseTurnStartPayload for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class ToolExecutionStep: + """Mock ToolExecutionStep for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. 
+ """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +class ToolResponse: + """Mock ToolResponse for Agent API tests.""" + + def __init__(self, **kwargs: Any): + """ + Initialize the instance by setting attributes from the provided keyword arguments. + + Parameters: + **kwargs: Any + Attribute names and values to assign to the instance. Each key in + `kwargs` becomes an attribute on the created object with the + corresponding value. + """ + for key, value in kwargs.items(): + setattr(self, key, value) + + +# pylint: enable=too-few-public-methods,redefined-builtin + +MOCK_AUTH = ( + "017adfa4-7cc6-46e4-b663-3653e1ae69df", + "mock_username", + False, + "mock_token", +) + + +def mock_database_operations(mocker: MockerFixture) -> None: + """Helper function to mock database operations for streaming query endpoints. + + Configure test mocks for conversation ownership validation and post-stream + cleanup used by streaming-query tests. + + Parameters: + mocker (MockerFixture): Pytest-mock fixture used to patch functions. + After calling this helper, `validate_conversation_ownership` is patched + to return `True` and `cleanup_after_streaming` is patched to an async + no-op. + """ + mocker.patch( + "app.endpoints.streaming_query.validate_conversation_ownership", + return_value=True, + ) + # Mock the cleanup function that handles all post-streaming database/cache work + mocker.patch( + "app.endpoints.streaming_query.cleanup_after_streaming", + mocker.AsyncMock(return_value=None), + ) + + +def mock_metrics(mocker: MockerFixture) -> None: + """Helper function to mock metrics operations for streaming query endpoints.""" + # Mock the metrics that are used in the streaming query endpoints + mocker.patch("metrics.llm_token_sent_total") + mocker.patch("metrics.llm_token_received_total") + mocker.patch("metrics.llm_calls_total") + + +SAMPLE_KNOWLEDGE_SEARCH_RESULTS = [ + """knowledge_search tool found 2 chunks: +BEGIN of knowledge_search tool results. +""", + """Result 1 +Content: ABC +Metadata: {'docs_url': 'https://example.com/doc1', 'title': 'Doc1', 'document_id': 'doc-1', \ +'source': None} +""", + """Result 2 +Content: ABC +Metadata: {'docs_url': 'https://example.com/doc2', 'title': 'Doc2', 'document_id': 'doc-2', \ +'source': None} +""", + """END of knowledge_search tool results. +""", + # Following metadata contains an intentionally incorrect keyword "Title" (instead of "title") + # and it is not picked as a referenced document. + """Result 3 +Content: ABC +Metadata: {'docs_url': 'https://example.com/doc3', 'Title': 'Doc3', 'document_id': 'doc-3', \ +'source': None} +""", + """The above results were retrieved to help answer the user\'s query: "Sample Query". +Use them as supporting information only in answering this query. +""", +] + + +@pytest.fixture(autouse=True, name="setup_configuration") +def setup_configuration_fixture() -> AppConfig: + """Set up configuration for tests. + + Create and initialize an AppConfig instance preconfigured for unit tests. + + The configuration uses a local service (localhost:8080), a test Llama Stack + API key and URL, disables user transcript collection, and sets a noop + conversation cache and empty MCP servers to avoid external dependencies. + + Returns: + AppConfig: An initialized AppConfig populated with the test settings. 
+ """ + config_dict = { + "name": "test", + "service": { + "host": "localhost", + "port": 8080, + "auth_enabled": False, + "workers": 1, + "color_log": True, + "access_log": True, + }, + "llama_stack": { + "api_key": "test-key", + "url": "http://test.com:1234", + "use_as_library_client": False, + }, + "user_data_collection": { + "transcripts_enabled": False, + }, + "mcp_servers": [], + "conversation_cache": { + "type": "noop", + }, + } + cfg = AppConfig() + cfg.init_from_dict(config_dict) + return cfg + + +# ============================================================================ +# OLS Compatibility Tests +# ============================================================================ + + +class TestOLSStreamEventFormatting: + """Test the stream_event function for both media types (OLS compatibility).""" + + def test_stream_event_json_token(self) -> None: + """Test token event formatting for JSON media type.""" + data = {"id": 0, "token": "Hello"} + result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) + + expected = 'data: {"event": "token", "data": {"id": 0, "token": "Hello"}}\n\n' + assert result == expected + + def test_stream_event_text_token(self) -> None: + """Test token event formatting for text media type.""" + + data = {"id": 0, "token": "Hello"} + result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_TEXT) + + assert result == "Hello" + + def test_stream_event_json_tool_call(self) -> None: + """Test tool call event formatting for JSON media type.""" + + data = { + "id": 0, + "token": {"tool_name": "search", "arguments": {"query": "test"}}, + } + result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) + + expected = ( + 'data: {"event": "tool_call", "data": {"id": 0, "token": ' + '{"tool_name": "search", "arguments": {"query": "test"}}}}\n\n' + ) + assert result == expected + + def test_stream_event_text_tool_call(self) -> None: + """Test tool call event formatting for text media type.""" + + data = { + "id": 0, + "token": {"tool_name": "search", "arguments": {"query": "test"}}, + } + result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_TEXT) + + expected = ( + '\nTool call: {"id": 0, "token": ' + '{"tool_name": "search", "arguments": {"query": "test"}}}\n' + ) + assert result == expected + + def test_stream_event_json_tool_result(self) -> None: + """Test tool result event formatting for JSON media type.""" + + data = { + "id": 0, + "token": {"tool_name": "search", "response": "Found results"}, + } + result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) + + expected = ( + 'data: {"event": "tool_result", "data": {"id": 0, "token": ' + '{"tool_name": "search", "response": "Found results"}}}\n\n' + ) + assert result == expected + + def test_stream_event_text_tool_result(self) -> None: + """Test tool result event formatting for text media type.""" + + data = { + "id": 0, + "token": {"tool_name": "search", "response": "Found results"}, + } + result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_TEXT) + + expected = ( + '\nTool result: {"id": 0, "token": ' + '{"tool_name": "search", "response": "Found results"}}\n' + ) + assert result == expected + + def test_stream_event_unknown_type(self) -> None: + """Test handling of unknown event types.""" + + data = {"id": 0, "token": "test"} + result = stream_event(data, "unknown_event", MEDIA_TYPE_TEXT) + + assert result == "" + + +class TestOLSStreamEndEvent: + """Test the stream_end_event function for both media types (OLS compatibility).""" + + def test_stream_end_event_json(self) -> None: 
+ """Test end event formatting for JSON media type.""" + + metadata_map = { + "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, + "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, + } + # Create mock objects for the test + mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" + ), + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" + ), + ] + result = stream_end_event( + metadata_map, + mock_token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_JSON, + ) + + # Parse the result to verify structure + data_part = result.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "end" + assert "referenced_documents" in parsed["data"] + assert len(parsed["data"]["referenced_documents"]) == 2 + assert parsed["data"]["referenced_documents"][0]["doc_title"] == "Test Doc 1" + assert ( + parsed["data"]["referenced_documents"][0]["doc_url"] + == "https://example.com/doc1" + ) + assert "available_quotas" in parsed + + def test_stream_end_event_text(self) -> None: + """Test end event formatting for text media type.""" + + metadata_map = { + "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, + "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, + } + # Create mock objects for the test + mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" + ), + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" + ), + ] + result = stream_end_event( + metadata_map, + mock_token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_TEXT, + ) + + expected = ( + "\n\n---\n\nTest Doc 1: https://example.com/doc1\n" + "Test Doc 2: https://example.com/doc2" + ) + assert result == expected + + def test_stream_end_event_text_no_docs(self) -> None: + """Test end event formatting for text media type with no documents.""" + + metadata_map: dict = {} + # Create mock objects for the test + mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents: list[ReferencedDocument] = [] + result = stream_end_event( + metadata_map, + mock_token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_TEXT, + ) + + assert result == "" + + +class TestOLSErrorHandling: + """Test error handling functions (OLS compatibility).""" + + def test_prompt_too_long_error_json(self) -> None: + """Test prompt too long error for JSON media type.""" + + error = Exception("Prompt exceeds maximum length") + result = prompt_too_long_error(error, MEDIA_TYPE_JSON) + + data_part = result.replace("data: ", "").strip() + parsed = json.loads(data_part) + assert parsed["event"] == "error" + assert parsed["data"]["status_code"] == 413 + assert parsed["data"]["response"] == "Prompt is too long" + assert parsed["data"]["cause"] == "Prompt exceeds maximum length" + + def test_prompt_too_long_error_text(self) -> None: + """Test prompt too long error for text media type.""" + + error = Exception("Prompt exceeds maximum length") + result = prompt_too_long_error(error, MEDIA_TYPE_TEXT) + + assert result == "Prompt is too long: Prompt 
exceeds maximum length" + + def test_generic_llm_error_json(self) -> None: + """Test generic LLM error for JSON media type.""" + + error = Exception("Connection failed") + result = generic_llm_error(error, MEDIA_TYPE_JSON) + + data_part = result.replace("data: ", "").strip() + parsed = json.loads(data_part) + assert parsed["event"] == "error" + assert parsed["data"]["response"] == "Internal server error" + assert parsed["data"]["cause"] == "Connection failed" + + def test_generic_llm_error_text(self) -> None: + """Test generic LLM error for text media type.""" + + error = Exception("Connection failed") + result = generic_llm_error(error, MEDIA_TYPE_TEXT) + + assert result == "Error: Connection failed" + + +class TestOLSCompatibilityIntegration: + """Integration tests for OLS compatibility.""" + + def test_media_type_validation(self) -> None: + """Test that media type validation works correctly.""" + + # Valid media types + valid_request = QueryRequest(query="test", media_type="application/json") + assert valid_request.media_type == "application/json" + + valid_request = QueryRequest(query="test", media_type="text/plain") + assert valid_request.media_type == "text/plain" + + # Invalid media type should raise error + with pytest.raises(ValueError, match="media_type must be either"): + QueryRequest(query="test", media_type="invalid/type") + + def test_ols_event_structure(self) -> None: + """Test that events follow OLS structure.""" + + # Test token event structure + token_data = {"id": 0, "token": "Hello"} + token_event = stream_event(token_data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) + + data_part = token_event.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "token" + assert "id" in parsed["data"] + assert "token" in parsed["data"] + assert "role" not in parsed["data"] # Role field is not included + + # Test tool call event structure + tool_data = { + "id": 0, + "token": {"tool_name": "search", "arguments": {"query": "test"}}, + } + tool_event = stream_event(tool_data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) + + data_part = tool_event.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "tool_call" + assert "id" in parsed["data"] + assert "role" not in parsed["data"] + assert "token" in parsed["data"] + + # Test tool result event structure + result_data = { + "id": 0, + "token": {"tool_name": "search", "response": "Found results"}, + } + result_event = stream_event(result_data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) + + data_part = result_event.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "tool_result" + assert "id" in parsed["data"] + assert "role" not in parsed["data"] + assert "token" in parsed["data"] + + def test_ols_end_event_structure(self) -> None: + """Test that end event follows OLS structure.""" + + metadata_map = { + "doc1": {"title": "Test Doc", "docs_url": "https://example.com/doc"} + } + # Create mock objects for the test + mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc"), doc_title="Test Doc" + ), + ] + end_event = stream_end_event( + metadata_map, + mock_token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_JSON, + ) + data_part = end_event.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "end" + assert "referenced_documents" in 
parsed["data"] + assert "truncated" in parsed["data"] + assert "input_tokens" in parsed["data"] + assert "output_tokens" in parsed["data"] + assert "available_quotas" in parsed # At root level, not inside data diff --git a/tests/unit/app/endpoints/test_streaming_query_v2.py b/tests/unit/app/endpoints/test_streaming_query_v2.py deleted file mode 100644 index 69cde6e9e..000000000 --- a/tests/unit/app/endpoints/test_streaming_query_v2.py +++ /dev/null @@ -1,637 +0,0 @@ -# pylint: disable=redefined-outer-name,import-error, too-many-function-args -"""Unit tests for the /streaming_query (v2) endpoint using Responses API.""" - -from typing import Any, AsyncIterator -from unittest.mock import Mock - -import pytest -from fastapi import Request, status -from fastapi.responses import StreamingResponse -import httpx -from llama_stack_client import APIConnectionError, RateLimitError -from pytest_mock import MockerFixture - -from app.endpoints.streaming_query_v2 import ( - retrieve_response, - streaming_query_endpoint_handler_v2, -) -from models.config import Action, ModelContextProtocolServer -from models.requests import QueryRequest -from utils.types import ShieldModerationResult - - -@pytest.fixture -def dummy_request() -> Request: - """Create a dummy FastAPI Request for testing with authorized actions. - - Create a FastAPI Request configured for tests with permissive RBAC. - - Returns: - Request: A FastAPI Request whose `state.authorized_actions` is set to a - set of all `Action` members. - """ - req = Request(scope={"type": "http"}) - # Provide a permissive authorized_actions set to satisfy RBAC check - req.state.authorized_actions = set(Action) - return req - - -@pytest.mark.asyncio -async def test_retrieve_response_builds_rag_and_mcp_tools( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly builds RAG and MCP tools.""" - mock_client = mocker.Mock() - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [mocker.Mock(id="db1")] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - - mock_cfg = mocker.Mock() - mock_cfg.mcp_servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - mocker.patch("app.endpoints.streaming_query_v2.configuration", mock_cfg) - mocker.patch("app.endpoints.query_v2.configuration", mock_cfg) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-z", qr, token="tok") - - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["stream"] is True - tools = kwargs["tools"] - assert isinstance(tools, list) - types = {t.get("type") for t in tools} - # Since we're now skipping RAG tools and 
doing direct vector querying, - # we should only see MCP tools, not file_search tools - assert types == {"mcp"} - - -@pytest.mark.asyncio -async def test_retrieve_response_no_tools_passes_none(mocker: MockerFixture) -> None: - """Test that retrieve_response passes None for tools when no_tools=True.""" - mock_client = mocker.Mock() - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello", no_tools=True) - await retrieve_response(mock_client, "model-z", qr, token="tok") - - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["tools"] is None - assert kwargs["stream"] is True - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_success_yields_events( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming_query_endpoint_handler_v2 yields correct SSE events.""" - # Skip real config checks - patch in streaming_query where the base handler is - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # Replace SSE helpers for deterministic output - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - cleanup_spy = mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Build a fake async stream of chunks - async def fake_stream() -> AsyncIterator[Mock]: - """ - Produce a fake asynchronous stream of response events used for testing streaming endpoints. 
- - Yields Mock objects that emulate event frames from a - streaming responses API, including: - - a "response.created" event with a conversation id, - - content and text delta events ("response.content_part.added", - "response.output_text.delta"), - - function call events ("response.output_item.done" with completed tool call), - - a final "response.output_text.done" event and a "response.completed" event. - - Returns: - AsyncIterator[Mock]: An async iterator that yields - event-like Mock objects representing the streamed - response frames; the final yielded response contains an `output` - attribute (an empty list) to allow shield violation detection in - tests. - """ - yield Mock(type="response.created", response=Mock(id="conv-xyz")) - yield Mock(type="response.content_part.added") - yield Mock(type="response.output_text.delta", delta="Hello ") - yield Mock(type="response.output_text.delta", delta="world") - item_mock = Mock(type="function_call", id="item1", call_id="call1") - item_mock.name = "search" # 'name' is a special Mock param, set explicitly - item_mock.arguments = '{"q":"x"}' - yield Mock(type="response.output_item.done", item=item_mock) - yield Mock(type="response.output_text.done", text="Hello world") - # Include a response object with output attribute for shield violation detection - mock_response = Mock(output=[]) - yield Mock(type="response.completed", response=mock_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_stream(), "abc123def456"), - ) - - metric = mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", True, "token-abc"), # skip_userid_check=True - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - metric.labels("p", "m").inc.assert_called_once() - - # Collect emitted events - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Validate event sequence and content - assert events[0] == "START:abc123def456\n" - # content_part.added triggers empty token - assert events[1] == "EV:token:\n" - assert events[2] == "EV:token:Hello \n" - assert events[3] == "EV:token:world\n" - # tool call delta - assert events[4].startswith("EV:tool_call:") - # turn complete and end - assert "EV:turn_complete:Hello world\n" in events - assert events[-1] == "END\n" - - # Verify cleanup function was invoked after streaming - assert cleanup_spy.call_count == 1 - # Verify cleanup was called with correct user_id and conversation_id - call_args = cleanup_spy.call_args - assert call_args.kwargs["user_id"] == "user123" - assert call_args.kwargs["conversation_id"] == "abc123def456" - assert call_args.kwargs["model_id"] == "m" - assert call_args.kwargs["provider_id"] == "p" - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_api_connection_error( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming_query_endpoint_handler_v2 handles API connection errors.""" - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - def _raise(*_a: Any, **_k: Any) -> None: - """ - Always raises an APIConnectionError with its `request` attribute set to None. - - Raises: - APIConnectionError: Raised every time the function is called; the - exception's `request` is None. 
- """ - raise APIConnectionError(request=None) # type: ignore[arg-type] - - mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise) - - fail_metric = mocker.patch("metrics.llm_calls_failures_total") - - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - - response = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", False, "tok"), - mcp_headers={}, - ) - - assert isinstance(response, StreamingResponse) - assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - fail_metric.inc.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: - """Test that shield moderation runs and passes when content is safe.""" - mock_client = mocker.Mock() - - # Create mock shield with provider_resource_id - mock_shield = mocker.Mock() - mock_shield.identifier = "content-safety-shield" - mock_shield.provider_resource_id = "moderation-model" - mock_client.shields.list = mocker.AsyncMock(return_value=[mock_shield]) - - # Create mock model matching the shield's provider_resource_id - mock_model = mocker.Mock() - mock_model.id = "moderation-model" - mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) - - # Mock moderations.create to return safe (not flagged) content - mock_moderation_result = mocker.Mock() - mock_moderation_result.flagged = False - mock_moderation_response = mocker.Mock() - mock_moderation_response.results = [mock_moderation_result] - mock_client.moderations.create = mocker.AsyncMock( - return_value=mock_moderation_response - ) - - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-shields", qr, token="tok") - - # Verify that moderation was called with the user's query - mock_client.moderations.create.assert_called_once_with( - input="hello", model="moderation-model" - ) - # Verify that responses.create was called (moderation passed) - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_with_no_shields_available( - mocker: MockerFixture, -) -> None: - """Test that LLM is called when no shields are configured.""" - mock_client = mocker.Mock() - - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - 
mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-no-shields", qr, token="tok") - - # Verify that responses.create was called - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_streaming_response_blocked_by_shield_moderation( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that when shield moderation blocks, a violation stream is returned.""" - # Skip real config checks - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # SSE helpers - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Build a fake async stream for violation response - async def fake_violation_stream() -> AsyncIterator[Mock]: - """Produce an async iterator simulating a shield violation response.""" - yield Mock( - type="response.content_part.added", - response_id="resp_shield", - item_id="msg_shield", - ) - yield Mock( - type="response.output_text.delta", delta="Content violates safety policy" - ) - violation_item = Mock( - type="message", - role="assistant", - content="Content violates safety policy", - refusal=None, - ) - yield Mock( - type="response.completed", - response=Mock(id="resp_shield", output=[violation_item]), - ) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_violation_stream(), "conv123"), - ) - - mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="dangerous query"), - auth=("user123", "", True, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - - # Collect emitted events to trigger the generator - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Verify that the stream contains the violation message - all_events = "".join(events) - assert "Content violates safety policy" in all_events - - -@pytest.mark.asyncio -async def test_streaming_response_no_shield_violation( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that no metric is incremented when there's no shield violation in streaming.""" - # Skip 
real config checks - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # SSE helpers - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Mock the validation error metric - validation_metric = mocker.patch("metrics.llm_calls_validation_errors_total") - - # Build a fake async stream without violation - async def fake_stream_without_violation() -> AsyncIterator[Mock]: - """ - Produce a deterministic sequence of streaming response events that end with a message. - - Yields four events in order: - - `response.created` with a response id, - - `response.output_text.delta` with a text fragment, - - `response.output_text.done` with the final text, - - `response.completed` whose `response.output` contains an assistant - message where `refusal` is `None`. - - Returns: - An iterator yielding Mock objects representing the - streaming events of a successful response with no refusal. 
- """ - yield Mock(type="response.created", response=Mock(id="conv-safe")) - yield Mock(type="response.output_text.delta", delta="Safe ") - yield Mock(type="response.output_text.done", text="Safe response") - # Response completed without refusal - safe_item = Mock(type="message", role="assistant", refusal=None) - response_safe = Mock(id="conv-safe", output=[safe_item]) - yield Mock(type="response.completed", response=response_safe) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_stream_without_violation(), ""), - ) - - mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="safe query"), - auth=("user123", "", True, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - - # Collect emitted events to trigger the generator - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Verify that the validation error metric was NOT incremented - validation_metric.inc.assert_not_called() - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_quota_exceeded( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming query endpoint v2 streams HTTP 429 when model quota is exceeded.""" - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test")) - mock_client.responses.create.side_effect = RateLimitError( - "Rate limit exceeded for model gpt-4o-mini", - response=mock_response, - body=None, - ) - # Mock conversation creation (needed for query_v2) - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mocker.Mock(data=[])) - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"), - ) - mocker.patch("app.endpoints.streaming_query.validate_model_provider_override") - mocker.patch( - "app.endpoints.streaming_query_v2.run_shield_moderation", - return_value=ShieldModerationResult(blocked=False), - ) - mocker.patch( - "app.endpoints.streaming_query_v2.prepare_tools_for_responses_api", - return_value=None, - ) - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.to_llama_stack_conversation_id", - return_value="conv_abc123", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.normalize_conversation_id", - return_value="abc123", - ) - - response = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="What is OpenStack?"), - auth=("user123", "", False, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(response, StreamingResponse) - assert 
response.status_code == status.HTTP_429_TOO_MANY_REQUESTS - - # Read the streamed error response (SSE format) - content = b"" - async for chunk in response.body_iterator: - if isinstance(chunk, bytes): - content += chunk - elif isinstance(chunk, str): - content += chunk.encode() - else: - # Handle memoryview or other types - content += bytes(chunk) - - content_str = content.decode() - # The error is formatted as SSE: data: {"event":"error","response":"...","cause":"..."}\n\n - # Check for the error message in the content - assert "The quota has been exceeded" in content_str - assert "gpt-4o-mini" in content_str diff --git a/tests/unit/app/test_routers.py b/tests/unit/app/test_routers.py index 0da8c15f1..6ef18c5a5 100644 --- a/tests/unit/app/test_routers.py +++ b/tests/unit/app/test_routers.py @@ -9,17 +9,17 @@ from app.endpoints import ( conversations_v2, conversations_v3, + query, root, info, models, shields, rags, providers, - query_v2, health, config, feedback, - streaming_query_v2, + streaming_query, authorized, metrics, tools, @@ -115,9 +115,9 @@ def test_include_routers() -> None: assert shields.router in app.get_routers() assert providers.router in app.get_routers() # assert query.router in app.get_routers() - assert query_v2.router in app.get_routers() + assert query.router in app.get_routers() # assert streaming_query.router in app.get_routers() - assert streaming_query_v2.router in app.get_routers() + assert streaming_query.router in app.get_routers() assert config.router in app.get_routers() assert feedback.router in app.get_routers() assert health.router in app.get_routers() @@ -155,8 +155,8 @@ def test_check_prefixes() -> None: assert app.get_router_prefix(rags.router) == "/v1" # assert app.get_router_prefix(query.router) == "/v1" # assert app.get_router_prefix(streaming_query.router) == "/v1" - assert app.get_router_prefix(query_v2.router) == "/v1" - assert app.get_router_prefix(streaming_query_v2.router) == "/v1" + assert app.get_router_prefix(query.router) == "/v1" + assert app.get_router_prefix(streaming_query.router) == "/v1" assert app.get_router_prefix(config.router) == "/v1" assert app.get_router_prefix(feedback.router) == "/v1" assert app.get_router_prefix(health.router) == "" From abccf0735581d3ac62d0c44e6077b672bda6d32f Mon Sep 17 00:00:00 2001 From: Anxhela Coba Date: Fri, 13 Feb 2026 11:45:27 -0500 Subject: [PATCH 9/9] rebase main Signed-off-by: Anxhela Coba --- .github/PULL_REQUEST_TEMPLATE.md | 1 + .tekton/lightspeed-stack-pull-request.yaml | 124 +- .tekton/lightspeed-stack-push.yaml | 83 +- Containerfile | 15 +- Makefile | 3 + README.md | 182 +- docs/config.html | 10 +- docs/config.json | 17 +- docs/config.md | 5 +- docs/config.png | Bin 483521 -> 418276 bytes docs/config.puml | 2 +- docs/config.svg | 4 +- docs/openapi.json | 328 +- docs/openapi.md | 174 +- docs/splunk.md | 2 +- pyproject.toml | 15 +- requirements-build.txt | 3 + requirements.hashes.source.txt | 115 +- requirements.hashes.wheel.txt | 7 - requirements.overrides.txt | 1 + rpms.in.yaml | 9 +- rpms.lock.yaml | 716 +---- src/app/endpoints/README.md | 15 +- src/app/endpoints/a2a.py | 54 +- src/app/endpoints/conversations.py | 390 --- ...onversations_v3.py => conversations_v1.py} | 173 +- src/app/endpoints/conversations_v2.py | 56 +- src/app/endpoints/models.py | 22 +- src/app/endpoints/query.py | 1047 ++----- src/app/endpoints/query_old.py | 579 ---- src/app/endpoints/rlsapi_v1.py | 23 +- src/app/endpoints/streaming_query.py | 1017 ++++--- src/app/endpoints/streaming_query_old.py | 726 ----- 
src/app/main.py | 18 +- src/app/routers.py | 15 +- src/authentication/noop.py | 4 +- src/authentication/noop_with_token.py | 4 +- src/cache/postgres_cache.py | 4 +- src/cache/sqlite_cache.py | 4 +- src/client.py | 21 +- src/constants.py | 7 + src/models/cache_entry.py | 3 +- src/models/config.py | 36 +- src/models/context.py | 11 +- src/models/database/conversations.py | 32 +- src/models/requests.py | 15 + src/models/responses.py | 156 +- src/observability/README.md | 2 +- src/runners/uvicorn.py | 3 +- src/utils/README.md | 6 + src/utils/conversations.py | 425 +++ src/utils/endpoints.py | 490 +-- src/utils/mcp_headers.py | 10 +- src/utils/prompts.py | 97 + src/utils/query.py | 721 ++++- src/utils/quota.py | 15 +- src/utils/responses.py | 768 ++++- src/utils/schema_dumper.py | 14 +- src/utils/shields.py | 51 +- src/utils/suid.py | 6 +- src/utils/transcripts.py | 11 +- src/utils/types.py | 109 +- src/utils/vector_search.py | 8 +- src/version.py | 2 +- test.containerfile | 45 +- tests/benchmarks/test_app_database.py | 406 ++- ...peed-stack.yaml => benchmarks-sqlite.yaml} | 0 tests/e2e/features/authorized_noop.feature | 6 +- .../features/authorized_noop_token.feature | 6 +- .../features/conversation_cache_v2.feature | 10 +- tests/e2e/features/conversations.feature | 13 +- tests/e2e/features/info.feature | 4 +- tests/e2e/features/query.feature | 31 +- tests/e2e/features/steps/README.md | 3 + tests/e2e/features/steps/token_counters.py | 204 ++ tests/e2e/features/streaming_query.feature | 67 +- tests/integration/endpoints/README.md | 2 +- .../endpoints/test_config_integration.py | 8 +- .../endpoints/test_query_v2_integration.py | 74 +- .../endpoints/test_rlsapi_v1_integration.py | 113 +- tests/integration/test_configuration.py | 2 +- tests/integration/test_openapi_json.py | 4 +- tests/profiles/empty.py | 1 + tests/profiles/syntax_error.py | 3 + tests/profiles/test_four/profile.py | 49 + tests/unit/app/endpoints/README.md | 6 - tests/unit/app/endpoints/test_a2a.py | 56 +- .../unit/app/endpoints/test_conversations.py | 1339 ++++++--- .../app/endpoints/test_conversations_v2.py | 246 +- tests/unit/app/endpoints/test_models.py | 258 +- tests/unit/app/endpoints/test_query.py | 1603 ++++------ tests/unit/app/endpoints/test_query_old.py | 486 --- tests/unit/app/endpoints/test_rlsapi_v1.py | 140 +- .../app/endpoints/test_streaming_query.py | 2642 +++++++++++++---- .../app/endpoints/test_streaming_query_old.py | 654 ---- tests/unit/app/test_routers.py | 12 +- tests/unit/authentication/test_noop.py | 19 +- .../authentication/test_noop_with_token.py | 23 + tests/unit/cache/test_postgres_cache.py | 4 +- tests/unit/cache/test_sqlite_cache.py | 4 +- .../test_authentication_configuration.py | 10 +- .../models/config/test_dump_configuration.py | 5 + .../config/test_llama_stack_configuration.py | 35 + .../config/test_service_configuration.py | 22 + tests/unit/models/responses/README.md | 6 + .../models/responses/test_query_response.py | 4 +- .../models/responses/test_response_types.py | 83 + .../responses/test_successful_responses.py | 146 +- tests/unit/models/responses/test_types.py | 83 + .../unit/observability/formats/test_rlsapi.py | 4 +- tests/unit/runners/test_uvicorn_runner.py | 28 + tests/unit/test_configuration.py | 121 +- tests/unit/utils/README.md | 11 +- tests/unit/utils/test_checks.py | 60 + tests/unit/utils/test_conversations.py | 722 +++++ tests/unit/utils/test_endpoints.py | 717 ++--- tests/unit/utils/test_prompts.py | 304 ++ tests/unit/utils/test_query.py | 1027 +++++++ 
tests/unit/utils/test_responses.py | 1332 ++++++++- tests/unit/utils/test_shields.py | 13 +- tests/unit/utils/test_suid.py | 25 + tests/unit/utils/test_types.py | 148 +- ubi.repo | 62 - uv.lock | 686 +++-- 124 files changed, 14165 insertions(+), 9018 deletions(-) delete mode 100644 src/app/endpoints/conversations.py rename src/app/endpoints/{conversations_v3.py => conversations_v1.py} (80%) delete mode 100644 src/app/endpoints/query_old.py delete mode 100644 src/app/endpoints/streaming_query_old.py create mode 100644 src/utils/conversations.py create mode 100644 src/utils/prompts.py rename tests/configuration/{benchmarks-lightspeed-stack.yaml => benchmarks-sqlite.yaml} (100%) create mode 100644 tests/e2e/features/steps/token_counters.py create mode 100644 tests/profiles/empty.py create mode 100644 tests/profiles/syntax_error.py create mode 100644 tests/profiles/test_four/profile.py delete mode 100644 tests/unit/app/endpoints/test_query_old.py delete mode 100644 tests/unit/app/endpoints/test_streaming_query_old.py create mode 100644 tests/unit/models/responses/test_response_types.py create mode 100644 tests/unit/models/responses/test_types.py create mode 100644 tests/unit/utils/test_conversations.py create mode 100644 tests/unit/utils/test_prompts.py create mode 100644 tests/unit/utils/test_query.py delete mode 100644 ubi.repo diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index eadaba657..a8cb3b673 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -19,6 +19,7 @@ - [ ] Unit tests improvement - [ ] Integration tests improvement - [ ] End to end tests improvement +- [ ] Benchmarks improvement ## Tools used to create PR diff --git a/.tekton/lightspeed-stack-pull-request.yaml b/.tekton/lightspeed-stack-pull-request.yaml index 2fc8d0d96..e11d5199d 100644 --- a/.tekton/lightspeed-stack-pull-request.yaml +++ b/.tekton/lightspeed-stack-pull-request.yaml @@ -8,9 +8,8 @@ metadata: build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/cancel-in-progress: "true" pipelinesascode.tekton.dev/max-keep-runs: "3" - pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch - == "main" - creationTimestamp: null + pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch == "main" + creationTimestamp: labels: appstudio.openshift.io/application: lightspeed-stack appstudio.openshift.io/component: lightspeed-stack @@ -40,32 +39,34 @@ spec: # those need cmake to build: pyarrow # those need cargo to build: jiter, tiktoken, cryptography, fastuuid, hf_xet, maturin, pydantic_core, rpds_py, safetensors, tokenizers value: | - [ - { - "type": "rpm", - "path": "." 
- }, - { - "type": "pip", - "path": ".", - "requirements_files": [ - "requirements.hashes.wheel.txt", - "requirements.hashes.source.txt", - "requirements.hermetic.txt" - ], - "requirements_build_files": ["requirements-build.txt"], - "binary": { - "packages": "accelerate,aiohappyeyeballs,aiohttp,aiosignal,aiosqlite,annotated-doc,annotated-types,anyio,asyncpg,attrs,autoevals,cffi,charset-normalizer,chevron,click,cryptography,datasets,dill,distro,dnspython,durationpy,faiss-cpu,fire,frozenlist,fsspec,googleapis-common-protos,grpcio,h11,hf-xet,httpcore,httpx,httpx-sse,huggingface-hub,idna,jinja2,jiter,joblib,jsonschema-specifications,lxml,markdown-it-py,mdurl,mpmath,networkx,nltk,numpy,oauthlib,opentelemetry-api,opentelemetry-exporter-otlp,opentelemetry-exporter-otlp-proto-common,opentelemetry-exporter-otlp-proto-grpc,opentelemetry-exporter-otlp-proto-http,opentelemetry-instrumentation,opentelemetry-proto,opentelemetry-sdk,opentelemetry-semantic-conventions,packaging,pandas,pillow,ply,polyleven,prompt-toolkit,propcache,proto-plus,psycopg2-binary,pyaml,pyarrow,pyasn1,pyasn1-modules,pydantic,pydantic-core,pydantic-settings,pygments,python-dateutil,python-dotenv,pytz,pyyaml,referencing,requests,requests-oauthlib,rpds-py,rsa,safetensors,scikit-learn,scipy,setuptools,six,sniffio,sqlalchemy,starlette,sympy,threadpoolctl,tiktoken,tokenizers,torch,tornado,transformers,triton,typing-extensions,typing-inspection,tzdata,websocket-client,wrapt,xxhash,yarl,zipp,uv,pip,maturin", - "os": "linux", - "arch": "x86_64,aarch64", - "py_version": 312 - } - } - ] + [ + { + "type": "rpm", + "path": "." + }, + { + "type": "pip", + "path": ".", + "requirements_files": [ + "requirements.hashes.wheel.txt", + "requirements.hashes.source.txt", + "requirements.hermetic.txt" + ], + "requirements_build_files": ["requirements-build.txt"], + "binary": { + "packages": "accelerate,aiohappyeyeballs,aiohttp,aiosignal,aiosqlite,annotated-doc,annotated-types,anyio,asyncpg,attrs,autoevals,cffi,charset-normalizer,chevron,click,cryptography,datasets,dill,distro,dnspython,durationpy,faiss-cpu,fire,frozenlist,fsspec,googleapis-common-protos,grpcio,h11,hf-xet,httpcore,httpx,httpx-sse,idna,jinja2,jiter,joblib,jsonschema-specifications,lxml,markdown-it-py,mdurl,mpmath,networkx,nltk,numpy,oauthlib,opentelemetry-api,opentelemetry-exporter-otlp,opentelemetry-exporter-otlp-proto-common,opentelemetry-exporter-otlp-proto-grpc,opentelemetry-exporter-otlp-proto-http,opentelemetry-instrumentation,opentelemetry-proto,opentelemetry-sdk,opentelemetry-semantic-conventions,packaging,pandas,pillow,ply,prompt-toolkit,propcache,psycopg2-binary,pyaml,pyarrow,pyasn1,pyasn1-modules,pydantic,pydantic-core,pydantic-settings,pygments,python-dateutil,python-dotenv,pytz,pyyaml,referencing,requests,requests-oauthlib,rpds-py,rsa,safetensors,scikit-learn,scipy,setuptools,six,sniffio,sqlalchemy,starlette,sympy,threadpoolctl,tiktoken,tokenizers,torch,tornado,transformers,triton,typing-extensions,typing-inspection,tzdata,websocket-client,wrapt,xxhash,yarl,zipp,uv,pip,maturin", + "os": "linux", + "arch": "x86_64,aarch64", + "py_version": 312 + } + } + ] - name: hermetic value: 'true' - name: dockerfile value: Containerfile + - name: build-args-file + value: build-args-konflux.conf pipelineSpec: description: | This pipeline is ideal for building multi-arch container images from a Containerfile while maintaining trust after pipeline customization. @@ -84,19 +85,13 @@ spec: name: output-image type: string - default: . 
- description: Path to the source code of an application's component from where - to build image. + description: Path to the source code of an application's component from where to build image. name: path-context type: string - default: Dockerfile - description: Path to the Dockerfile inside the context specified by parameter - path-context + description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -110,8 +105,7 @@ spec: name: prefetch-input type: string - default: "" - description: Image tag expiration time, time values could be something like - 1h, 2d, 3w for hours, days, and weeks, respectively. + description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. name: image-expires-after type: string - default: "false" @@ -123,8 +117,7 @@ spec: name: build-image-index type: string - default: docker - description: The format for the resulting image's mediaType. Valid values are - oci or docker. + description: The format for the resulting image's mediaType. Valid values are oci or docker. name: buildah-format type: string - default: [] @@ -136,14 +129,12 @@ spec: name: build-args-file type: string - default: "false" - description: Whether to enable privileged mode, should be used only with remote - VMs + description: Whether to enable privileged mode, should be used only with remote VMs name: privileged-nested type: string - default: - linux/x86_64 - description: List of platforms to build the container images on. The available - set of values is determined by the configuration of the multi-platform-controller. + description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller. 
name: build-platforms type: array - name: enable-cache-proxy @@ -166,12 +157,6 @@ spec: tasks: - name: init params: - - name: image-url - value: $(params.output-image) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) - name: enable-cache-proxy value: $(params.enable-cache-proxy) taskRef: @@ -179,7 +164,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -204,11 +189,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" workspaces: - name: basic-auth workspace: git-auth @@ -287,15 +267,10 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:da99fce12bf72da86f6a86a5370d826c16ea8db001d27181dcaf087af9ab60cb + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:4ba24114693920806b35f398fe766c167c18c77fab5f0648a0e1c0de702e4a47 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - name: build-image-index params: - name: IMAGE @@ -318,15 +293,10 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:05d3d8a5ded44c51b074a56a408ddf5d65c56b4c15e110abb1a99e3aff269d49 + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - name: build-source-image params: - name: BINARY_IMAGE @@ -349,10 +319,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - input: $(params.build-source-image) operator: in values: @@ -397,7 +363,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:3ff4d1c3c503454c6b7f072e225df43656fb415a5d2a658ab6ce279c0dc128aa - name: kind value: task resolver: bundles @@ -422,7 +388,7 @@ spec: - name: name value: ecosystem-cert-preflight-checks - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:204fd3914d83c7b60e8eee72b5a944337720c79a3e660e7c994435456dcf7175 + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:33b7133c0c132c361295c30947f73bd45a3a3b62a24b83f3d8cd7c71f757828c - name: kind value: task resolver: bundles @@ -450,7 +416,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:a70272ae12f6d7f0da2902158e1bcee756877aa8f71fd1a22ef9afd8b177fb41 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -477,7 +443,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: 
quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:76efc0119a10bc8a420dbbb0cdab9ef8eafd263f6827498d2b644e450e93f446 + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -522,7 +488,7 @@ spec: - name: name value: sast-coverity-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:9d0bc704bca6b6faad37b2ce8106b96e0cef35e1f647d037a878bf416589de9d + value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:e8c63570f1d01d70b2a21b22a2a4aad9ca7d5c0327d8b2a4058a6e616cce17ca - name: kind value: task resolver: bundles @@ -543,7 +509,7 @@ spec: - name: name value: coverity-availability-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:267d5bc069a0323f41e24732ddfd1057e5c639e853d1e620c67505fab78f1301 + value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:a24d8f3cd01ccc54fa6fb73aa57a78f5559a0e58eddfe0583fc9cb97d59b4efc - name: kind value: task resolver: bundles @@ -569,7 +535,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:e7a51575f9188a1461d4520da25aaa4efdd3b896c97dc750941fa22840e55c13 + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -595,7 +561,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:1818a5b3e4fa86c838ae71226a157241967d1f19c5ed377e4b2fddad7a3ceefe + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -640,7 +606,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles @@ -657,7 +623,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:fb6c97a57e221fa106a8b45be3e12c49e7124a3a8e2a0f0d5fbaeb17b5bf68a5 + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles diff --git a/.tekton/lightspeed-stack-push.yaml b/.tekton/lightspeed-stack-push.yaml index 282be81ed..7b5844eab 100644 --- a/.tekton/lightspeed-stack-push.yaml +++ b/.tekton/lightspeed-stack-push.yaml @@ -7,9 +7,8 @@ metadata: build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/cancel-in-progress: "false" pipelinesascode.tekton.dev/max-keep-runs: "3" - pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch - == "main" - creationTimestamp: null + pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "main" + creationTimestamp: labels: appstudio.openshift.io/application: lightspeed-stack 
appstudio.openshift.io/component: lightspeed-stack @@ -47,7 +46,7 @@ spec: ], "requirements_build_files": ["requirements-build.txt"], "binary": { - "packages": "accelerate,aiohappyeyeballs,aiohttp,aiosignal,aiosqlite,annotated-doc,annotated-types,anyio,asyncpg,attrs,autoevals,cffi,charset-normalizer,chevron,click,cryptography,datasets,dill,distro,dnspython,durationpy,faiss-cpu,fire,frozenlist,fsspec,googleapis-common-protos,grpcio,h11,hf-xet,httpcore,httpx,httpx-sse,huggingface-hub,idna,jinja2,jiter,joblib,jsonschema-specifications,lxml,markdown-it-py,mdurl,mpmath,networkx,nltk,numpy,oauthlib,opentelemetry-api,opentelemetry-exporter-otlp,opentelemetry-exporter-otlp-proto-common,opentelemetry-exporter-otlp-proto-grpc,opentelemetry-exporter-otlp-proto-http,opentelemetry-instrumentation,opentelemetry-proto,opentelemetry-sdk,opentelemetry-semantic-conventions,packaging,pandas,pillow,ply,polyleven,prompt-toolkit,propcache,proto-plus,psycopg2-binary,pyaml,pyarrow,pyasn1,pyasn1-modules,pydantic,pydantic-core,pydantic-settings,pygments,python-dateutil,python-dotenv,pytz,pyyaml,referencing,requests,requests-oauthlib,rpds-py,rsa,safetensors,scikit-learn,scipy,setuptools,six,sniffio,sqlalchemy,starlette,sympy,threadpoolctl,tiktoken,tokenizers,torch,tornado,transformers,triton,typing-extensions,typing-inspection,tzdata,websocket-client,wrapt,xxhash,yarl,zipp,uv,pip,maturin", + "packages": "accelerate,aiohappyeyeballs,aiohttp,aiosignal,aiosqlite,annotated-doc,annotated-types,anyio,asyncpg,attrs,autoevals,cffi,charset-normalizer,chevron,click,cryptography,datasets,dill,distro,dnspython,durationpy,faiss-cpu,fire,frozenlist,fsspec,googleapis-common-protos,grpcio,h11,hf-xet,httpcore,httpx,httpx-sse,idna,jinja2,jiter,joblib,jsonschema-specifications,lxml,markdown-it-py,mdurl,mpmath,networkx,nltk,numpy,oauthlib,opentelemetry-api,opentelemetry-exporter-otlp,opentelemetry-exporter-otlp-proto-common,opentelemetry-exporter-otlp-proto-grpc,opentelemetry-exporter-otlp-proto-http,opentelemetry-instrumentation,opentelemetry-proto,opentelemetry-sdk,opentelemetry-semantic-conventions,packaging,pandas,pillow,ply,prompt-toolkit,propcache,psycopg2-binary,pyaml,pyarrow,pyasn1,pyasn1-modules,pydantic,pydantic-core,pydantic-settings,pygments,python-dateutil,python-dotenv,pytz,pyyaml,referencing,requests,requests-oauthlib,rpds-py,rsa,safetensors,scikit-learn,scipy,setuptools,six,sniffio,sqlalchemy,starlette,sympy,threadpoolctl,tiktoken,tokenizers,torch,tornado,transformers,triton,typing-extensions,typing-inspection,tzdata,websocket-client,wrapt,xxhash,yarl,zipp,uv,pip,maturin", "os": "linux", "arch": "x86_64,aarch64", "py_version": 312 @@ -58,6 +57,8 @@ spec: value: 'true' - name: dockerfile value: Containerfile + - name: build-args-file + value: build-args-konflux.conf pipelineSpec: description: | This pipeline is ideal for building multi-arch container images from a Containerfile while maintaining trust after pipeline customization. @@ -76,19 +77,13 @@ spec: name: output-image type: string - default: . - description: Path to the source code of an application's component from where - to build image. + description: Path to the source code of an application's component from where to build image. 
name: path-context type: string - default: Dockerfile - description: Path to the Dockerfile inside the context specified by parameter - path-context + description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -102,8 +97,7 @@ spec: name: prefetch-input type: string - default: "" - description: Image tag expiration time, time values could be something like - 1h, 2d, 3w for hours, days, and weeks, respectively. + description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. name: image-expires-after type: string - default: "false" @@ -115,8 +109,7 @@ spec: name: build-image-index type: string - default: docker - description: The format for the resulting image's mediaType. Valid values are - oci or docker. + description: The format for the resulting image's mediaType. Valid values are oci or docker. name: buildah-format type: string - default: [] @@ -128,14 +121,12 @@ spec: name: build-args-file type: string - default: "false" - description: Whether to enable privileged mode, should be used only with remote - VMs + description: Whether to enable privileged mode, should be used only with remote VMs name: privileged-nested type: string - default: - linux/x86_64 - description: List of platforms to build the container images on. The available - set of values is determined by the configuration of the multi-platform-controller. + description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller. 
name: build-platforms type: array results: @@ -153,19 +144,12 @@ spec: value: $(tasks.clone-repository.results.commit) tasks: - name: init - params: - - name: image-url - value: $(params.output-image) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) taskRef: params: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -190,11 +174,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" workspaces: - name: basic-auth workspace: git-auth @@ -269,15 +248,10 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:da99fce12bf72da86f6a86a5370d826c16ea8db001d27181dcaf087af9ab60cb + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:4ba24114693920806b35f398fe766c167c18c77fab5f0648a0e1c0de702e4a47 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - name: build-image-index params: - name: IMAGE @@ -300,15 +274,10 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:05d3d8a5ded44c51b074a56a408ddf5d65c56b4c15e110abb1a99e3aff269d49 + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - name: build-source-image params: - name: BINARY_IMAGE @@ -331,10 +300,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: - - "true" - input: $(params.build-source-image) operator: in values: @@ -379,7 +344,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:3ff4d1c3c503454c6b7f072e225df43656fb415a5d2a658ab6ce279c0dc128aa - name: kind value: task resolver: bundles @@ -404,7 +369,7 @@ spec: - name: name value: ecosystem-cert-preflight-checks - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:204fd3914d83c7b60e8eee72b5a944337720c79a3e660e7c994435456dcf7175 + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:33b7133c0c132c361295c30947f73bd45a3a3b62a24b83f3d8cd7c71f757828c - name: kind value: task resolver: bundles @@ -432,7 +397,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:a70272ae12f6d7f0da2902158e1bcee756877aa8f71fd1a22ef9afd8b177fb41 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -459,7 +424,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: 
quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:76efc0119a10bc8a420dbbb0cdab9ef8eafd263f6827498d2b644e450e93f446 + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -504,7 +469,7 @@ spec: - name: name value: sast-coverity-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:9d0bc704bca6b6faad37b2ce8106b96e0cef35e1f647d037a878bf416589de9d + value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:e8c63570f1d01d70b2a21b22a2a4aad9ca7d5c0327d8b2a4058a6e616cce17ca - name: kind value: task resolver: bundles @@ -525,7 +490,7 @@ spec: - name: name value: coverity-availability-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:267d5bc069a0323f41e24732ddfd1057e5c639e853d1e620c67505fab78f1301 + value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:a24d8f3cd01ccc54fa6fb73aa57a78f5559a0e58eddfe0583fc9cb97d59b4efc - name: kind value: task resolver: bundles @@ -551,7 +516,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:e7a51575f9188a1461d4520da25aaa4efdd3b896c97dc750941fa22840e55c13 + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -577,7 +542,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:1818a5b3e4fa86c838ae71226a157241967d1f19c5ed377e4b2fddad7a3ceefe + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -622,7 +587,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles @@ -639,7 +604,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:fb6c97a57e221fa106a8b45be3e12c49e7124a3a8e2a0f0d5fbaeb17b5bf68a5 + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles diff --git a/Containerfile b/Containerfile index fb040d60d..8e79bd999 100644 --- a/Containerfile +++ b/Containerfile @@ -1,6 +1,12 @@ # vim: set filetype=dockerfile -FROM registry.access.redhat.com/ubi9/python-312 AS builder +ARG BUILDER_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312 +ARG BUILDER_DNF_COMMAND=dnf +ARG RUNTIME_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312-minimal +ARG RUNTIME_DNF_COMMAND=microdnf +FROM ${BUILDER_BASE_IMAGE} AS builder + +ARG BUILDER_DNF_COMMAND=dnf ARG APP_ROOT=/app-root ARG LSC_SOURCE_DIR=. 
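The builder and runtime base images, and the dnf command each stage uses, are now build arguments; in Konflux the overrides come from build-args-konflux.conf via the pipelines' new build-args-file parameter. A minimal sketch of the equivalent local build, assuming podman is available (the lightspeed-stack:dev tag is illustrative, and the values shown are simply the Containerfile defaults):

# Hypothetical local build; every --build-arg below matches an ARG
# declared in the Containerfile, with its default value repeated here.
podman build -f Containerfile \
  --build-arg BUILDER_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312 \
  --build-arg BUILDER_DNF_COMMAND=dnf \
  --build-arg RUNTIME_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312-minimal \
  --build-arg RUNTIME_DNF_COMMAND=microdnf \
  -t lightspeed-stack:dev .

Swapping in a different UBI variant only requires changing the two image arguments and, if the variant ships a different package manager, the matching *_DNF_COMMAND value.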
@@ -18,7 +24,7 @@ USER root # Install gcc - required by polyleven python package on aarch64 # (dependency of autoevals, no pre-built binary wheels for linux on aarch64) # cmake and cargo are required by fastuuid, maturin -RUN dnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs gcc cmake cargo +RUN ${BUILDER_DNF_COMMAND} install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs gcc gcc-c++ cmake cargo # Install uv package manager RUN pip3.12 install "uv>=0.8.15" @@ -51,7 +57,8 @@ RUN if [ -f /cachi2/cachi2.env ]; then \ RUN uv pip uninstall ecdsa # Final image without uv package manager -FROM registry.access.redhat.com/ubi9/python-312-minimal +FROM ${RUNTIME_BASE_IMAGE} +ARG RUNTIME_DNF_COMMAND=microdnf ARG APP_ROOT=/app-root WORKDIR /app-root @@ -79,7 +86,7 @@ COPY --from=builder /app-root/LICENSE /licenses/ USER root # Additional tools for derived images -RUN microdnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs jq patch libpq libtiff openjpeg2 lcms2 libjpeg-turbo libwebp +RUN ${RUNTIME_DNF_COMMAND} install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs jq patch # Create llama-stack directories for library mode RUN mkdir -p /opt/app-root/src/.llama/storage /opt/app-root/src/.llama/providers.d && \ diff --git a/Makefile b/Makefile index 62a7f4a8d..c050f112b 100644 --- a/Makefile +++ b/Makefile @@ -124,6 +124,9 @@ upload-distribution-archives: ## Upload distribution archives into Python regist konflux-requirements: ## generate hermetic requirements.*.txt file for konflux build ./scripts/konflux_requirements.sh +konflux-rpm-lock: ## generate rpm.lock.yaml file for konflux build + ./scripts/generate-rpm-lock.sh + help: ## Show this help screen @echo 'Usage: make ... ' @echo '' diff --git a/README.md b/README.md index 2be2cdeca..9775e8fa7 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ [![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/lightspeed-core/lightspeed-stack/blob/main/LICENSE) [![made-with-python](https://img.shields.io/badge/Made%20with-Python-1f425f.svg)](https://www.python.org/) [![Required Python version](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2Flightspeed-core%2Flightspeed-stack%2Frefs%2Fheads%2Fmain%2Fpyproject.toml)](https://www.python.org/) -[![Tag](https://img.shields.io/github/v/tag/lightspeed-core/lightspeed-stack)](https://github.com/lightspeed-core/lightspeed-stack/releases/tag/0.4.0) +[![Tag](https://img.shields.io/github/v/tag/lightspeed-core/lightspeed-stack)](https://github.com/lightspeed-core/lightspeed-stack/releases/tag/0.4.1) Lightspeed Core Stack (LCS) is an AI-powered assistant that provides answers to product questions using backend LLM services, agents, and RAG databases. 
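The Containerfile changes above parameterize the base images and the package managers used in the builder and runtime stages. For illustration, the new `ARG`s can be overridden at build time; a sketch (the image tag is illustrative, and the values shown are simply the declared defaults, included to show the mechanism):

```shell
# Build with explicitly chosen base images and matching package managers;
# swap in different bases (e.g. RHEL images) as needed.
podman build \
  --build-arg BUILDER_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312 \
  --build-arg RUNTIME_BASE_IMAGE=registry.access.redhat.com/ubi9/python-312-minimal \
  --build-arg BUILDER_DNF_COMMAND=dnf \
  --build-arg RUNTIME_DNF_COMMAND=microdnf \
  -f Containerfile -t lightspeed-stack:dev .
```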
@@ -15,94 +15,96 @@ The service includes comprehensive user data collection capabilities for various -* [lightspeed-stack](#lightspeed-stack) - * [About The Project](#about-the-project) * [Architecture](#architecture) * [Prerequisites](#prerequisites) * [Installation](#installation) * [Run LCS locally](#run-lcs-locally) * [Configuration](#configuration) - * [LLM Compatibility](#llm-compatibility) - * [Set LLM provider and model](#set-llm-provider-and-model) - * [Selecting provider and model](#selecting-provider-and-model) - * [Provider and model selection in REST API request](#provider-and-model-selection-in-rest-api-request) - * [Default provider and model](#default-provider-and-model) - * [Supported providers](#supported-providers) - * [Integration with Llama Stack](#integration-with-llama-stack) - * [Llama Stack as separate server](#llama-stack-as-separate-server) - * [MCP Server and Tool Configuration](#mcp-server-and-tool-configuration) - * [Configuring MCP Servers](#configuring-mcp-servers) - * [Configuring MCP Server Authentication](#configuring-mcp-server-authentication) - * [1. Static Tokens from Files (Recommended for Service Credentials)](#1-static-tokens-from-files-recommended-for-service-credentials) - * [2. Kubernetes Service Account Tokens (For K8s Deployments)](#2-kubernetes-service-account-tokens-for-k8s-deployments) - * [3. Client-Provided Tokens (For Per-User Authentication)](#3-client-provided-tokens-for-per-user-authentication) - * [Combining Authentication Methods](#combining-authentication-methods) - * [Authentication Method Comparison](#authentication-method-comparison) - * [Important: Automatic Server Skipping](#important-automatic-server-skipping) - * [Llama Stack project and configuration](#llama-stack-project-and-configuration) - * [Check connection to Llama Stack](#check-connection-to-llama-stack) - * [Llama Stack as client library](#llama-stack-as-client-library) - * [Llama Stack version check](#llama-stack-version-check) - * [User data collection](#user-data-collection) - * [System prompt](#system-prompt) - * [System Prompt Path](#system-prompt-path) - * [System Prompt Literal](#system-prompt-literal) - * [Custom Profile](#custom-profile) - * [Control model/provider overrides via authorization](#control-modelprovider-overrides-via-authorization) - * [Safety Shields](#safety-shields) - * [Authentication](#authentication) - * [CORS](#cors) - * [Default values](#default-values) - * [Allow credentials](#allow-credentials) + * [LLM Compatibility](#llm-compatibility) + * [Set LLM provider and model](#set-llm-provider-and-model) + * [Selecting provider and model](#selecting-provider-and-model) + * [Provider and model selection in REST API request](#provider-and-model-selection-in-rest-api-request) + * [Default provider and model](#default-provider-and-model) + * [Supported providers](#supported-providers) + * [Integration with Llama Stack](#integration-with-llama-stack) + * [Llama Stack as separate server](#llama-stack-as-separate-server) + * [MCP Server and Tool Configuration](#mcp-server-and-tool-configuration) + * [Configuring MCP Servers](#configuring-mcp-servers) + * [Configuring MCP Server Authentication](#configuring-mcp-server-authentication) + * [1. Static Tokens from Files (Recommended for Service Credentials)](#1-static-tokens-from-files-recommended-for-service-credentials) + * [2. Kubernetes Service Account Tokens (For K8s Deployments)](#2-kubernetes-service-account-tokens-for-k8s-deployments) + * [3. 
Client-Provided Tokens (For Per-User Authentication)](#3-client-provided-tokens-for-per-user-authentication) + * [Client-Authenticated MCP Servers Discovery](#client-authenticated-mcp-servers-discovery) + * [Combining Authentication Methods](#combining-authentication-methods) + * [Authentication Method Comparison](#authentication-method-comparison) + * [Important: Automatic Server Skipping](#important-automatic-server-skipping) + * [Llama Stack project and configuration](#llama-stack-project-and-configuration) + * [Check connection to Llama Stack](#check-connection-to-llama-stack) + * [Llama Stack as client library](#llama-stack-as-client-library) + * [Llama Stack version check](#llama-stack-version-check) + * [User data collection](#user-data-collection) + * [System prompt](#system-prompt) + * [System Prompt Path](#system-prompt-path) + * [System Prompt Literal](#system-prompt-literal) + * [Custom Profile](#custom-profile) + * [Control model/provider overrides via authorization](#control-modelprovider-overrides-via-authorization) + * [Safety Shields](#safety-shields) + * [Authentication](#authentication) + * [CORS](#cors) + * [Default values](#default-values) + * [Allow credentials](#allow-credentials) * [RAG Configuration](#rag-configuration) - * [Example configurations for inference](#example-configurations-for-inference) + * [Example configurations for inference](#example-configurations-for-inference) * [Usage](#usage) - * [Make targets](#make-targets) - * [Running Linux container image](#running-linux-container-image) - * [Building Container Images](#building-container-images) - * [Llama-Stack as Separate Service (Server Mode)](#llama-stack-as-separate-service-server-mode) - * [macOS (arm64)](#macos-arm64) - * [Llama-Stack as Library (Library Mode)](#llama-stack-as-library-library-mode) - * [macOS](#macos) - * [Verify it's running properly](#verify-its-running-properly) - * [Custom Container Image](#custom-container-image) + * [CLI options](#cli-options) + * [Dumping configuration](#dumping-configuration) + * [Dumping configuration schema](#dumping-configuration-schema) + * [Make targets](#make-targets) + * [Running Linux container image](#running-linux-container-image) + * [Building Container Images](#building-container-images) + * [Llama-Stack as Separate Service (Server Mode)](#llama-stack-as-separate-service-server-mode) + * [macOS (arm64)](#macos-arm64) + * [Llama-Stack as Library (Library Mode)](#llama-stack-as-library-library-mode) + * [macOS](#macos) + * [Verify it's running properly](#verify-its-running-properly) + * [Custom Container Image](#custom-container-image) * [Endpoints](#endpoints) - * [OpenAPI specification](#openapi-specification) - * [Readiness Endpoint](#readiness-endpoint) - * [Liveness Endpoint](#liveness-endpoint) + * [OpenAPI specification](#openapi-specification) + * [Readiness Endpoint](#readiness-endpoint) + * [Liveness Endpoint](#liveness-endpoint) * [Database structure](#database-structure) * [Publish the service as Python package on PyPI](#publish-the-service-as-python-package-on-pypi) - * [Generate distribution archives to be uploaded into Python registry](#generate-distribution-archives-to-be-uploaded-into-python-registry) - * [Upload distribution archives into selected Python registry](#upload-distribution-archives-into-selected-python-registry) - * [Packages on PyPI and Test PyPI](#packages-on-pypi-and-test-pypi) + * [Generate distribution archives to be uploaded into Python 
registry](#generate-distribution-archives-to-be-uploaded-into-python-registry) + * [Upload distribution archives into selected Python registry](#upload-distribution-archives-into-selected-python-registry) + * [Packages on PyPI and Test PyPI](#packages-on-pypi-and-test-pypi) * [Contributing](#contributing) * [Testing](#testing) * [License](#license) * [Additional tools](#additional-tools) - * [Utility to generate OpenAPI schema](#utility-to-generate-openapi-schema) - * [Path](#path) - * [Usage](#usage-1) - * [Makefile target to generate OpenAPI specification](#makefile-target-to-generate-openapi-specification) - * [Utility to generate documentation from source code](#utility-to-generate-documentation-from-source-code) - * [Path](#path-1) - * [Usage](#usage-2) + * [Utility to generate OpenAPI schema](#utility-to-generate-openapi-schema) + * [Path](#path) + * [Usage](#usage-1) + * [Makefile target to generate OpenAPI specification](#makefile-target-to-generate-openapi-specification) + * [Utility to generate documentation from source code](#utility-to-generate-documentation-from-source-code) + * [Path](#path-1) + * [Usage](#usage-2) * [Data Export Integration](#data-export-integration) - * [Quick Integration](#quick-integration) - * [Documentation](#documentation) + * [Quick Integration](#quick-integration) + * [Documentation](#documentation) * [Project structure](#project-structure) - * [Configuration classes](#configuration-classes) - * [REST API](#rest-api) - * [Sequence diagrams](#sequence-diagrams) - * [Query endpoint REST API handler](#query-endpoint-rest-api-handler) - * [Streaming query endpoint REST API handler](#streaming-query-endpoint-rest-api-handler) - * [Versioning](#versioning) + * [Configuration classes](#configuration-classes) + * [REST API](#rest-api) + * [Sequence diagrams](#sequence-diagrams) + * [Query endpoint REST API handler](#query-endpoint-rest-api-handler) + * [Streaming query endpoint REST API handler](#streaming-query-endpoint-rest-api-handler) + * [Versioning](#versioning) * [Development Tools](#development-tools) - * [MCP Mock Server](#mcp-mock-server) + * [MCP Mock Server](#mcp-mock-server) * [Konflux](#konflux) - * [Updating Dependencies for Hermetic Builds](#updating-dependencies-for-hermetic-builds) - * [When to Update Dependency Files](#when-to-update-dependency-files) - * [Updating Python Dependencies](#updating-python-dependencies) - * [Updating RPM Dependencies](#updating-rpm-dependencies) + * [Updating Dependencies for Hermetic Builds](#updating-dependencies-for-hermetic-builds) + * [When to Update Dependency Files](#when-to-update-dependency-files) + * [Updating Python Dependencies](#updating-python-dependencies) + * [Updating RPM Dependencies](#updating-rpm-dependencies)
@@ -732,6 +734,22 @@ options: ```
+## CLI options + +### Dumping configuration + +If the `--dump-configuration` CLI option is provided, LCORE writes the active +configuration to a file named `configuration.json` and exits; on failure it +exits with status 1. + +### Dumping configuration schema + +If the `--dump-schema` CLI option is provided, LCORE writes the configuration +schema to a file named `schema.json` and exits; on failure it exits with +status 1.
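+For illustration, the two options might be invoked like this (a sketch: the
+`python src/lightspeed_stack.py` entry point is an assumption here, while the
+flags and the output file names are the ones documented above):
+
+```shell
+# Write the active configuration to ./configuration.json, then exit
+python src/lightspeed_stack.py --dump-configuration
+
+# Write the configuration schema to ./schema.json, then exit
+python src/lightspeed_stack.py --dump-schema
+```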
+ + + ## Make targets ``` @@ -768,6 +786,7 @@ verify Run all linters distribution-archives Generate distribution archives to be uploaded into Python registry upload-distribution-archives Upload distribution archives into Python registry konflux-requirements generate hermetic requirements.*.txt file for konflux build +konflux-rpm-lock generate rpm.lock.yaml file for konflux build ``` ## Running Linux container image @@ -1229,7 +1248,10 @@ The script also updates the Tekton pipeline configurations (`.tekton/lightspeed- ### Updating RPM Dependencies -**Prerequisites:** Install [rpm-lockfile-prototype](https://github.com/konflux-ci/rpm-lockfile-prototype?tab=readme-ov-file#installation) +**Prerequisites:** +- Install [rpm-lockfile-prototype](https://github.com/konflux-ci/rpm-lockfile-prototype?tab=readme-ov-file#installation) +- Have an active RHEL subscription; get activation keys from the [RH console](https://console.redhat.com/insights/connector/activation-keys) +- Have `dnf` installed on the system **Steps:** @@ -1237,12 +1259,24 @@ The script also updates the Tekton pipeline configurations (`.tekton/lightspeed- 2. **If you changed the base image**, extract its repo file: ```shell +# UBI images podman run -it $BASE_IMAGE cat /etc/yum.repos.d/ubi.repo > ubi.repo +# RHEL images +podman run -it $BASE_IMAGE cat /etc/yum.repos.d/redhat.repo > redhat.repo +``` +If the repo file contains too many entries, you can filter it and keep only the required repositories. Here is the command to check the active repositories: ```shell dnf repolist ``` Replace the architecture tag (`uname -m`) with `$basearch` so that rpm-lockfile-prototype can replace it with the requested architecture names. ```shell sed -i "s/$(uname -m)/\$basearch/g" redhat.repo ``` -3. **Generate the lock file**: +3. **Generate the lock file**: ```shell -rpm-lockfile-prototype --image $BASE_IMAGE rpms.in.yaml +make konflux-rpm-lock ``` This creates `rpms.lock.yaml` with pinned RPM versions.
diff --git a/docs/config.html b/docs/config.html index 4c0d066e0..fd0ef2f46 100644 --- a/docs/config.html +++ b/docs/config.html @@ -407,7 +407,7 @@

ByokRag

vector_db_id
string
-Vector DB identification.
+Vector database identification.
db_path
@@ -969,7 +969,8 @@

LlamaStackConfiguration

url
string
-URL to Llama Stack service; used when library mode is disabled
+URL to Llama Stack service; used when library mode is disabled. Must
+be a valid HTTP or HTTPS URL.
api_key
@@ -1393,6 +1394,11 @@

ServiceConfiguration

Transport Layer Security configuration for HTTPS support + root_path + string + ASGI root path for serving behind a reverse proxy on a subpath + + cors Cross-Origin Resource Sharing configuration for cross-domain diff --git a/docs/config.json b/docs/config.json index 124c005af..a392b3890 100644 --- a/docs/config.json +++ b/docs/config.json @@ -275,7 +275,7 @@ "type": "integer" }, "vector_db_id": { - "description": "Vector DB identification.", + "description": "Vector database identification.", "minLength": 1, "title": "Vector DB ID", "type": "string" @@ -794,7 +794,7 @@ "type": "string", "nullable": true, "default": null, - "description": "URL to Llama Stack service; used when library mode is disabled", + "description": "URL to Llama Stack service; used when library mode is disabled. Must be a valid HTTP or HTTPS URL.", "title": "Llama Stack URL" }, "api_key": { @@ -817,6 +817,13 @@ "default": null, "description": "Path to configuration file used when Llama Stack is run in library mode", "title": "Llama Stack configuration path" + }, + "timeout": { + "default": 180, + "description": "Timeout in seconds for requests to Llama Stack service. Default is 180 seconds (3 minutes) to accommodate long-running RAG queries.", + "minimum": 0, + "title": "Request timeout", + "type": "integer" } }, "title": "LlamaStackConfiguration", @@ -1147,6 +1154,12 @@ "description": "Transport Layer Security configuration for HTTPS support", "title": "TLS configuration" }, + "root_path": { + "default": "", + "description": "ASGI root path for serving behind a reverse proxy on a subpath", + "title": "Root path", + "type": "string" + }, "cors": { "$ref": "#/components/schemas/CORSConfiguration", "description": "Cross-Origin Resource Sharing configuration for cross-domain requests", diff --git a/docs/config.md b/docs/config.md index 1c5dd1cf6..7aaba0e39 100644 --- a/docs/config.md +++ b/docs/config.md @@ -117,7 +117,7 @@ BYOK (Bring Your Own Knowledge) RAG configuration. | rag_type | string | Type of RAG database. | | embedding_model | string | Embedding model identification | | embedding_dimension | integer | Dimensionality of embedding vectors. | -| vector_db_id | string | Vector DB identification. | +| vector_db_id | string | Vector database identification. | | db_path | string | Path to RAG database. | @@ -341,7 +341,7 @@ Useful resources: | Field | Type | Description | |-------|------|-------------| -| url | string | URL to Llama Stack service; used when library mode is disabled | +| url | string | URL to Llama Stack service; used when library mode is disabled. Must be a valid HTTP or HTTPS URL. | | api_key | string | API key to access Llama Stack service | | use_as_library_client | boolean | When set to true Llama Stack will be used in library mode, not in server mode (default) | | library_client_config_path | string | Path to configuration file used when Llama Stack is run in library mode | @@ -513,6 +513,7 @@ the service can handle requests concurrently. 
| color_log | boolean | Enables colorized logging |
| access_log | boolean | Enables logging of all access information |
| tls_config | | Transport Layer Security configuration for HTTPS support |
+| root_path | string | ASGI root path for serving behind a reverse proxy on a subpath |
| cors | | Cross-Origin Resource Sharing configuration for cross-domain requests |
diff --git a/docs/config.png b/docs/config.png
index 5ca4fb3ef199bc39a8bdecdd8943edcb85b9b0d4..503a4cb81363a9995562b3a2b0919f7e0337faca 100644
GIT binary patch
literal 418276
[418276 bytes of base85-encoded binary image data for docs/config.png omitted]
zzn+;dm)};DN&o(jUYf{Z`mDHn=KU+Hz(GQ@@puq5x#z@#<@_=Y)AO0pKGd(dJk?&j zzRC8LA=ZUiDmm?Q_CvpSwP~v!ub|4bbl-E=e)SFexuz-`%5;a>ZUMqP7LL=vvap*v z){ct%zmh4OM$oL17r{**v%bBmGTZcoQ3gau>BQzfv!Wq8hCH^!m~t7(C{yWRtiEwj z3alM1gXe;tK7TVl1*Q+O<>ZJpOg_4jJ97zWy2;` zvXBe2hV3J&{bsMHupk-ZD_DpCjJHIHpd_TWnO=>v8<5h95h@YHvL@kX^Ms~=2RKzr z#0}AknF3MuApCqHaitifY4{q&2=fnFF~OrD^IDTzvhz8a(Wvh|DFe~9MYTWk;NVlM z5jQNvG4iix70Nye>R6a|{2buKf-o})J_bHzP2N-wP2B-*CA0BQwqZ{B?z1@S9Q*+iT? zwvVW_&BY(1&oYM^L35`-whJVGCF%3gEvnIC%n|o>KK!0%L%^|If|I8ez&(@KAO~3# z(M}^$y7!=>-^Me?NDU4Glz;U6EN@`tYg9rs|MkvQ&jF&UNFYmH!=2p~b$x#o24nNO zX@FZ73Mc_D`ljOo^>BeCO2@ad^n=KjF}P-{1{Zhpn^+C|ZN4zQxSWCvHAJNv+=nqI?Fi&&>b+{6G2WD7VMK-xz!{^r9m&>}9u zgr=F_B?~2fpq(ZIFVJ5?6#`gZ<`Bj3J~{F?DM zmX-EZvuT;lKGS%ezwn}Qqwhr9GR^y;)7kS9POcmuk_ZB@q+RN+$7plO|Cu7UNmjxw^}?FzmL@vsH#qs&}r1D#@xpTs00p| zleLtTal#!W6X*)Pd-Ul}@}sh98au(idy*!s%K@0FZ*vv)1GEbY3t=xP!iu`(7&%Uy zNGImdlO?rVFOX_~*3MY_n?$f81R>B`)1A;1m!J#9{z#fcXYtHVDqE@9F#@x3|J$|W zqg(V$@MgbZvxT92{wlbvXKf_SUU-@|sWXbWaoRC$Ol@qgzDGq^zt5DPegX4d0EaKC zsI`XZ|HtHPvfJn^>1nW554ERGFs=_Xq%YOj?eA} zh}q1*^jrr6v8D@lJYh-2+#o{8lV*xNvSa!hS0e(>)Ls2P3QWS=@XMQ%L5r3*@v-U0 zJ_|-$TRWDUL@b&UP`sFYY^#%b_XzlvjOZUne^VQQ&0k*pKYX=N7;XJihS*p?LVsfG zcJRG~-m+~dVMrLI(=icw?MQShD>s`Xra1(X~@OiT! z+qx~!=i09$mNRC7!E?z3Oi|*5yk^Cd=8xm95SGo>y_JFUpI64_7}ghD_w@Mahd6l& z|F_Si0J2~GG9&)G#>?Ol{RP4raggAwlLr|Y@HhB^`kbLRb4Cb5{cZHOQahApFa9yLX0*@MW!Yv_lC*$TO%wXN*MZ` zLbhC3|1Q1?z-kvD3FmH>cuy7c{5i4GEV3m(Lx%GD{4@8@P*c|e4ory1B31<8`@HqV zV2c2w4Vd2$@o^aK&V544vElJq?H@b-5V7iMF6i~EL&#Sl@v5nS{~zKqN0pOE%J3bS zg5MW&GfIc(`d462jlVbhHvymrDt#EpgAsFp4i%$9NHeTpKx|R>u*)k@99%cawed>= z>X?5gH<4KuOyjK%N9FJy!;U;zHYkn>nivh3KRv}*IAhrw(z4=0Q_;0SfPTG(4Icj* z$6sKV46?x3BLCl?8E!PL3`3VWF#Q@ywo{vyY$}%Y9u>hJtgc+_rmm->Y@izl-t1nMHg!(ix5~bU&)RrIVNpiPUq2d`eDYRdb~ALlF7Zz z0*^QvlY>erE?}#<;#QtNT(8D(nJ&KQF2^eG1VY%ixh5_t`k1Hky)a&q+EA%0yCD{^ zWh22PDh3wmU_KAUTUR{lH8aP9axG*rDgS;ZzPN&D)WR_SB)v|^CU0S2&58PBeBYq8Vqc?+jb zB?mjZTEAu@v>R9EQ(>S5RHnwEJ$gnn+5E|Zi$sT9KtoD_Ey@+%lOEJhR8{XOzdh*6 zWFGWn@$kLcJUfzi97b-}6TPsG843za){@sXegRNMgH(VUSqO@T4+@m9-#UmOFn7qJ z_HS^;)bF>+d&Wo9#$EEWLWZGLf(Ob4%3;)k4yW&uCG~wxj8Nyv=0M*xPVL}7eDXhV zFceKHDgncOI;8Z`%+OXQ%iq;=)O&#`#Jt8x8!)O&i!d=$2r>n^6R2NWL7U|cSK&YQ z2n*wP`Nqt-*oZRpIFmR;Xc1TE!hxPTjvsvhuBy4@w#m~Y={QsaLSmzda% zKUKn0Df}9mVO?dwE%fN?*f}{1j*@GZ^Gp(zf^_E$$m`GIAQl*)j=6?|IFXz%)?qIM z`vKmqkJVs`ETp_Ac*-Ay3915eBqrBx-=q!;kCZu%F$Y}C-!8ocy8|E>N`XE_Jk9=P zFY8k<;K`51Vi*nCBKy>rg7wPrVDOr5Lkd`qnD0+owdZ$J#l?J7{zy>*7Up<32q3R% zV5+ge`?h?{e!WdpHkUpYjhJ9BZu8}ZZ3bC~w3oYT=+wZ)=iv9DFYrlWM;Rl`UA#C5 zZ1o3~!Equ=&0}u3nsrMXiAy#(6s0Dd6ibnC5Mz&Ke4n7Ro_^3}G;pa?;ZD z=kG)F1xGOD?G6j>rhGfdKuhwu&PP56g6hHplQ}GcB_AGSi`cu2-?SQzgd38IukW@0 zWOnsU=C$^mOj@jB8=55jU3Yy-I8_dM?JIxD541L@i8k*I1{r_iw}F3y5+WJAP~oH} zIa6mX;W3;4Ye?FL-179?dY4h$pY9C;A2&F|A-pm-x@juwf_@}iFoH#fz z0Dw^np1M_f25li?p0l*)5&Sc{(;($zi4JeCFsBbJ4~jltMUnxM)l*1t}hPeK9j7>o-Ac9H+{Cx)yl-qy$qX;|$IC z1}%B0EM37C>=C=k3&zfhW zYs<}vK1U}JEkdCHYwLs%>9a?8Z^tpd^@5%TzWhlv(b6L*o?NBry3DJ!)RhjGBuxtO z4IMsSdXof36*e9CInfwcxHg62XAQ|!d7F%e^ScR_j1QRZpmExgtPFgqIS7(l#T@Dx zTs{(peeEUGqX@Rcaux)mKZpARLy{1v1cl;wkV_Z2d0@AF-^m>>B%YET#!IM!DVj@~ zYX8($%tofDl`0u1)y{Bf?BvG00cSDPXkrC*22xSaW1Tk$V!oP*YRDa5ClH2V(TE+d zW_!Kh;<#Tpd2@rftVkJ9!Jx3B^S?RNac+>D(t zMFu(w#f~^d4on_B9m)4Q(^VP_^_&h7;+-fc7XOJx;!F;{RK%;1%_lOxpv+?*^?f%T zbYbFDK&J?AfV+o$&06pH%E>_kmpwZ5k5*#GjoQtrxS*>$49e$!M(g<7moeJh?mQ$} z;y*A`QaTKmGzqIgJwvzLna#t-j@AOMT%)ZJCh+?_%yI?EKi55x*6{ z>`lYVQ^pq!B8gkT%sqYAA8cOpt!H^{)>(hIW*Z8u=<@8YeW`NrJW!P-_ zA2!FNTr4Udp>v@+FM01q8{1Q=goj)q52VA$`z3%AL?)2a1>=G~)KKbETbp9An<|o5 z>)t43m)vt^Ts6t5hx8vuUqcuCwv648k`%_*P~O%~C;8p2vh24mG!Mkvd71e_%}DzR 
zH3AMEo%42Vm_Mu7>m|dIJnI;D>4MAOT-hIPBq^xfglz`)q+l5T5d{uj(<>Lw=-gN} z@y=D-!$-=$l2{rnkbu6j*J%HNp+cQgFFnFe>uAO4d1e0$o3tKCm@P$Qwcs@%z7p6 zhL98*ALHPWSK;tbUsI+K_ak*3FRoGj4(W<7g*kg+@hCOY=Y@yQu5pi#AWQ{p^zyD< zui4Cei=O?B=-FT40mHQ#OytL-gdm+)X^EZ+a}Vs>Z?R6XA(f31UBzt?Z_5v=s1?wC zKKP&e+*k!;@z(WUdmd!t`GDtx!5CDqDDjcG3l3nkm}qp3g7*z-=#bHVqOA*KXTeK`=27AX`zO2$XUf zq~+k?_YVOJ8B9#f1-jwJOGUR=jdZyzy!pO`Uef1M!_5!+yT74o3y-GJDQMh{vN1w^ zXNJwh3mJq^U*d;}jFqH*;9{X&N4rMMkufdJ!yn7Ozce6|{Iip9HLHhlUHdt^|}5hySVsMQSE+IAVz3* zu4y5>R1voylhY5QJ{ho~7dHTcTZA|FHeLoF`j8prK?8%23P1k+V)2&}Va)4I?gf%H zJmRy=@{xKB<&w&V`Z&z(f#D9=?-Z8xw4+Aia1iendMTd_%C5if5e(I$p2uM#^CPEaUSFpLZ^4?RB zLF4+XaRV10osWt@0^M>S2-`7nQ+_jNccl-F*9~#~8?S47l&8^aT+J9c7sj9FD-WJ* zr*I+r%pQCh6-`V`pildl;gB+m!_Rpe@@Tco&k@xNoY>ILBop+f9J; z36^aryM>(5Bk%UAW~{U7k8INA+W+onnAUG?EvawJ)IYE0(s%3Zc0D=JRi*3}nG%yaWn!+{&2{6EYOku%@lTx7lc`6!^C!<Jmj$|{|wrkc$A0cCR8X4Xo+f$Mq1E0Ja8=G zD^2`kJF=AqJrofkMy91|w>Kg9V09mRR}A#5!~C*RzS2z%iAWczSu zfkFeQ^)V60a4OzYuUYAO+k|@uuHPwZqufkxD$2p2lvXrRn_Kz0bKafTIjIb{U)H8c zkSQv}L1UIoMS~F&nOBP>zfFk@gRe)k1``D}=)NU!RC?CC<|RzOuGPS)51XQMxpStsYqVHTpeDUoa;n!8jQOPyX5^5^Ca zE7HO&8E@6q+e_1dCGW+{Eo* zAfycgVYmykae9e;O}y}cOOV6BRvsl8V@%o8PJgka!yL0-2gvTb9EAZ_%9r$pB1alk z5N3S;$4DD*Xbth9$nVwb!O94doX=IFuRHt7(X~m?f8IH4Y@prhH!s}^&wqq6Q=h{z zb=*(UXFj5xH~gGv(eQ-0ad|6oK;BN|PM_=)IRxNN^3_@TabEDMf@8ce$@)mKtC6pT3nv&$|nNaNb+gXT}&i^ zSTIUIoKFdIx)wi;MfHcnNIT$)kh9myXr)HOg5o>|AW!5DI$N#p0kD(jM#yEG11RA> zCN+j`;#FMVG_-MjRF<3|D!VtteTkTX4bvHC0}J!MU#oJAH@uq#^9v8S$(1=HV@t~N zPfAc)Q}%14E;tAz?x+NdTsC?pI1uJ>KhiYdMb8Jgxqdt#B;~(K9-BWn&w{j77&ds@ zTCj1u3ifh+=vS@J9T}`*R!=M1wp@SpQyc2P#eYt+f3+a}Pp}cA5Ul=-yF5wSr(-_k ze{C;WC4HE3nA=eLQoleH+VX|hSISK&YzcCtr3ZiAw+?}2s$=4=PUW|daw9Eo)vou| z;Xu%0eAIJ(6n@U3MAZ6~n$?mjH`+7+*am+<4t7p8f7KPUZgiC?vxK&P0pdJ|`F4qC zyprF7BJ`yM0iz2Oc}UwINL47(+wI{^9XI1At={Nc`x0C8q)W$GfGzy?WQ|fy)B*Lc z^UqGqWn9&1y5>6kUOU0$l8xCV`Qf@+j#Fo|B@Dq*Jo*L&srd_&0u-xRollbsvLWc) zH)UKc$1O~n{1#(2hs`@2shI`))FZmm@S2{1Z&s{`VyTGMJDVunG%ZP)?~k1zZis#P z+Ty`1M{mZJkx$;YGh3O4-MYpi7b4Si08+g?3uh6^x5Q_8;j#)0#!K1W?Ogg?F*RHV z25Z>L>`cJ>KWjz%0vu2Jk9c>V+Oo?~4MFrb?Y!B6*7g0?;KS->$=JNrMs;TZ9z5V6 zY2x}Ll4^qSmHD~>j;YTJ>n~0-Lc8W=B4W}n8c>yTGxJqqPdN*=&DznHKM77rL(Vs= z*T=p&pTG+tD7>)3hKcaDhOJ0rga%{%g86IxDi1&6t?nz7nhG7q?=+CBg(W-g-p3A@1Li;7TF) z;i|0C4hHMmDvnj$%1m`Y8Iu)=uMuoP&KiJWOI+UHHA^}C=5HVW0sDf|nxo8PDIbbO z&UXKzD&xB4Pnoe@gg0X5tQ)*pf_o+7iZs^c-6sWk?OiWK3B6Ad>b}akRn_PI(EP@ z#VL^zXYMgjF55kJz(DAkOtS=UmAOauJ_+1i1##UKi5NTqs&lbD>G$2o-ymP=64D>e z5OBq_p_#W-Hdo6)hGqZb6BwiBajHG${NroMwK+wQ#4FF{VX*9N;zIwW!CN7Z)My{e zHOa^OH2G0J=9=olO4`aL$eRp$hvCNjDy3_mk|h%5N}!%}p1OMZ%6Axy3!Fx-{=rQRe&l$cdn$%6uaU zDPIo92@!_L98M12gu3~k#S2Hi$BlF)+GwcNuNvJgvaDd4%HpIFz28RT@ZwQQI$&E1 zUR#lN1dKY2x4_e1ml?*jwWrM!#n%+wo%}v%@8#V0+<$wu)biQ}n=5A8FPOD;%~3Nk zc^QASq@HV^IE|K2aK-m2H4h_vGL-{bJXvk^c$V}&1xlN`_fTUCNuQ;pZxzYZj1FE(=T}R z3b3(1!VN>aFjir1{PfITucSeB?Yt0cD~poSyBi)eb}sgha{xZ;sM5tK*G_mG=}6KB z2CTZyTAen2W`QrBCy*ts@qLRJ?aF#{mE^h|`Tg6FmgwGF0|`_13^X~8<#s@!xQ?1A zbZhhreFe+s%!Z3I=UVyoi~3-%Rn=7sJMAqvhKF@`4105bhQtVdEMuZ1d^~L2JrK`| zSCcN700+C*`2{UKhMV^VH!ew;?Q1yBMKkXwnVv)qv(AzM5Ugu0!_J~OwD_Y9b=-Y# z=?tu?+Z4#vZ|ZpEVGF9&fdUlDsF;#1a=6$Ohq~RN-OBchh(nxT`D!0$kV!<)X%SoW za3e%E5_(Y!kzIj=6Y8Y>aR<-&<}dLQ&ieYP7Pm#>?`wd_352u} zEu;c8h~(kq&eN1;6D04)8QLKOT8~~!LLHpqYrP3{5!t8ZZi7VfWx|d5Ks@K@L!3d9 zeoaNqy+O&62tEpvTu>duWM!=`-{xlb)g@Iyjv{m4A8amOZfYGa!q-mpKwWn4_NH*2}rxIe!?Gq3wgfp-nNlZTu)+o{M%Qhir0Xd zyc|NQE}xxS%mV}~qE7=|hNl)K&o>_%1+nsA*75#uAI=LCCW$EG0i|8)5$T-Kb7!u* zWH^5J_QP!TLNHyz>H_sbK!ddbgf?m-2$e!R*B7y~B;jm`p)#-13NsNR8}vZ%N4VW$ zbamicUVb*Fk#qh`z4}cA!}#8!_-`(ITWr(OiT!{OF>u*5Ec=D=73+YpSnC0+SC`TV 
zCQOGzxRBu%F2)+%Mv2t)hj^ER|9fHG0^%~X9rXj)oI-w>EGj#O7)Ur9#6xHKa3v8Z zRZ4+KSu?i7pm7I9%R%KgkDF@>+rPei{PEry+Q6N}y3YDgn_kGC)t^6t=pLBrsZt=2 zTB9pPsH{$2!969l%zQsWwmp+aHIieAFY)JQ_!sKzU8QLrmbon6v($KI7bq{M5s@ds6!meoqsECDWrc69trRZ6m}}E*#rnwbI#he2~CDndId{fj`HDiGiel9Z(~v zdJo|c_l}aZFM0ZX`IdZ*IO?D5viOP;OMraDxhpSB+=DI=W(#LQrIEfYtb&LJjM@2||qyX@H-n>Kjj)hJv zF91#eaixvzt_eqC^jM3 zj1RzhG2HQKhN_^nO#8K=+?2+re)bGE0E<0Q9JvEyLH6IT1P*;>lkwFS75seQ0;9&n zs(%MmCk!EI8dIQJvsvkvn11$ypW72JSXakeWm{XbLtQ*UVHsNV3pTazJ^ISB|9lUvm(htR|^1Dej)8iu4L-8WWU}UDk3B_;vizjA@gJ4hJJN1Im=ZD z`go@Q7J#sGACyV0T0?Ff`hrg31z4LtN>qI#`2nzIu+P3QKMAt#baG?^5@}uWCS5tk z5?PV01IG^OTU)2DGJaO&XXE--Z#LIoLV@kk2bo|+?}hq`>23n>?JrTqx0Km`+!-#vbj(eIyqASn~6=tWLt>fd8dGboNd)Y{v<;lj45e}jz@ zmLxtlM3ef1jO?~5h;$CfPoKse!8aqztx{y4MhnVsAo1BR!WvifW}9lhkI!am_G07a9%&Z`ms1_6Mv*)K75l;s_qUbwAz@C&?tidIY!Y?^^lR{tT% zzK>d&f*4Fo%gaLbcBi^roNiv+T{{8HMz(bGgQ7e=kP61osH8*9QcAB zh+726G&@;{M#jm@LT@B6b#o^qW5`N zy7+_t{FnfK_oR&2orgdBD!)#?Wijks^Ao=jk$91IA0kk%w{}yO_}nwK%sU*fU{Sq= za%EDNm0x_YlO?G71(iO-vzkWCr9Tx$h#G-x$b%I^3jp>UUbh9^**k_*o`EVcXuYktoVZDrK}vD z&X;q1pHM~HbgbPeo126Ah9Y#L9zXT9>ZFRdKH5q7r0b_4iZ7Wq8VTkS8s*=>3$`7- zJ_UBy%C>(Qd{@+3^*j2CoUqE|#dxvYzWLG2$mLQY4Qu_{dPED;9ds%ja4>#%wN%Xs zF5Cn%!>!%aU4oR%LbtYJ|C#!J!1+ zW6p4BC``thr{E?mAl(qFxqSJ1HpZv;_EYm>&q|v+PkIm(h_T>DY-XP;0Hd2QoFk4| z^}8_;SvMn#N~w`%aCF7DbX(Zu+fT@9pD^2IYYtowU{0uA zND--p@py88dcX2d@K?RyXv^6+QA!F3jt^&REtsYI@Y>Ygl+t79*oV~4y+0rNei%=* zCm?x_ukU3(ysV+ok)zfu;rxc-(+tBt*ST0h>m-K9S9oAzO7QU%b?QC=@-WcGpre>h ziiG?ir$Cic?2HL7M(A|gbkJP4|6R<`Q#T)4UbQ;$VDdHtOhJ)W*8r>Kgma9qbbT1i z9A|IO!pRSwFi;x2(rEtuzZiS(aIX9Jef;Sag|b&hi0n-n8BrO@D4Xmpd+)4-tc+w6 zqM^vh9!1C=*?W(ykde{vdg;F3pZE9s{rvGejt+-g_aUB->v=t{>%7kEJY5q;zMKt3 zt1JP*484T#!t?hIn>op4f>XXFhQIal{}{AMD>1?0tU~{4&9C*UQ`UYYzDi-@0{LjL zQ??tw79%mA%$6Hwa0|tE@vwDhe|;vcEM5Mqr%573@*kApAJP{QW_-N24<5~PYk(#_ zxvF{M1A{G?!@8~xIf4fsZ1~0Uk~sf4IR>WaRj(b(Io#l8d$`23JZYRzq^!fOGx;a) ztI?Jw|A;m9TgPN?$vyrA*gvK(x=iKcBlFPp9aHMVl#!L4!?@te(~*&w7sQi8|_@t!wdqO+aZG zQ)K6Nc_dS?aD?KW-Sh84dG+g_K}h`(p4G3MPy>sVK8nNFk*$2r0zW47P}>X=sq321 zSpnuyfFPB*4OC-{ER5(yldd9=q z!TGJppeqVqKDbEf6UD!@4+B?HSD&BZCZK$+haGJ^aY8{$3<^)aG6fC0Oq_-G4_z?8 z1~n86DGNX<{wqfP$#GW3Q#7#ut=gMFtUHAcA`;Kg^Zlez&DePqE=yTY%8NBam6rXFY7A;^~z|6Cxe&spuHO+pz=l16&({yq&)gGuD9r_ zVEO_(rov8hq5b!vhU(9`L-LFKip`T;>)q91oxQIxsbK$*Vs~jyI|Nt?FDnS!LNm}K z0E+Y9R^oe6C=>%DV~^$Q#eZZtKPQcFAon&3d;}|1G?=CY$T4}}M%36UVfe@Ez??(q zvK{r9&OX^Xt5}c|v&Z|Fy!^5nRhN6A)2eKh`Wb(jezdy3=m!R$mrlpf)$wd5t3}FInEn7Pv-?EY)hczRRf< zT+Lm7ql)QSo7+3`YpNO-q#cAmcQlk^Ko*5}`;>08dagJNOr&W=uFiqc&o2tN8jXjL z)6w*)7J(yHi(~o2es(lv0V=*z!CLSLF-FsJ1UrD8XZi}Q_LKGJCC0r^XsbzhLU2=} z-9(6DLys(mLdOYPpNuul2;8JM2c~<>mto5geNf1-?D=wJe>uDrbZ5ajZ~g|6RQS`E zPn?d!L4LZk@kyc_Cbs;S;8nD)%yHTv7cVd2FCxv}w+iu@_~&JAv)#mb|0pU^sp1K) z+{GibMbW!d?Z0X?no7;D;dRao;SqYlf9!t~ZOhK8VwSPF>EJ5vDt@sw=_wWh)3fap zbKr+n{_F_kc!iBPe7wj12_C>;VCzu-!s+XB#m{c2@MRH!6BEO0orBiadrK0_a8P;P zptw56YcMwY4`IqP++y$ygNM#nLB+i#R2#i;_rEC6Z$^E}kDXxN93#YFOl!+Pj_gMU zxRx_v2_AW(iVnAF2)b5d%#Wj=8F+6t%cFBpxX^=tl82K@ib z!rHJ$&ke7gmgA5|Abtso-slMv`aOk`241;FB*Q3N;EjZ{y+^p88i--Cl4o&T|R zdQ-+IO#N*WvTV>R7!*U*QY3qo&G?0i-}2c7#liY_`wV?wDYUp)7fiMVK_ro4teq81JP zU-olOCFlbRCPve@t_%+aGOf{_^yKutv=X2cBTAQe6y@It4oHhast=Li|*vXg>(n&cd*eM<#}s?CyNV z%SHS0XqBq>)~;6HLku_md3$8crS3)52o+B?X|#t=In1PlIG%wAjeZj#HJ;oc9C@j^ zdNK=}{af(6pudb>gn|7`=!{a&oeyG2OQg%7Km8J#CXGJH()F~T`D+= zfI6T26($rbQ5i6!t=L4exrpl-_qa69O8$O0E-+H-D;hWfKGLskq(;U=L$T*hE=u?a z$DajpUU>B_{a_@s_}7xWJ2Fdv5xJM*P8!TLBTx19iM(I4@6+hlS5e81Fwc%=0PO)eX?{Jzgwxu zX^?%(=%@8~T+9(2?IrX%?n5aH@QmbSpAwY+0euAfiHF07{@Cjd9~Jw=E9?2O@pCwk zp;Bv#rr%xQtF&MW<`dq1$MkUZ^3^M}YqSPdAGG 
zvW`*~xG|6?UD)^9;#df_j@zg;-|Jwh`+Es-5*2#=diZXgDT|9w`vccD>;+(ZjTRaO z6`2879Ilx<$Jr-G7Lb_1Y-nUM5Oh=lRYhlwKw-n+>#i$9r04}k%>$BU59p_OKIEt; zMSaZ+gxvwyxOrD5=@Ni6f)$gdu2A-MdO8suw3c?OTjK^k)m1rNi%Yy9AgO zYX0@JslSUL$$5$9tnv+D`-dev>tM#=5t=;HtFW0PZae5a_qzu1x(A#DZ zPCZCLW4!3>5u;Lt7y`3d>%q6WBgpLq3hKj2jEvVE(z!pd{hq zGxJ?nPUc(Dvd-+n16%LK&Py(5d4(2CMOHLTamQ_FD;k*UNRZbX9JPFCa@4-85Kr(# zQ0-wd@5-{LvQU@1w-ES zwZ|Zox+OEWh$a?ok^e|nv$SM2CYdJ}g=!GCOYYH=lhxqLB(nf z7YB58JL1_$W~7K1!frTT4-M-|;7X~k49v6(yq<~K7}gp+1cV_XF)x^PIf_Fa2OPR7 zc}g54;=e+GvSI?pzkywwIP?y5*7(*Nbnu>0KR*QcXgcB1Y^0D&jS%Xc8C*K$4YnhH zO9bz~_{|eZ&+3k!XwNL{m*nS>qi^-syJOAmcuNDWd3}7E@)ZOOcPE}j)pRzZ6I)V> z_TD;iBE{!vg$XhATpP5~K`=y+t5hUxJP z{asvF)PDVb$$TH*5k$*8b3fY-&A+;K=1Cx2$(y}jw4+VgwQk?stsiMz8G{o|BQ_F$ z7;;hkMoE&W1Sr$zItC=`2>JVYo1NgYT;f*u#eKB((8hsu+>M z($WX&To$;(9u}p!nlQ)r`Ai2>plbn7n5DWf% z{TMcv;#Xsr(tFQNz61CxhET$S>@*q`mAg`!H8gr&b8jraYE*}q6zT3TQC2R?(*$$C zy-??|fxd0xQvVENT1=_8c zFv>lybW}dAe}c{ECoVNi)?%;&4MqER=Rfj3HO_i;d_9?n`XiRmEdZ%{Y$Gi2p0~#^ zhQy9&d$)oCY_s#z;vfu2SJ5p+C=7(i&J-j9eb9+S-5Z zP9nBkSs-0qVH4Ho`aVIAAan{J_GVn7?Mx}r9kVH=y=2M7z-6Mc`yX^gJ|guQ7xK?= zSuLDZrPj)(cvr4neOj3=5B*TEA;+9gtVnX*;0{1%`nl6g|Jc`+g(bM7S~89E9;N*L z&fJTPj&PCgsnm!B;WO*^yYGC^`p5tPAU)`=(bE*_wJML<%sqcNnqRE`HGeg)LCJ~5 zz)c39#G5C;%g=%vV{!f95n4O`%2#0Z2)531qn`9ZG<#rPIguTqzJfMyZdZ-G)Gg&$ zyAfWN|Lv=D4Pe#P!cH=D!tp;@j#^;FL+0is0gd*Rv6yHUrk$#XWo>{eM7E0uUcDdK z7I5{(K9>x|s%kD&_lI=qFK)P``_Awh+6@;mC3wg@;#u|omIurdkE5IP&D6Q!Uf_0B zA)<4}TGtm7+N1#NVoknO$1JuPW;-SMceZ+q+GbM>ol+q}KF>u_bn z#hWWVSC^M}cgFfUoGp~Qicttgxx-@Y$Lkcj9k6nh%!WaLvKvQlFP)HBHNfb=bZbeY z?)iyQ!&!WJQ|MAt2L0RIlSe!DH`{p_RY!1(9NuH>Up|b z46o!d{Nv)jpfLRJ$^>>|BR*d`eq?VO<6jkK$!6;G_l^fY*=&%S-Jk4e#=CH#u^2d7 zF)RZZqpyw{J7o0VeCoLVKwZK1Mw(-+=s}Ty)A>RV+h zbdh=oD(IG?5jrp6g&dz;1o3I#y%6H+8%>xiM1uE|$s^*j8Hm zrQj3VLLZ7{G%edCpDia8NDQ*``EFZpnGzk0?Z(RQ+X@5xKRuQYM$4`D2b>2{1$aL5 zS~z7um6k(=q;rpTGs;FaW)xC5=*qvHyAFxCVHXGmZ_7qq-DKSGLIyO7!0&!U*KuED zB~y)-!+APHxDm2h{G*E9v5ze5j1m;9LFwE_TOd=qJXv>xc>8tDL8YdYP&R?&`+FJx zS;d=pEiX(n(9wvPzy?KX%g%ia{~kqF57T@*f^k{0GRjAx5AXx6#3WkB-__Vg&|Y0r zwA>CWKIrybBeA$<*tFsV#b@ac_NDaTo?u>2GDDr0q?+XWpW4ddM#~q>I(c23*#kRJ zmZSf0Xjp zNqTJ`uKA0YhF8)$Qzo#&+X%yaLs#8R9`N@(o^?E5UMKHSxqioQm5 zLH?U(3}4e{Y|EXcp6&xJ90a`xtSXtB`Uy|5@-`+TXG(+6&!O&I<)M$Be2gJs(Ve%Q z`E>xRk;TEq4ZIl*iWfk~vVHNb{_+7!rHuX{zO`+PiO~i5(tcM#KuC7`ayuy6*?x?; zZPV&7mK8c(cPp*y@gA6C+8n9y=Gj?7TgSt?0GnEzATCcbP_r83c9(k2418-*%{e^l zQa-8-1-EV|Sn~WdSA_On>)dO(5MUv~I)Yd))D`zvvBmNpRddtD+0Xrf>c%k2;{HS47G|tG&#F~Paen{pVXa(v=sDV``|&38 zG#^(wTg7!~P#l@lEc;^wv8o+LH z`naVW@geZTDAs|=J(7H$2r?5Rzn(-H^_v}c<5@A(6RO899>&*#JXa-7q@BRp+RGZD zXYp8Nl=}Tcfm=p+--${z$|Bc;a=l4LbQgveZY`!Mr@GZgbhTtW98~WXKc9WxXp22l zyvMFEOC!iaps6DVL9%>qr>_kcH@`l80UoS|ZSnO7Tyh9$Ng{CB6RP~|GK%po%Y@9v z#lkg%-rJtwoo3GO4}@aEGeWt)|AxN811 zk0UHN9F~1OW*Tw!w79xP6c|Bw-QNY~#6IKJ(T;V(cCxZa@GOLpD7AB6(W-2^dqUyjb!smeww^}yl7nO(*N%Lp0s_06SlcS^65OMc5?-zan zAChQ1+ux5brBDbsCGIaNT>U4RL%DQ$Zm{EA@M~i0L+GuwY9AaAD@;AR&l^jBUCuU~ z=DbOv&Wy~7Veduu-M)zb;|2I#-CkC=o|IgTIU(l05lpKG?e|MWk~jF_gbW$re~g^* z1cz~;BLoT8GCIrm%=NxC7UWE(_=@5V7l+VDK1 z$Iwp%Y=k{34DhSA^U+z(LEKThwP+D+=u*H)ZBF882h)v0fryw>OSe#7NdlD1bQu1*|5shq_$sEYH+Q{sNt*&>*ej(s~o#J>Tf{G`eE6%!*~gHqi)9Go*;7vvf}AInE(5H~pUlfw%RYo8Q8J7z3ISXOHsrlvSlikl3D4@ZIF1v-RV)M;|r$F9`f zuP=4G2p51`s>}n7W;=95`)G#J(fp+DYb8ywl3Cnvs6$JCV=-@lGy)OMMTcwi!)#;b z|G;RO1oR$PaA3wfB1)8HZ4nVs7k4RwbPa0lA^FYIu$Dol6qLK>HKoe(79E75#l^?d z)15Sh2l_}{w#E9>*)u=p z_iJF6L`dLrECp2AO;R#JJlSig7x{Pm^7(~hWhF-(B;$i<{4TOH{HCKq#?M}zB_@h4 zY`-vxX>v$i?xkSrp<-F2^q=@KmZ+Z`*R{y@~R&nORd%>31te5KSEOt zbP9FpbuYY`R)8Mf^r6%u&JRb{2c&x0V^$5@H~zujuJHHlKI`^hipeY^A|}qtmz#$e 
zcaf9dIOhsV3B@~m@=?zo$@}Hes%2nd-nqvt;^xNOx7l*s2^=8Hw0%J1((bVnI22hj zIEHjB>CT}+xR*8|=I7tN3oSrrO=~%+*i>Ry~VheFmAS<5?I>S4a_r^E`(^5zHpO=rKh*L~zFL$D9@kgSSIJr19~eD7o>f zl;pi2HsSbuw>uy`T??7=`Cmh-&uv?5qAwe`x9DT=g)@SC@#rorqR!0%75ejw9=CwI z8mrO$_)&AW;9ZvaK?`P7Uh~g234JZU`E|Y%P+6N3gx@wOSS)IFErx< zwkVWdCYRh_$#^hKhtcVB7zPY1~O<~cLd>IWK(s29*4#5ZSvk`{B&PgacTvxKovJCC`bcI->^OfJwsNi$Q)m6x4 z4(r%k_cF5i4=~@nb`t)Ti4{^$ei|($WI>;w-z9`9%fWO>9%NubT&U1=rN+`3_K(WT zKmI#NUi#*@zThbk#iqDnSt;}~jv#GN!to-nNx$3$`T~Qx4>l!x?1^=R3k88W^5Vhs zd1h9%nQ0#{X4GS4j)#72oPSxB*^|FDjys}pjN5P?3$68xPCA7wSKm0bx^d8yi$|nl z&zx&+r&b-4;I3{s)-4B843R+*;ruRz5Esi)*Oty zC!H?;#)YFn+r&R{fxbaM{ZFbXN-+@^f0ylXOBu?r(?JL{3Xo>d5ke1kjuB@KWJa;L zwHf{Ld#2K5|L}2|W8(hIzt5IIgz@@ScUNcE{xPkg`DM14iZS`cEl4-C!X*)PI6+fU-}o z+PfLejpiniA{QgDX$tRG?#f0q{7oJ=(ij z+aG`EqQ_a;$*(*m;>dc0i9wy2DRaNl7=!bwbk1!yslu#44vxt(JS;3MKCYUdzP&gcS$gZTxq)sWLXr zF@}0UA>(EyMZ&q5DK=sC;WKw^Li`e?J6CTAFd&72jdo@f$#tr3!{CO}%uxXVpBPcw zu@Q#OoFqB@K=yQT0A@pj>Fq-0RAKw}edBhg$C|tt^+oKwk$kE=C=|=e*${=sEJIBJ z@S8fMqF;z7#gvk15I%P~#*4X=it#gwagw7Zb>sc2Q;W+h3l_w*uD9)n>Sn=CEZ25P zZ!rlH`+f&i8vjLJ855Brq#;6XBZQc;@VUcn#GxG%>04-X88?o-Hf*Ap!YD+$3AyOY z0+G^;Lk`&aqCH?&8+q@^Cf}afC{*AoS;Z^evKVJLUKpTVy`e zSqWkydF%_^D?$Cl86c(N=+X+Dg4kc&DjT}4{pv<;8=zWrc8`=StpD4a)^E}RU>#op z@O}YnOSx&Z(_7Z}tQyfQR7+KLDyjwwxPx1y3?1Ah&Oc|qt}lXqS6t+IiG-RvI;siR zFzB5Klb>kMZDL16#wklAm7=y|x^i&0l;3R@hNW8ON?B$&TCwO^bqcpV5y{RYTn}EB zYV^UiulY3qP~zCV(wvU3nNv&w{NIMxN=LI{-zR?rwRfGOOJtG8t!?bjcU9#cwbYlO zt7Q5&vYdDe;nI^(W$qk+0fE4{J#prpHC6g!@x#^P7d!@0Y#6tw4JrhK6+%M3g&JlE zJ5D72{F*qAU0{gv_f;npX;P@c<}u_3@~O;Vz&Y9_?QBk}Z&yjNq@MNg_5~5frH84V zXmj&!)%3PCPqgk3D-`y&#UuB2etoif^};48?|FI}^33KH#p_q38eVXyfpCuWD1UnN z0S7Xo{7skR?iCQd7>v}m8FSQ;~+tGS~RzR-9Rko0uT8y<|Sbow3z0_g%ErN0HjT-GT zn4fiRpTDp(Zt&&}4|A}!CvJ>6#V5RYT@aS`eLH1kaW}3cm(v<0IY)mLuefEJ=pyy1 zWR%#J+!LBAMRTEm8(jEzVn{l!V)u0lALej$P+B;iZ(Cw(^dscUKsj=1p|aHU?(}|! 
z9bF!k&au8~&uy^L*!SKMrQ5Ig!=U&Dk81R>h5x5I&T6G$v$i@=Fq>D3*e}%dEYCeu zL|uL3i_~T8QAL8H_xpo35*Lc>Bz%H!qR&8qspgvv=W8RoUJaWfcyT>^*i@8a$gt*KG%ng;5ay%@LWI8EM3FP z3J1Z_@Cwk8H+rS1i>e0)Iv@jB^_Ibwc)*QoZi1N%7bea2nxyEwUV0g$+*v!V_3~Z^ zswcU!!hl?1LguVmPeT$L3A|_gPx3QGCqJ$fM!`>w(z&>)s5im3ff)$$ z1piWN=MucYjyENC92FS&+_e(Ob#|Xv*?#%f`IYl*oRe7CfKf>{$z~SH`#wNy&oMHM zu@Q_kl0H$^kB*|NtaPfBXY)JUX5Y^%4CjoR=vXS6PupN4-)N=SGR9wIN^25gND^Yy zmoUoZW^-ctZ&LH7{LO(JXlu$}EB&`J`(e&&48uelE(O^qG;1lfu5XJ-KZoEO$fnf) z`k3KUz6XSOHWrdI>6WHo#PBnKx}o z__=m}CR|%0xAD^gEHhPJZheaCWSkWhX1E#^B0|H|OK`49!CC0T!(89?*S(n0pbJ9F z9+%dCn4sEK|6GA+!ORg~F)7!s$*<_K-}zLZXVzaurv-n-7u%Ml@V^lulSNqD*p_?O$@m z$kK;@n21LIi_btb^E5M%%n4zXl0{dJ92zP}Oo+IcN;TuO*M#pk@nb<3Y!ZGfcI zMOeXx8eez{k$ZPL5KJ|>X3OR#zb6;b;2p@bg%K}1^gYpN3X(g z5CXAHBntoJ`vYJmi_AT{b=`e^buJPiTrxjmNr^z}yI)1q47KGfsL^0#@~Y-?y2Ssz zM^x9ak*KJYyXN*<3}@q3-!om0aC1TFqv}^B%g1gGc)%(LdM3hvs99+3B(w|ajhWczNjAHqk{A31oXBlZ)mcz z5h2JOo#Eh^iTQisCrhVn7uLv3xda z6gCFs%-`!JKqPBnU?T5#AXDX`GncgX@^gF)wG+zy`OXcPft;^|I2Zw*AXn0Z*fN#Qkw zX#%x`&psGzr*c)afbz}cJ53%Qb)ifO)8J$gXJ+nwf*VJLCfFAQVO{_lXOdQ#swa~w zJoBo|rH6DR2KV(cQ$di?EAl6(>p$R$D=?^*tJl9<9vvY6O{!}sHx;U?zVi}`yD#Ki zVRm%8^CU<`Rp=PgUoup52^anA!f!s!yMHTRdf*HFPoa(XWfNJVk4cM25Y%|YYUwiD z5R}id8+0~L`_N}u&u_Lb`S=Sch(sTJ5etsIMVL=*S03QkQxY%~8Iyd)sZzT{BPWU& zGtUdY!+GRz4tHsDzwN?lnd&m8dsJf-c2S|IEMbXvqzA_X$6xx|eW&x!GUm@b@=#KLs~w5~7eUpJ4^`#-4pWkC6R!(%Vq&j?oO^%D&Cy#MR96o&g;|+=*A9xNY|D3pgS()vPeELEC z-9RW2V|5f^O)V^Xdpzw*zQ$3c9uWsRP)6MKOdn+;so>Tw*c)Yx5)RDR=Ij)ny)ZI$ zX;oThb8p1D0H~;k1%y`VXWBrU@`g?F5R?;ZrH>Dn3P`0c->E)0=;v#uX()CG>T)DF zSP}(HmZs8et+D}M^w~=ZTOmbEWNs34IwkHcK=HFcM?0;v*<;iH_PINOZNvSBJ1;Qk zyE|I+Q_=&|5W+`042Uq9B2uW3^hXoxJH>3q%Qo91>9`20Yq+IB1$CNoIm~>DLg53r zLNbJ@D~bp<53(B9R?tA_z4|fn?=cg~o7l77DnT~^qLOc$Co2v6kB_#yY$D$kwNdbLRf(m zR=1(9i!JcqLbrmLQVx(p1Lzs{!AksI;YuxNdXis8zdolq_a-?e0#7t^WM(Fu=Mh}? 
z=+;pR-A0}^kc3c9V@f?!;-JMa1o~dp+9&FXk0(DM!Aru!v8z2WH?|^SNeR`}J!-yj z(gTGhjCp3TgNyUu$bOhy141u96LLFY!~KKn-6kF%4>Fi;chmuA!?lf0&_+wg3e>Xc z@@|QJQ5O4oTYh&$rqE}D_U-IwgwsCpyYz&q{Hezbh~xM#>yq!rTjpq9%l^Sz^>@_; z*bT~(W_#&B3hE}Rlv<7;&E>9{U%~KyeK2ZQ#dT0h&r4VmOoirEY_PS}%2DBd=~1pMo04QOYJ=5XJTOKw^~h*c$Prf6P= zb}FhJdE61TN~B%w`0H>fJKC5Sp%SuNt>}HywkJAR$rKkeb)8P)XtO~${WoYo|ANI5 z*w#CS_LuzAEnwIjr~iButem9#EfkTQ4sDravk1@F(f3|^{)s$JNFZu|XK*bu92lNG zTMi8#K6W^}b~3~WT;u45q^8V&scrw3tlQ)}v-U*3M)|o0EDBu*pw3iI-bAJh{Pr9x zIzth|SY^2S&f?Q7@le(g%7iXch9OgGCMf?Kt>qaVUo3az7-Nz#@guA%hfC3~J~WZ8 z7>1rNf?YjQ&7S-%2&R@Wt5FySsZ8#l-v%{zzJ+(;-@wGEhdj_i_}G%cL(BC?Df*eB zpCkI)z(@3dVl&AghKCUk^w&m8!}d08a0>7_>iwwMP;v{DTKY3vN`(^+Nw6*;@!=5| z6!9IH&iX%sYsA}Ntzt1fox`^Ls830>!x=<1+}1Q6*!~)G?vi$Ev`tGhV zKc0#?;yQ~X81z?*;@tIFNU|^o$mMP7cV&40|Bzvm!TtaP!NZZ${P0v3bqr*Y53}K- z5}?x@Ze3fGVuY&C%Xsv(x$cjVVybXVbL4i(D{?0)-kX`BQ<3p_{vxZozo%>UUrdTgGxVZVL66*B1wCCif?*@dB?K4J; zsgO!1-@wv`L$)89`xU7X1qj6tfi>Uk1lHTQWAmk?LfB7Thy9)La3Bn$U!bt>)WdJcxpbyH)+ENtCcMIzSNArpL5k?d z!22-uuEm~g*;&q|%3hfS5O&r~t^4zZXzkX$yBIG7<_$z3+@ z(V1?-4pfbVs6f|OTgs#V)>oV;@OAJ)|lOIK2E7=!Z4~Y@RA7zIjzIK zG~(crkXI}wYvCEyxy3%ksZ4kqX~`ge-=Zo;gcN;2udo#8i_byD9p{|QxxUzx9QEu6|#MGn3T1h6Z>(%P8VuM`jXeu z%XKmuwkHI1w0WXN89}CjuV~6vpm0f0+n`xu?xr4QJ+i6CcmUJ>XI%pi>=h~SuP-s@7c}+7hB-LtlrNXz8sLiM*As}Y7Jj+7 zb3V7eEgYG`qpp*DTHAYGDDxRQ7UL9;Faw`wb<1}VVbI@35D-K(lR?V}`!pT?A@JL8 zfH;g)-UWlXW~q49`Pte*eeeF)_f0a`AE!-;S3@C3^Ge`r;_TzNKVWuoq#Bf@570Ty zp~LKg&kw+oob;?1+ZN5ao${y&(nEiVdCBK$opnH4eJj;~#x3CSSj=aRAC8s()N>!7 z--`eNcp1+c-zJjpc^kr zfe#Z4n);t`iICKXcMx91qvA0N$eJ!l+jY`h#k=euApT%K~Ja^Qq3AB02s!rU}fP_^yM80i?dKDC{IZUnR!(j477* z*{Dpf#fJNS2TBQJf4<#EWGG{cDOgeDR>DzmHcz4!5Y-hn2xe@CN^4@3Zsh_s!w~azlm*!}KpvZ3??|mS60FVqZs2sZ&4PH0*(hp*C!jyf7m;+Q1$huYr{?R}eE$*+Qlj49sUW&?rfgTPch z$FF3-oUPmL6xBYcpt#J|YhEC^RS88>X*;)kraR6R!?)wZ#BaEZL_xIRS614AI@$Zu zovWNoe#MG%n@J2x%51aVKIvsmYTd`RvPOGD;CM1{O{2 zCH1%_-sS#6bHSrzO!%@-MHQOH_ePZ35Ax0FU zudT^|>281*voldnWDVt;@=MzGG%D^hzjQ*kjPev+9?>1Fn{@ zI^vy4nN}%QBe#-1Wniw)3kn*0Ojg>IIp|GHaMjND*}4B}aAHV}!TI8{I%DuKxt%BB zjCGY+lS;wYdgDq6@zaZNy?AvTSL4#CQG^Wxvc##34$(eseY4bgoSPr9k>SIEN}+6% zjj9ca+L_3+8BbfVm$*;Nh!66NQqN45RPn}nzqh(Ir%4o$W>UVZdr*6QS!q#`Wjr8D zVdM^`|J?p`2p!-Qvg{8bfpUHDxOXuGk6LEp;Q$>l5q6mgmKCQ#-%{H%&SqS(sY4Cc zCi+hfH=<3BNtDGt|EPl^boFPV+MIL^+#=(Xv6sR`Yjl)^r!je$K30W3rfsE}T8(o$ zvQFMiUJYf8S{kbm7>0R_R}j!z4I%|98nfc(d6^LTEDh12fw;}V^D=2xh)};JcqXr2u2KcJN5mO z9<$`U+&e}YSYdhoRxK54R~0eZIORmKP{sZUeK*OuPij(%PI_p4Q^rl zKY_iR)Z_z!Z8T>Xp2{CX_B>qwnpMQU%qHAYZOWp62deE%d z-k)+TDf4~I^5Dh`W{mnBt=asYY65A4WobK{^PhUdQ-2LBoJKWZ*Tk$(3qP<{WLgzovES0k-btc5HWB2bhSE!-}Z*%c@svrVY3(0#)084t8DNG=Y{MVKlJfCQ_*6O>FySu;g1QwLVn!=$o2ei_^K#u>a*SFIp9)SS)RR86Diu z9TQlvzcL`E8xMX=tZwBdzE`+v&g$1A4IQ3wZMMfO-G0+>u(aJnPHF~a>BsI-M!vBe zlY=FB+qrS;lJ^;qIWx-0_;(hQ4VXwSpLH<6h2zJ2=!W&-yZ2)_w}^3>djVFdQU9tW zx;D2!zF6APCMw4L^F0friUP`Cw6@Q@Z)yR?xZbjP&%9rMt$EaPImBZzmveb{%vsrf zq)&;e-pBc^mT2{_aV;XwAg$?&RO&LyZ&wIki`$-cd!3q3B(ip1*KhS>bNTvn%Hcl$ zc(G;e%64(?Z=bP}g?dHD{!}yIAZY7ZtDDU(H^bV_DB|9|_2|PZNd)_b)Y+pCRqHEw z`_L1#QE$dz`q$5XQoudrY%#}8G!HPtZA`_AQKhf0n1BDdH7GovcI@Ft&bo((nkK8Mes4dy$~vjx`~gevz_Q zEDcD3qcuu3qc+8gG@ut4a(I{`c|78M`KuN#)KTIlUb&7FK?zb9X>D@ZOy0g;-FfSG z_A4J11LX%sOd+h4H@k8Nb4RCnS1;f}iTlUX2o<7g>X|suy77V@gMRB{lkW0Co&{vx zX+DFN=lUK476Ly>ur;23DeAqO%eq>b#rpIM6s~_T{hwl-vw_{Lf(Y<+n)%`2Qx0qL z-nO4ro(4w@xd(wE5?=+UfM9G03x5 z_V2v~bT92~$1>f$$6tO@^=ASms+M}*dP(bFzVIj>K56+wd|`v;%n;CAi;pUMrr5J%|+(nECWqkj#(&EWe76r(dxba2V@a^f9S zB%m*nQ?>w$*RA+X6~S zc;eJeYjPFsbQ%ER4^-vCL7)#j1~g5#aK$0dWa0nGS<+Wda~2QgpwGGw7*&k>Wq2-7 z7DWyHkS{#%Y<$RjPEd6^Ejt)_7P;|`WY$GuD@XKRJa7P-0RM9tG75pA)E!KO0zlAE)d 
zxjt{9vyu|oark4Ld-rjeanrun;*(%xe#b_pSuWCU%3g9+^FjHWsm$5qCO%2^9gyt| z6jr5=MW$>nYZ!fvZWg_y)k+~+aq#d4^xJtVz@g!w6{iPA{7cKY9BQ{5`uM)H_}7%AF&|7&uU7%86|z6nMK zax&*?lWO_aB-#Ty)I1g~FC(V2hbxW3X+mi(;m9-DXFEVI>-z9%oPY$8d;Pi5&`bD% zlXGFWi|q7BsrB?V320`fe=gP>CQU;HGH#LKyP#h;7kjr_^P2OwHcU*pdLoJF8*3$f zGla<53m2!3!_WOmvPh>)=b9VLEOlP&boDv9$)8)1@$%%^(@-X=-Pu};uw2h|`8 zi0RRr(6LG_T0Q7Hchk@2%&FTh7g_a~c!~;4um3ncV3t0~h3ab9!gr1{Q~-;!xc*HmNeI7@fYl0qTXrI*e_|lsi-xTq&{civ zvhoNsBD#G%Is=bCj3 zcNXS~CX6NK0A})PI#F2LkZPm95FH|{^WEV7@(=a43s2(&ujV?1#w+V*+*Bg{8#Iv! zms3jyx>Ml770cc2T>D)S5A(xjs-clY4X=|&AEp~X_bzdd95n%X=m6M~! z^eMx=`)sVZKxjK1XP{B_zn_0*1IPMf1>Oa7vFea5`y1n|pWwa+Oq;N8i?Vs0>d$TA zC>(~Zp|2dRYA@#NN5W*0(0nM4I$Aavu$4+LBMRwaNntP)3B^AIfFSh$E|31E8tM#$ zaJycpPbk;-)|_BhiSvc;=}-v?b71t{DT6NNL~`o|tg`^REeoXnkx#Zi@a>xDwi~H~ z0Yo#*eb!!my5UxUVg%2uQ4+6H<=CT*FON=_QOVmE8iHvbz_sNyLY1SbpH zCbwCK$Ttr3D6$x@L_H|4(p>F++3=QY^MeJuR;$QjsF z9Q3CGy%PzV$|wJn^#93Myy64G9O8Z!;ehYiqYHY0j^B-cgKm|h2J`<%(2b4W(!M}- z)a{CI(r_~W!XTFk`CFBb;63@KKp4}^eUhc%s*RuDr(@2EPFX+kcz|3&jj9Lb-vTgb z3{-aq4HS@s)5ApmDa!!>ivR$Ol>vj^dvXLIwvuzTj&4RVJc?~kU@eAvLgehu9Y9C` z3$DqSHDzmc>g7U7>Tg2%E%kl2gA3(P$D#SbrZ?wXl<0cP^(UBj@@cE@w-f=rO$jDr zYw)LuBk9+@sqs3H=7wd?&Cwf2(*##`rhPYR-hyPD0U(l1ziGW!9RdB1rD6A>?Gop7 zFaJDSD8v2+l%D7Sg@v3WEp$6FEPl5KzxaZ-H%1Q4%t0-g-IwX1VBC_dnFO zvc|gYfJtVOd`m!=+veNY&Y0+_Jr7KfwOm(%^xKUscpx$j!JN#oG4FAHTCL zunEo2Jd!S4&tg#>H)BBxX@ke4t%n=7D}O$5NgaHq7mM?~02@*IMuI4KF>n3bP;&4) zwI;H&`_?>6lER62k~_jUAGe+N%sJS=11mjdm;34ek@HLeuy3Zac(u4;gD%y%j6Q34 zC1_{Z{JkfAU%in0&pmnX$|(&_Sikjz_LSFK#Ee|&6QnVij-R46W%AYm^-9Gk}R-UFsh){q;Y`)>lH|>&W3c6(imrhCtB}vm}p;Ps4+%T~n zlgM**hUYXrUVo2tBsQCGf+hq58^on2|VWc)1oT-gM=2%R8DOa)0~5Y|lq{Z{BX9w*fwVmh3%iW1~?eT1QhX3{o}>mapH z0GJdRiujte8GN5=?0uYQlK$)|X2wacqM7u9%)Pi=eQ(sJE?n@ureQk*NLjE&^QM3v)efgKhwG}Lsq@aKGLul@x4 z95Rs*QHm@fu|q$%LgBD<Ws+Sxy}Mh0DT=a_)cFu zsm;Z{ryJT+sZTE$?DG|$cbdhr%JCI>(XU{!9pDl6i(T-+H=aHG{E%b+1f~_sCQjy7 zd3WKuF4n82j11RInQNO>RMa=i9IH7KqP3)eE~}k!*-{&Bl5y;U+igKrVAV?E=WYTo zJw@dUi@~~h$h}tGhwapY$Y`ru3c8(5GQSC#BYlP5Ri&rMRJI4kZ64$$4>0@Nc3~j& zuf9uL|-TSa&^gu{7Z@x7$V4_za{VuDLacYCfGs;CnARgR!bC;&+mCT*9Z@Z}DoWJ`HWZICwspkFIkC`XDhX@iZIvGD zH&oL)=cLT4afNcB-z9&qElXbxm^YLmDyCTY{H1n!M?<%@!^g2jN-8wNB+ z_H|cYLd^V2a9&{1y?NI6>Pip7t}rIOGfP{zG3}8+6`yLu|~13 zlK%dt)q#97xv1k*1Nwb-5>z}&tQCAg;8ERjR4cxWsm2MMo4{FbDd&;W9rS{$lYi1Rdp+*heC|l4`Dn9BaiP~_ z>QAP}Bz4&A6|f=#M_{wG)<++qHDC0-ksM~mm)7YYYr{^2YnoT$u2lbP)i2cU$P|L< z9eG_HY(fIhv?@q`;46G4Xx;|RdA#6Ol4Fn+PaH0NhjJY7-*EAD7}32g;KfzTB}S<( zvr7D8rjliQHvmAJk?(Xaxp%)->V0~@$U51Riig>X^G^&^3MD4Kk&+dHM-EVSxgN3V zj&+cpox31_s)e3%3{-N*CI*faAAk7yXge{=B^Tfu;T%)e0ZHo`s;5thF7rG+uJ$}Z zd-!#f;^9!@&=<~Y=natRUj=D%H(zyB9D~T(m0O`@>)T~oA{viv9BfSyT7mp)pNUw^ zHvuxFd7{0Xm+sJntyjDpEiw}Zv%AQ%+)@{tZ_-NXVz2CX*o_LK`DP?`;LT z9ROM)P#Q*_(71qX$cXNa+SmMiA%Dosp1WdaNzXS~UjNPsqVqJyR@arFz!Qkzb6eZ^J`&6NmG!+oc!PmRP4 z@5w{84Sae45%WLVG@|AImgai}2zJr#KkNP{ z;%Fb8*5u~DftONC7)c=ITKTXMB)T55Hns>B4;0&k+puB9pD$CeQ|hzlxGMPEE>-nt z5;PWr0aDED5-jw+JOHH|IkdrQgP>4p)XTZUG$4Vqy!XSuo=iw!4SL*JYfb85T%Uui zGn}vu!Z30a=Vx_nZ)c5%)UAGgfw8snz5LgC7}c7!;urnivIh)d_pR?nKcOK9VIq`m8=jxMozvQr`{{WFk|^3;P!WS?BUzj{s;5W@;dErU5|My zv@-b^oTFu!j9I^j%@ad|m5|W>!o-_O;x1%tA>==ogiJ4xG!Q@)SvO`B5I~6}Xef29 z(_Wb4cFWF>zbPh$8DZi&ejj|+D)X&pkAZ93sB3MH?yp#zu0t5uYU5h?)9>1s>-eIh z6!eL)+hQ2j?U*p8NRWJ*pk!|vo}hTLDNydmJ)LOF6?10@!YB2L|b zo~PowEtoO9LgC?4R2;**bDF5l9x(Xiuxb}>s@jy$&0jB}c~FsLkeKf?#+jcn^A~^p zp!jLr7WnfdCZil-Dx7e+;nC%?t{?FoT#8u5fMdjNwSH|z*cf1oGr7=Fa&O6er`Y+> z7{`HOVGf>cVML8&!zZX#NrF1sCJiAz-rHRwUdVLFZi-{QfBGT8!-2<~A>g?&1t39= zfE!aA>|WP#Ibj{GJw2+4gWdxC2bI?qv=H<74d@VcGK0zpp5v(+h4GZF(ohC#cvbPc 
zHJgsxNSi2=H!vICueB5e^2K|`<+e$C+-84B$A=5LUeWB^p8Va5QAas);)sNwe~;|l zK7eU^u1)Z6jQ`Oq$do}6(aQP&Zsh33{+Jp}8WVVf?C3Hk-`drm-m384?gzSVD#+{s zuxb}}xm5$2L^oT=crj6tTdohQkN^CteINEr+I3N554I)zc8Y5ImAS<^u7+ZaJ@`20 zAa349I6?sPzZY$~#*ePpQ`~xX*(rE;dr*Bga@||aCHjeN<{ZY~2z68#c{JG*l!?DB z06_EdsEruRheP^#H!?iDAL|63g()L$v|2+q9-!j^L%OL|&|!ypLwh*t`6$a79TkRx z6p^vzRlgtnr7xlGZ~eoi5SW(n>^UZqqBSr?53h^jqcymFp2HuOKH9jkt_z9VFW3ps zVB|GVQTJyqLA?nJTf+6?H3wQYRBY$7>;#c*F@_@weHk9p3@y>pA8-yR(M_d3a)j5$ z1*7(Yl#RDB?{PY;O8`LKkK7TfhW?sY?g3TkB6pEqV$4mikg<^L%fX+cp!-$}CkfJi zv(c*%yfje>}y^ z6hpx|$-EzuCfvu)J#t`&qQE4@(4A=(ZE^sXz`L6fpJGK~nj+61{-`Ua zIcf!at6rcFL9J4thKV|KRXkKvFXk%lxpf9IHUg5}8CZ@+8PYU#lkzRxgsPaRUbcVF z0uu!-874Y`b+s%BedmS^kzCpBBwxZpBV?!3P!xm_c*IiQ{7jky zKlw&Q7ZfH`G0A|;Q{0Rn074W#{meTiz$#E&qTv_YyYgp$HfGk~6Wk7ag+PCm*^hnO zDSO4C>_`BcmrvO*t6qfta**KLiDmw;AOwamqz_#S7?xW@CtA$9XC z82PF${s0Pa#{3VMh^qMc!KZ&2+7Cu%q^L{f_@1lf!;X3F3&&vaFoyd&_C(1>B&Ey%uuOff((1m@?bX{@gCi#<+>Q z#L6Kk#`+)1FomR^sz3S5e_$&prb6BYSJhWuj0PBP7!rk`n;#z?KrXZ382#$X3|FT2 zasjljB7Zl_Jc$A5)z`De3x}6h=eP(Ta$Dz@(HAi6JKjlPgytEk6o1$XB>UDR!Q^pdw+#v zVKWtab-Kf1@z1ihIjZb)`Q1BbJ&j?KSei;jH-{0Mg9E(BS}%Y)_6~@^J^f@7y$S6 z9X9kB#Y44xVGuNvdrk#uxad+&0zl&_pY9+n6ClorU_>PjiauXv99h^+g!DHxX+xw(d6qeQI@TtoP90M_b=H(sBfgWU=43U#1!D7 z%Sdg@nCVbE$Vh2h<~(k?7(@0z=)v`q=WU>_UuvZ?jyj7Oq%W!1q|%u|;k0q17Mby> zSz*?)xmsrZB)EMdy0uWl@PkfU(&!M9(Gw{1GIBaEizIaK{SxwDzhC_ln6cNL^H?2y zV*BI)Nibh(=Lg^oSA@L$)WvvwT`mu)~P3m{^w-b;? z^pr#xRMYpo0vnHfW}$Sf&c1a8n`UKl>7g*w=c0-k{H{s>szmcQm1k39$=bnO7Lti1 z%)8FM3Q2S7w6Icd?G2*TWkGB3*QQ>|UM^=Emq-X3XY0)nB`f>|#bCxq>gnX6BEzxr zG6Qca&U4kzRf)bA@{t}tF2Sh`+Y)Z(>>w=I*UZE1mV^kyK3TD6OX8LRb(wJkp7 zi`1*NIpxNX?&-j-yDFzB3nXQW;L#?;tZ_>ucqG??=9JBfE}*>;)xXbQ0h6N^qy%kT=J$NDo_w7ArM@ z!lowz^ZIM4Q$#-hGA`=o^(D{jVjYR80E}pOqV62w1WaFL0RC6e7XcsP7ctcRnz+AYJ68IQ3r>8*3Z;}@XyXl?Qu1VlQGZ_KHq=!wx7kM)iAuLQtS@o zEVHH{6J_&tL#X~7`5v-!jS3b$&B0}VtcI5W#6*We(XT9&EMSrHw z0-p^Dy zdzL6X^;U^=p5h0&-Am&Fsj*R#stfmCL_qfOV^u{aCy!6=8|k9%611pm!+w7zTMU#y zF43dj()-v6LY<1&zt}x%*FNX@p?yiMh#X)R^yM2WjPof4;?qsMwLjIm7PONtdOA8R zzU?@?g|JrO1B#IJALJBl15}M2$76QQFAct8&+=^IR~odyd9ClasN>y)$3E^k`}8b6 zVTwwWN$IERUQTRXX-2K>L~gRnJ+M!KcP7z*%mlh6!~|4)8M;`qxorDhz#YT1q5>(W z=ev?}c`{)A;NYrNhDH}scQcA4$)m!`aIsXoD7Eg6@L32IOp8ZjVGyakbk2a1&S5;w z(a`USeFhSpl~<7H>^zN5yr`Z$zI3(5CiEELfm4}7hp^t3lNb#AQtxH^ZiVBb1pBvC z--kmr66JsU3P&#BOIaeul(}8@lYxx&tUdZY806JO(L=gtf*tr!;D;AQbNvl!g)JDt zBym8P1})2@9PLew+Eito5K&hVKXawf9soeFAGs=h017biyIUkT+}RfxuN5!RN&D55 zwyxXeXS(ArTshA!&SoSOotmnM?jf~1Vt&u#Vf0(Uag&p18u6b{DlEOT;s|{tBJRIO zA7(VEE?_CI{DccE2oleMDnueOY`>E!l$MlwQ?i6Qxojz}wp6j1k0W6uQ&mm#!*o@4 z3Xgu1aI43UzkBd|y`ZvTP1s%7<=e-4KrAYPn$q;2I@O@GN}y z5$0|)MBz~^v#vs{`%U(F42m@(T`pmiEKre+Ds;IP=Ft=Oy%I~gIz9)y)kCZ(sU%~#p) z3l7xU)U`XL?-gBkyeZBEYj9j}fj|$(6U@RWHZa5I!}F*9?EGS-{m-!25M3tcW`~73 zZu<8o_ZNy!LLM~Yw)f!zO*hgeY87EV+Bexls>Q}8tfG|JZu!t+*JISeBAVPYBMxTk za6n3J0<|xMOK`^!3SsD&DUhIMI4rs!G%-BD&czJx$kD#tRI%=Z^B9z~A5UZic+nWs z%%RD1T3Dka`mtC#N9-mi1!shlkwc-7GXGj3B1=ox^!VMAml!}hDLL*By}zoy(F;o< zx-VeO-chyd(m2kt1MpW(8Y%e{qa-U0m17t-+^y3H=fG~rZMy9w*DEjW#I5q+Qwpf zVlEWUIv%;9$ErtHzoYO5s-7$l)fuXS0)jk)u1FN|Xk;ppWFUP<_8U+@^0>4YaS7{a zT#gy)4`uN41QF82N}*F#0yox0zR;;ZdN%p0X=Y1mdiV2;4lSd@BqXWpPQzj?t1b9u z;yxCY5=c*X%XWW>ynmNb|LP{Hqf(7PBCpJrY1QQu;hQLOlNkz|BBS^J#C7n(pk;h| znVd-+J1_$RBpc0a3C4nX*vb(y>1imCN!Vi|?W#^ZW25Q%W?d~7R-6UDD_Z1#cJQUc zGM4{2IoNR*)pFvShJqEuLwUB0BW(6Q*03FM_30?HJ<`WfEvxxGR#lQpe(_o2GQYG0jn1mk%W6}l;K9*i9ZAtu} zJ0Y3xZd8wH;FtomA>q?JPyiu53=0fXtM0&hAdtXz`E%w?i2r=hguGjtJ59&g`I0i> zUPrwD>7OBa^W1sqlcV3Jp>(kKhAsS(*6E}cw>@=UAU75)AK4B~zki2CPu?&^Cv`UY z^75wFn@j47cirm7?!2udyF3FPQ``mCB;iDbGW0!BE%#|+fxpji|8C>wLyaa4v@cat 
zV6kai(DbSS4p2lBH~9O&%?em?QjG)9C#o>sa9%xd2@uoqy7|Ez5ve58Fkf7;PsKZ@ z;`6f)ys=XEpcD1+0<9Cr4;cE_+{R(x@j(x$_o5^@@F&fuPJ>|i7a;jS1;q>gl{hZF zdTt5xxgOy>)N1s%njX&@wkt#Ds0D5qC6O&b{T9+itPg_d=a{bu{PhOdtwtQV<`mIo zGWhl%aEhbLR8SQ;4*ZJ?q37(iKSyzta)dfHug9r8b4GqtQ8M0n*H4-<# z43JnVYcP*^PpR5JA@o{EGay_yr_#F?|9_S~SW?9+`r8Wc@kv7u5_l<5Ha;oqbh01Z8BbMolzdJ;jycTv7dfRy zVjlfj${+m{du#SZDE*&mF5p3*DDf4S<+N?~i&|>R-hACV; ze8HjExR3nt0nse;sgc&zBSf%xfz}J{_dUo4>GHXxwqZC(AButv@u50*r;fiF`#tw! z`~Suk5zE7!yVv5qHrf-T<`)D{&z+U?z2=!yepwP;TS#wB5$~-JC}Q&L@6k$nU*?5I zm)Uv9J?y+y*hTONGwj?pPKL0|ILDuNd@8a4Z%@_+Io`F)o6GN<2Tj1S#)rLOJ96}7_` zd{cPuwpn+zT=yEUa00`le>@V9eY?mCLtSIvO)TTe2<}0u40A@59-H`mux`JR>3DBa zBZdXlLT;YCejlu=@&{qIpJz}k;nDp(;uWgYIL=Bt?yRAXG~AIy_FtuSet4NOQsan> zxS!8LaA51ka0)sz%LfeT+|1Alw|=m*erOfWPnM9Do6|QdbVcZHm7N$?BzeW79O(2R zCDswRPs4n?0a_Hssmm7*G#<>=Cw|d4`aT?#pQx&wB#S_3nl<~Tc{m(S(f`6SD4rcf zKB;^4{c2C;;cIjE1mS}^`KzhB6;iK}cAXqbk?<-#mN*`C24gW%b(Zq;7S3CFF;N%* zY{ot?*>mzR6Se7w0=$Q9eBy~I5u!Pq|CyvyOx3|=4}Hi*(99jmgVyDI>fV2XvIo@t z#`0A^gSQ*d;yy+GkdZe=?ClRs;ect24HYm{=b4Oz#@}~n<9UqPU*qu*aR}+ocigR- z{*_7s`5``0R`&0^cF#Uf4SYL|Nl;NVtS3B@x)vKkRQhUSs^W}dS^<9+q$}1j7|MI) zJld3p)aNakgTS9@a&sq28CEQesMH)&szjxgV;uY#Rv#c>I`#bMf9t1*xc01>P%ni1 z>(seM7a?m}`y)S}Pp!n`{?FkzvYmRVm2@=Hv032rtGq?3JpX|t&%gMQmrKqv9h2po z8KGG+M04soAr#fa5|LJ6+H7fDuc4rKyw?)ta#59Ot~YGu{2nYK_66f!QQcxA(aRTy zP0xR-8RSv%I6}C%Q`@aRqy(AL>lk;fw~^Bi!7L=G*|j+WF>9|e<}dpDmVX|*AclSx z9>E#_YT3T<@O3-uL7GJfc6*e;bv9@d*iNp+tCv-6snL2nVD!DB@Eb+qAZ2J(CGr*v zaN@XJ9A{PW^uk5B!SRpy3AnDkiWq7nx!O}JaiVYeKFEv+HCMX*Episp_B^1pBwTo} z35jlK4p;e6;X!1&&V`#P`Br!7wa#aom|`Pk`II&#T7wx$gg^SsXs{~wxt=Jw66^IC zaq+PeXlXM-8r&zSMceJZYn8k&B$%eJyez?|#ax=(ou^f-_}_cj3!6EcQ@?l_^g?dX z8;47{ZbH28L6a7eVA0`{>McWVAzNVO8bG?RfA@3_^I{th6}*?!Yb-A8%T zoNC?sxN}xG+i(ENSP~xo1kR%cr=OyoxswUz!t*Ohx<;e5J|4J&Mf1--_yED%SWpXH z38Xc3R7;!vOA{6Z`Rpvu|JX>!2h`Zp-R$ZX&LcwTNG|Og5z&ee%RhuJacvq|QFPme zg}y)8^Gr%H9et|s{)hxhRtX>s{BD$9B>C0~xn50T@Y-C4q|a5h7&upN4?!#W(nWbH zM;>Ncbi<2ct{itGp=mgQP!-)ZJJt~`O^@GJ^f~axULP2$?l}M512?a4K&=l;Fmrol zGy$ja8;2D})8F|c44cn608e~3XRDm=hrXD)cv*fyw_M|13?+{~oapaVy7Msk-&@L~ zhri9tZ&Uo;Cp2nV`CWokF1=w+u1+PAbb)g=XpW@oZ~dGeth%qZjpGUZ{pHTH^lrrJh|h_G2YQv z^iiZ-@E*FYkZX9Ooq^QpS>mo>aI4ueocJ|O<9?fKvze)1N`=2_f=h=F|JF9l1GoqZ z*qib3sfAK+W#(Rqwc(R?QtG;w_bTp>zdi99{!9)9vKHzv%r8oeX3yQ zc2UqQ$Xa!Qd%yNrRsa@_Fcf=Ft2S6?;9cUqIT}S9DPtB*Yer349+p2~Aw-71m*dB9i&RWe&+|w7jse8eY$}w*HR~4<)_4gcy zM{rtmWP5rdCC2q{%kGgHv>4{92{cyRTqPNMtRhy*4@UXuPRm%QA(Yo}tH6NJBqY0oCw?eEn99 zk|6Hv_~L|L4Sq;qgI!6Cmdy$6*${YoIvA18nmD3ZfyYjSR%d1kR6lbJR#g*TE+?${DD!71j# z>mp9t2wt1R5ra}nc1)fH+g;gv=%~`iir87dYsu!%cl3j3(jAQReis6ZUoFgDBDO6H z&Ar_p{{GSLJAO=Svz`FBLU*WYMULYMO3C+kc}($YBtim23ed9Fz=bK*XYTh)j(Xuo z4MOOtMTs%jF7EwKp+3)>zI9u2&0brM>T}Nh*XGa&gbCsvc|?In-udt6{^a#-anoS; zr@r*GI1Y+R#+KXN`zNxb3uHOhI;!*b-ohmp{GxcD4dT&xq_|^9nSrt{G=fgVutS@1 z`upgX6E1YieD2I`;q0{h*OA6Wzs|luSAZ^NalF-SCLy?gDd-hHP!UPR?{27 z*q&x;?q6mvR3XjX2VO)+cQr8swlp;?-*M?n6svA+<>F)AFk-Jx7#vBayvg*+ROx1U z&wUNS`MV*+UjCeK6|t_r1{}#j>-@8sa?f&dNttMAq8E~Ls=g*2ZFio0oxHUf?H`5=Dm)SoS6xTi+Db~Un&MvizP#SM7P$i0Onwf8h z-mqs?7dwyrD6KZ)2m7$8P3 zx}95ki30Nl+(gsGg2_XPank)l^CluM%tVYL@2X6RO>Dk#e`EHEtfh6Ph^{Z&COn#K zv{;5-*8c~cOAZ4O=J)WZUxR=JYCrP4scb_XJdIbzUgD9=lAKqQ;s>rnwm`*XZ-{0| z3TvUu9F6DTh1}Zb9_)BAI!=kJ7N?puotSi-Z%)IN?E5Cri{_x%S&D_n$tY3{CD6$w zbRmiLT@g4l78dAr%MXxAQNhw=Nmu}!MQ7CyS@@5ae#{q%D|l7DQjIDmB+jURDy!m4 zd#NH`EUYuj#)N~5@-qZxIyt8$&KT>?gjd9?d#>mKOSjv+EY?KW-#KN#S&qbZ$j3pE zkCDnH6FNO!_gYiQ=zxi!fI?A{ohG>87X*+vAgJ|+YvKp|V0L%P?ZmR8p`=xP>6}7b zKVsUlu)dvGz54FG-R|0}%|{NrZUar%EcW`h8s6?V97HBYyj% zj%w781U2yF#Rkw{O6T1{ms>6HASC}o3L}Qh`^NiglTjw2XI;oh{T5AJSKs&t0yeAS 
z?tanNzg5p@#;JZ4y?y*GR$?;JTdA4C;0R?wcbqSyKj0kE1uXu6CLz=K7;}b{x(JxoQFDsv2*n=6QKGZNl~k;i_!mG0&wR zV^3y?5Ip-%9gS}u^Di3f-I~ak+KJ9%2)qz^vCmk+Kw=|hH^^9W<48aP?Ll|}5|GI? zDInj>N!h$)X#sk1j%zlLP05bAoacy@F;vX==NQu@nmM^^+w?*rCY~*4=f@FoFWh(u zTB*2_2ts*FY8ADbmG^mOt7?9e)>0eyQc1Rho4#N;;2tUNTpSH(u-AW6*CB5ZKi`(& z;BJ_r_hl5v9sP;}Nl7q@_yCi){BGV|H>ii`Ex#-LeCpD7XQrhYVOCM2YATcN>^_kY zh{QzJnc#I=nzJc^%=Mf>!WQMGSe_tcVp*^_aaXx=ixgE5U!Ewr2yE z5Z+k-T54`$LgwM?)+a-2*1u@Jsb&oTS8H*26Q%eB?8uVZf5SljG!eLiEpH)0?%hBf z6_<#%JlR9t>?Rd^t638_ZC}9Uw~O=`TL4hJ!P)*GbPD`_>#OL=uZK?`B|F|pwZ!Xc zYWexPUk-I2%61|2fb7?D2}WS}rcT`$J%gcJvG%cdl+-yYSRaC z!d2><&*H-tRb}XgNfHcp&jv&CSA}RP*z8?twVIFa;J(+%QY5T(_RKWy|oVTzQ(UI!`U2=Ox@KqzxV628pM>dZG-ZK--5A{QrnE>)v$hF2r{E*Fy(Iwi#NB1R=;Lk`&vUxOf+ zI0mJJPDEl{X!*`&9MIp;4s#(HmrgzxTyQ=Qjax>PzY}sv#M=No9T`_bM_=D&{=ZR! z^*vlCl;vq^t2;P0@`S2g+WS&QpW`llw_e3%MC-dOD$K4P7iH6M`&^?aOK2G;YIviU zM3xr40TMt_)T|t@AJfX;)VYy0yyuph)IHpOed8O1B^k1}s|%_cHYsRK!K=+>Fb`vf z$V+Ndm1X@hfyvl8GSu@e3w6}gc5t3I^@j=OY*@^D*SpMIFci=)3)0pRT71z2WUNxturW$h zYqH&D6ORBEXV0bMK1U`S?qgR2N_Y!-QbJd9CtMRm=1nYQTT?MnpJA8y@c1^+p5}Oh zUW*~GnsS~-o7?IE0ztmS-Z$v+x9@cFL(Z4@tBBhoLqOtqSI^se>6v+-kIf|%qYIze zHZxAakCkTKmZ7vcjMXR>6idggXC@;p)S^Y*f!H0s zAwWZ=puYXaDqL&CVW1VV2+K&Z*rPoCG&-_SQ-4hBfM}bfkaZiGa2Wj4^|R&CWvE~* z4D;MI+0w3lNU>U$T5j23EI%Brk`(e%#-X7tUJo9FX#)+hGj?E;P-RAXjd_F=5Gx+^N=q_JF|t~tu7Ed*`%;0 z6tP7deSAZBdwF)wXD|j5FSg#A#$O6G10M^X8e;D-0hq0#VgplbR_NU%j`wGe6YKIO z)5Jh0tS9!s+hP}z{}hx>29}r()4}w`3I+Y?JNi|sj;}yTe$c@j6P0L@hmJSgH%e5$ z<*f6{{$wLmdOv0y-DD69UcC@O{PGKaY_emJPdA7WLKwN8hX0Dcy3`H!?K+F^9!ls%^eB8Ae{59-n7Vk)kNM-Qg71XeP1L#j@<_$5v}T; zdX@xry?<10TGeT#2}!!&kKy8cSD9kJL!-0n1tg6!rLnXCm8D`=Ja`CSe}(`F_b&j? zic|BQV%Qj{ueq>iEz*g(7E{fTqB7nTU#5-N?@vC`t8`|){=|LUN#VQzU)ACz^4PPO z`3fUYukoW~(VpuB5k2BmOwnr4u}l>PQ(GKSDR7&8yAemRXEL*Zog$H#iUMQ<8~Xf_ z1B;wAy!Pv=bY;XSp3L%$U_Bb*QRCfDER|z`!rkt&Ad{Dt|JXHPxs}hz;eH$!-=obK zSMJg@#~R}Y6xo520hT>ZC%$LqXS;}(wwQ}`Of#2HFX2^+NKMG3#nixYgmXZ>2Tg_+lgHIFk3=$vCX4d}asM5h80t$smNtP!0j<_eILjlFfYH$CjK8;G_85Ww@ z%3oB)5Kp6tVV)B}3^I7j+p`@gRKYhOky^4u6({v9NRTwRsDU}V{1I4wKISlHL!f_T zyaA>OZ8(TyT3xW)?1pIQ2|=<)WlU@~P{@8GEJDzt3abG~iKPG~da%09@UyKr2! zG7fnS)yA_#(3wK{zkmCyYLyHw&hr-YID?Ux1k-PXAIaYYM%}yOCfFATArl(%cogG1 z4qVwfHv=X-)PT)nM(rrIZU(f5&YNA={ci?(@+MovBg=t;833a;t+#J~79?!tngKs| zJMB_?Okq(Kmyo*%_I=f_`!8nR{nEzDd0|g`!GtiJA%y)iz1F&_VPwMI0Jt7loF^$4 zTFl(S7W`9Ye4jhms_O>62A zynm-boI1#{dzS7rLfiUP)Jex_x|t95%_Y*fkBQquG)ys3zO#-aTzYN*6Z@>!AMFA(Ja=~=Nm+e?rsr*}-rQfnFdEEREAx?n!-Y5X@O3SpzqsI+ z4Ws6XG881Sf9;yH29B|DR(z&+BT**JfPqDC8aj7CM4|-7?}h}7*2)HaG8Gdq-pm=XcMdh#;te6{Y<&NX)|R==tIyzF04pq|=ZvIH~g4+&E6nnM*Z4 zNv)z}U(W@$1#Y$v0E!PH7LA{-%Hw7$(RQUOo#lPf&SAa%zvsmsP4gSNPpAtg?c%*ka^A7Sx7pdee@?eMu{`+#b@CWpfQfhFCWHQI?FxTm$aREJiIUJG z_yl0+2SsjGH?VE*M#G`q05wOa7pJcK!-o{@Qp4Pi0F&^Qi})1t*iF`m`Y_zjwPOVx zu6D|^2MQHv$3N%s18=pQ`hxOksuo{xKe1%K>bt*@I)OK)8IPCjJpO&u>%(%$-E`Ia zHa?j|GWXaenYWyJg%H|1{fEWP>FSO)7jRk!0dnh-FoQYcy9=cSiF8$mw7);tgZUsa z_yPS-%j{9>Ha{=P-WR)NF3Y{!M!j$U6xZ_W6*F#OLuE|p!9yN@qksfe{DJQr)5T~z z%WM4aS3i&&@qzsXSMcRSOv?8TJPaf#3p>(N8lkV`AAX_{f*CM=DOB{rtw2ZgVZk>h zI_v^^8A8psO-sU~XIrATXJ-I4bf2vg|5=t0cPWB8q`gyYH%4;ZvVrrX05iL8Gux}U z**i39dKt}M%xIAJ*w%@>$J-^qkH5Y2Rhx^*7wD5(l^VknTKNj=%tjp=!lYQ;c3T=0Z>u1G znS(Y!(t@TGF3MtbYurY_MIT+M1*c$S{Ic1~^ND#&XnJ(aBz;qt97p;8&Nw1Qb@W{? 
z5&0-vtarG(+RYb56E3s?KH_aFYAk4sfhL^5TKAb6%Xbo!ZZPLhQW<$NX}uW5xVeC_ z;`jos8LcfYaTWDv8Z>C?HOx~>Q_0VU1!M0-jcRwXC6ZvN$U#ZP;e?JNv3I*P}k%5>5f9@Ks*RMtAu}(TTS+>Nqox@<$=f+zP@LP*# zMTxX_9~IweaY zh37$~PT?KHM>}8PE`dcV#No#^zo3D*^tF&`sn~IgIfl${HL&~#wAH4KkQtHO9?XbH z4*Ujkl=_nGeEuHp@S8j^Y3eNq$pBc05XUY|2ath0$mK*3N;%TNR9{m$Tw8ZBeApAK!gO*So&L85~2dw_nGED8?se~NRojUiLUe~!FAWP<>7ltIUpVJnMToT%O#D$pXhLl@W0t>i6g+)W~!wYc+d z$%ErCy@=!UYje0v(r41xN&unYq>24B`TLvSWGIMSr!da3dm)=0%(o%6@R%8&ZBcz% zZu^x6WEq~?WP7!+Jmr|%GqBW>4dv^31hel+qg4f3CplOIF8DUnsg3$_7M445@_`oU z(Kc4Sy`S@cM7eJ+ui1Qljz;p(1_6;hK&48-eaDe5v=35?+v~TkO=zEqUWa8t;CGp* zem8*1C*!{_6>aAQYYi+P$y*B|__X~@oHHAX6pR$dgRtZF(&e=+x`FM5ofYDUj3&;z ze;anEK0Ul!Zd-nGd^pJLd-rd=9MNZ4-|d5l$njapcCf5TVen4Ma%{r8HtG3l*h$_1 zo)X@Mx>0bd*)~zUYBgkUjJTGvr|dQtp#}e?l-N0hd_ev)(&2=EBL9jj3uvA)vE2FD z8hiL1azCf~%KdRl6f7ZCP=nXksIi8LI%lF#;KWS@Xv^YleE5nD3JH+kfta8kIN>-2 zdL_oy$6rUlH3vA2J6S4u<+RkKjv7D1MNZOD_MnVG^sy8Eqiqi5U+++Y< zh4BLg=bDkf3$>wM6&dBTf?+Tq+pK$>8SP)cblib_Tyy8-8VS!`YuEzM?Q=gKN$g|9 zTS)wNU8_5E*t?YE08BBY334~0ExzAvS(L1^FhufUW&9FNF=)7u$v80UJe?yBhq(N( zICMl3)D*B3Qm&2kqy8OGqTR~*0AFXami4~59>c&AEfQ%#c*ZA@k3D}|Iz_z zwQb!KdN+T(8RHMYLJi1%Y7P-^Z=-uPo`MdeFmT&)_^qI-u7c7lvA~mVzv1mk(Q(jF zpTnaf-e>5s0SA2}-j`dIYky3&IQeWRb+r*AJ_B5o1Ea6MF>Zw)AVgk!_h2u&ni&;{yQ=G^zajQ{3%YX$fz|M|*UcD8VzFKIq#GB+K#ek#mTLL&* zWR=kVf_RzXKGQPfcAFv)2cCi|=i{b!)Q^RF8JI3&diI+-luiBMsom-VuYCyG(*ky? zL;uccd45?GVD{5+hQR#JjdzNvBUcUUjACfMUE>?(HIb~k9J<(rcuQV|j4gz-{FeFp zB*31i!oLxHik;e6>tE=+iQ)^$> z*0ES!T4hn5Aic4Ps^5i|;kGg&xMI7mePF7E+bi;Sdk`Ak{-8oc3>3#{L%}Kz&kc+` zsWy{p#o-uombM1K63+NOScGKA?(+{fiEiemKJhHlTO*3$(CV&Ca7BZB=^PPu_mfCp zTjNxGLe#k~mO;ExyZp``%IhHjv_hF#~BYHRnJOho}=cFQK0>tgJ0fvEGZ`nj>|j^1pK<^ z`tYVSGC>b3fb2ZPRQiM9k-irrbRHV`>IIj1S)mgR@oVy~!>JwNgvuaeP0(ZgsllES z{YLezfzn^jTCQ5ISEl;E$~G?SZV~8SQ!KSC7)5`g{|tuS|2_kxNp;$);>axbRvKkc z#yPk;@YA={xa8ZDlB?eA_uWmspWS`n(n^e~@>o~%hx_1sMa5W`HRn%*Xy%)=-@>Pk zH`^IzE20KJ?ok}?!4%ucT$1P~f@Jy}BS@xzlR>$yTNh{k{(FIN$6;h;S?zZO-h9+; zAqtXRjB^{Ge*f;5&KCqRvzsr|Bn}b19TN&!QVJYOW_}TDlZfBPL?Oc%d-&)PUb2y7 z9C0nKWV`XPAcFA(C# zoQO`sxjm^157!#9vb+m_>y^L3hB4H24#A_n`V+cq4jH~egEQb?e`DzyOpEVJz%P4S z#)5ObgJEQ{^w`{E`Itb7>aiQswKmyg=wNP13UVxfV1uKaxx4E2td#W)F+IkGJBS-DV*Ec>S{ZSZM1i5L z{?@a1iC5D71yo8;Cu39O=_r|~2GAgfg@S_i)Y$Y58rt*4-O!ioe+kTUdJn9m18TBd zEKj1kh88oQj~-{k%+tt{7zK-esO&sEF{s|kOYCmEN-}?9$s0PUhsc4!0i+YwXb4{) zJs5v^GZLg7{_t7)(gV2oWK<2c_`RHa^NZnokI!|Fd}^u;b6dz_wvXzT-xQ8-Z1~l{ zYKR`1nnKVOKKkUfUq1X+-vyWvqt&2(e?S^C-&AbvTU1YvL6}^NA$9!Hb}_vlGqHvB z5y+n?(NRSrJuRSiX)lO)1rxyPF+9{i)bZZx2diG1jAOwsW7V^iYuch~9*dt%zfx8V zli?;i1NKNOF>wGx->eySxgQ>0a%TZVgWUlls(sZo+8Y9;s*nvN7R-}}h4sNUB3M*@ z0|SDWf4^%UBbEMM9wtgE0~}I`Z;o#wn$v@J7tsAuFJtcSg?OKa6h^)B=hKiP?jp<3 zO7VmJH(?>^6Y|c)K{bzAg;ZoTwJJ~QfXDd!_``~d5Zqc<(E_YY2zQPxZ|s0`8l}~Z znIC4BT8P<;Zvz@mg}8jfWDFsF@}cgXMkBVnGqCRB70A>zdTcJ(?Er@>_!Hw?nD*%> z%@{%|kmLe#-{hs(jcpmx<*xx}@xl#$i2@FIu5;M&)2V0}9&kZ#33`>*{7>Q*xdy`+kx~|`k^7)=g*HH~N7q7vBI>pzXO&_{HG?pYc-=O=kcE%L;GiSe;{3jv~fP$Y) zWl{~5D(x>dFR(x1d6Zi4;WpVjE0FLv+tV=YZM^>Jm(kQb?DPu}2EP~t0~X7k;@7{` z)4+SJ`NJsm4xRF?`l|Y70Ap|~gDrvg}bf>??kkcWHOaDakn?s?! 
zN%e^gMaiRwVn=It`qW7NSX$k}qaq)$NtI+CC*Qb3{{;+bbvC{Cw9g(*1>(qxZg7yD zMacg$N*lyaOJBS86nxTT)SX{npG6F1k1y#Br%i58krJaEs}-sT3`VHzD34~TnIM-A zHCUuG&IB?MOh!`82#>g~Q=R48;4mD3j!{A*A<=oAcrf&E50KL9Ptxw21qgssGX^Z4Th?dlNP!Mu~B0%Nr5J&;EADR#CDM{Md`N|kyfHt0Lm5U84gs2|yqQk5CEuF}ZRhJiZtrKUs0H1;*u z&|E#4#c1ICo|R>tc_hiJX+OwQ=6sGbv#1kk+Z%3lWd{T)0k*xLjGp?+3>3m9Q4QBj zEB&E^3G!6e&GliR?WJpv>(i+RaJ|Uy~wVi0bva)s0X#TV!^crhc}j;b;ALb{!|- z3n1-#dKUbs2O;@e{t=uuVrPwW#>z*TuhcnkB{>}=A7n@Bt_cLAaNHq79_)@XP)aAM zxH%OU&3Q8vyMPFd*^c1ps=%L*&o91u^IAH!=?abWmp7ejs)be;Fdt>t{hJ1>sYY}X zS34%|u2psfG3`i>qP_)mN`c?Sqxut>cw2k2_&9RgP%jD3#DP`C+aEF;AqFl6D7Px+ z1ogg>4uo~b>!~jGnFHWw%GovGw*4AxcXqzQTF3WsU+wS47iILL|*YyM9K)H zfXH-Fa6}y94*#qKaFZ$Wp)&EyG8#4K4!Xsth$sZ>p^MdN;y;CtqF0WMw>sI(8jn?W zGW5Q=L%f=7Cc0a1san0;kBYPh4i9F;shJ#B_d{|q^>*|$bf0YdiezsQq~>kj1jnjo z$q+{Jx?j*MIu`MjHs*Y8b0Qeot_YQ)>p$fV-63m0jlJUY3-78|Yxt8-j*Z%n>IK!n z5-6({#^D@6qN(JT2b$)@gj?Fz(IQLTYKH^UhSe>DH`3tY1)6p}PYV;y>I=a#t!1}b z;WPunY$+RW3Y_d__M8YUaCfcroHWA>s{k3-jF@mUk5nwun^( zpdOwbxsenT+o>+tW*An8-uVfW55y48->BN_mR?+oA%0BxCE9YTUm-IJb=+>j41JvI z{!2+M{%q8wYW&QDEEK^WvNCvPGG{ef4?iSDxzIr&XAX1wMnXbBMWWuihd> zy~zBi9dTYh>0)Hh&UT82q4vEKb;5cP7aZQz+>|UL9{{rk>_}@t&%oC{T4#gg3uIS{ z{ub+Scdz@tSWT%iLF26L;^`m&xm8{6>ul7KY}d9DRP<(Wn%Hj}xpM8@&$Z*^u97or zaFeS?%Fhkko{q9935(a8J-a>wB)*s3%l0abn5y}wj-vq%HB4J_fysg&ms6ahgeDAg zTjw+S^}2r07KxFdLfkB>JRBPH_MlEAy?^>nj&1U7ElFkd?{CofD1MbtZq`h#E4as4 zukTPfTnoLRB)1BXj8De5-js*yWFSXWJ0NjLx#O)aprm zKthu%%*CvK(JFoyN^b5{@L90zVkc*r62-5+*hfsx49>GBoKpoB&l2UV&9wV^Wu$q1 zA${s=%kC`ILJNyY6xXjrmcvh@cJXPk1vhs8Aw^OiG^&`i;7Uc{%m_! z;wT7Kr#MO89ULb&U}(yd(7QXhj>|TF%?tgk$5XpRAGXJmbX3>R%>Oyf;fdUv3gLi% z?o9wg_DCKn9vgGQN0uM`)a7CyJ0fy2ALqEF1pe@7XAO?aObBTfR~>eXSJpT}iV)$x zBHtGsNgv0*h+X&l;m1WxUL&hh$MkqX#68ZDO=$PXrCq&N74zj^^oPuVwkI`_kmGZuY zVty6i5+XAIIjWn2MtHHrf#H2bv_u%5NnGF*Mc#Cd5*=U$Flo)B8oo~3^I98&W^f`M z9+ZJ4w&i+6-U+bXP=$R!pfs?6xnQ`1@}rpL(Oy{xn=v55)&_e%wJD-P|A=ETM5!S+ z>Y~4BUkR^xLAFl4r{2Y4)h0UdDxw(~Hu?8ft+p}Uc8S?wSs^QtCE=JJ^#1c}=%K1D z2da1@OZ@O1;)n=TsFp2tLHfA+=M)*d^#NMDnArxKN;7&hf3!TJ`He`>w2ic(5{W{r zur^vD|2uSsTr}uK`t6cW?%vmZ6x<9wf&Ll$KNm{Yb1I|qV6{&)L1Fe*p!o;Fy3)}; zeous%MBcJC1zI+LI5cyv#(g)6OUYL)`Z8u)*8h6TJgEwExd5;BrhY;4qERxOg0<-mg*fK^}jUR(P+*uHveYtOE}}idA-iY{o8uo zg4Zyq)SRf<*-4%h8=klXwqo*kxBX|9@^!v$?L4@TO<9Z7{Wn5|YwQULt%KIn#e$`E zw#)49;@@C;Sq@TSa`gaN?Ejbx?da%9$lQxLr@TrV z+X{&dW}`hZ%pY}rk?(!jSOBQsJpuMi`K_;s#mA|2L6^dbsdxy@A&b7TFkEIzkm`Gu zmvv}Yp{lb(Y1FM_t968k8{Kqin5m(XBv*v#=laI$q^6J#8qJpHg zf{l3Qo_gz&no`0>eo@#fq;M{kx-NEw6#uO3!Tivh|2O=|- zaGK4)WAOkWO>m)tjg}CE&2?clMpSU8Af)9LTYXZ9feL;DVwJo5Aovk>D<6=OX{I$$ z6>jBk5hk9YXk)A4D{faU%;pT(>p9zpX>0CI(XLP#ig(E68?`iWw0Re3!{RT8R4m&B zEeW=0RHUQfl-|e4zK45s7sOYDh+Q-xlq9v_i`;u43^xG*PJRNOjkhbJ*W7qphRurk z1^}COVQ(2xZ(VF#8$@K;r1AhXrR_W_T?_@=E`_C6HN0Cs=q{n>t#ke`zz}Hl+sHDott@`_j;ch3+G8_)!tmWpG$5v_>{M{Yb5eJo<`AS zJ$wcJ1@Ug7Js@&Pg>CvCg71CI846>y)=v)uzpj_xh`-+b`Z^6>=3Ov!Z0@1`(LY@t z!?ylRR>_tb9|c=yVbb;pM&eD#Q?4|3u~c5=FFp$+x_xStbzAv+1GnOa@Yc;Pdi2W{ zkfAuL$~$ejNtS|UZu{#fIw%hz~Dgh`aI%NU+QP@f<7f6=O{S1@uC{$f;~52lmSlmr4;NV_iIi< z>HC-Ah}6Jr_$CujpbJZpD3vHwNZ0svNG+IE!pMNstkaY9r3p~lU}ZMau5c`R(@N!6 z*@=1*PlXQ@+e>&K)9trO8KzI-mMdl`-8^$HP>$7n34y|1WFgyd;`%q!syl7tv}Qp+^eMi{(ke8$F2#_ ztHb+!o(J{fTI2&p{^ZCv*4haR2Z>x#%WI^+?k;lVP>YORAi#3D6zqe*KZghX$fEG} z$6SPwG#6(P>rSY2&Mx;V4H%{EJyk8bYUsFicktO&9W#dj(oR$Q zy{(bge#K|-2}HiX!u}SKY2SGBe_alLUebL@Y8CT0#)%3*+hOuTtZ(e zeu?tZe=#&}tXJ7ytmhyI*84rP91=L8A`)2|9UuOyeufaV%*%I}EmeR0uX_T^QM7|E*i^!bl zrHD>!6pdKq9-S-Ak59SE!U%I~s1Izyf%3#bPnH!EkNZAy*o-!75Nyq;{SDM)6(NU5 z!Cqi1n0dM2>IrWCaAHNXLeGIs-0Ul4Vj%O}3YNb(3r-ktSGAw05lmi66_0ab;H 
zygM39S}E>(i50x?lX;XCuLe3iV7eM0m*+DL5EFv!C;711xaW2oB#V zrnO{z;sR7_m&PescIMIOXAXfY_`Ae^*;^WW*P^sn8#*i;ZRoohALlZX;yQbdXSpt9 zJ)|ZWD6`Zr`X+DnP(#R>9sQ6bAmI{&`m1n_{=Fypnb3B353+NwIW$pn91u`L$4F); z-eZ_Dr!2T$6`9I#lpUJ*U9VNlcI;IqCu?cBy}nAGX?#|{fOPYwWZqlUU86iBs2V>y zB&wQ8{?o3e&%7vu_Z@dV?ugr^LF?16AHzR-9UD6b_M)0oh`{5RtASKJ#MV?0<_T?%pi$HtVS>ynWmJ6d3 z#$k1}W^--;W8SUET?M>0_S-;IBSx|X6$+|yf@pyViSs$uB8KbGq&-&bepCfAWm(4#t$_dt6QCV|0*okjx4qMUirxxhf!b|LZZeQm$pO zLSHE2(&D12fh&F94shGLdw@23zkgbhgh(x}Hzydm3`jkAb`|I> z5xjgX-YWNbKi}>l+Q55j5%jTa0qE~3UqHQn=d+I{#eDt8btD%XlA;D1b4~_)HTQZq zq0N-~m4e>P)NZq;>+i>cEiYEAi)Emo9cVEnXayq-u_I;})YSM2=t;p0j{yz1V2b;~FBuQ$Y&o7rgfu9UnG$jIIZ1ub8wBaxsfT}Z$x=s6fe6E0UCaLP7Xslc z3@3nImcgDZG^v?xZ>_J~$&TF2dwKPRu^o^^1sS+GdwF_!09R!zSI7ADKNIEPyi8R1 z{DKSR)w1i4;`mg}C3>>BNOV;J$Q7%D?x7mSU-_4L7z%3vm1eSP1?W}Ge3P7BXmuje z<_WG?P6VfVfb_|^nY97wbNlrXU~e*g(mBjVM}RJh=}g^@ zHTiK|odWwt)4hehhN#rIs_T)&LfYHIYEb6v*-Ay2rP_*y2 zlBSK!@C*i z-$1?$-l0-ZjG3KtMz{U~cElLy2FDv9pr1!h`)oKvk?ZCnnEEkiaAN-;EahR}ncm;3RZ50uGJSWJBHfdYWV|Xq_8X11ygG zV~FCp(q%GFn&*N0jL1p>LSXnCV?xmn=J^eB)G1&pjqCOYLMu&9$HvYfU{ zAE;c%K1P~YNNli)K@t@C0HNB3q9)^AC)?!yGvv(p_BnUd1Jn!%@KJSNU|h<(c93@H zsol>0bq`thZ8-Zicb%uD+i!no?LB?_G5~8~=)9Qw6?2#pe^~U~X`DH}lJsX1L~8s^ zVTcGU@%81#Qxkt88Kf_LTT8lgSPLl4!A`sUmjD)2Z}X${6Nd*8e5}*rs^cqk33f#` zUM&T!aijwem{7)ew{O=h!oBm_KEG@MGVck#Dc?nXrV)7}muJqWAmQl# z;y{^#c2vG9xhemT^3wOoY_P#k5(NtO@O21Z6%J94o3cb10x^-!y7EiV@VY8f1vKBtHC+3QPt zB~AijuN>rE;Ek#odiPrZkdvR4E_wDux!nIjGO}j`+x|D21&eB$egRpO%k(K+$?ZrM z+27I16r|$w(AobSudUu*0eh>*g-W0*Gb#wO!J;1b!F(O#FnpE?Yot4c{>VJcjVWQ%&1K@J*+$nF4bl)t7dq&hNQJ{Y-;Sh`z(_lT%a)Yc$_yhE=A z@4a*fM#n^yETgdX`(C6b0?~~H0D{z%Yj+ksYf=Go_ZB>$e>e1&up9{4R#me|Iwd{9 z@Jviz%viWfUCqXI(WBG()r0eoUOe%5%4Uf3kJ}Qg(FIQy_`Lh1C|G8fI~V~+Iag~8 zDm1Wk@bWdp$=sF9wY!m$==KV1piMyR10^Yx%p_oM)gWHjQ|k|sZ?M)`ygynp|NAQl zgmsWAg!n#2Sqcbro3}4@K${ZP1I)i4f5_B)j(w__ahG5I^`3s>ZSB^VZsFd|q?(hg zzDuG3>1YO9$%fBXHw9k&gg_5Y=U3l3UFiwb5e5(0<8H<0+8_oVDciA~VRP?>sVle3 zDH@WXQzryOjT2{lkUmCxhTQafIGvygX?HUFgxBpY&ZjBxHC5mGk|R7k5>%(oD&g5} z@}TGwfCk*`e+K`J-;~)@{(YJV(1Oyy<}$ZZRsbjkoE|0J#$-GADXKiaH(59TIrcNU zFIGE8K`|?bESC<{G&RD^Fw=tPIds0<1|G!gHyQ9r{fS`cf#JfdZA;x`=P zOY{266bA2I1O5UWAgml@0zp!x3khRp?X-O*(x#yXb5r+V<9yW$>HOT6xq9V0FDhMBY^dzZmb1zUb(`O>?bvtn6Qt0TkJ`Mm<;#N!~ z8l)PwAnC9BfLLL{)we;0(MJIQvOk1@Qh8ch?C~;LmHM^aF2SNrIdUZlH1B~>Vb>a0 zTEZ02*A4(pr>hu;;$;IAo1XA0AB;;A5FNw9dNZQ+E~NW3YY-#S2~BCUMVJ!beK3B* z904q(*ob@JS-}nc^S$UR-y!3&x0c!*$ITU>Q9Wv)o8RfF(d2=MFf1l!99C!NGR@1aNFZ1?<^TmU<0&4ZWjb*JREx7_j>rdy9B1 zd_d@++n2QQ8(>39>huUWLP^ct9IaS0yEB$Rh1amYfkKQ3t-lSux@40fun!Z{Zyc`A zD=UCunF{z#;WLjAYggJ31DBSN8_DZ%R=Tvi& zQff9D-dL|=Cxq9IUI(-NpC-kYif}9ogsPYLo7?72WZ#M;LGc3+L4^tlnv_4>g>|HR z-Vtj}(-XjHc^=*^zE#O)=w7T+IrV^v%;F5W)aTCFfcK5&II{~7g)jBf@al%72__p} zTfI3BK4h~9T{Ro}7`1(nriJP7P_-$hi;#=*)^PC@BJFQBC6Z&;orswLDx-0y8YW}_ zO-2^91E|C92W85Vt%m3pdvzUsQ~19K@Fjfch~Y$H)JKOFC=JEDmQ3DEfRHSQD{0Wh z3p+5*TZ?o2kf~U@tkIvIB2C#+n1rb^N*`UBl)R;@QD z8p~F}Pg~)4n5urLQt^=UTYV^aL`>E2+`1&S;>i`72Bh+*r~ENn?P%w!?>rp@3-`gKkAVvU2^g`A*mShJ0T=2JXnyO2{K5*H_l z-9w0@EMF-;_2Lh8uP3Fl%m3MoV^J(@kRn{OBE7C6_Yi7GgsdAIhO4Hk)`D6Zt*8!E zO@TfWXQvwE4R987lZUi7`1t^t$<(crd#RS26)J9e=5}hucfh3s2a*=e(CO*Sp^QNl zSgsmXWYd$mYI*(Xd#=z68ignhyXj!$D@O|m);|J#%M2vPLvCwxm&#GGC(W@=d2{y| zW`Ql#jSno?V!iprZFsFTij41={3PkO;Nubo`F+dg11&|P`RCzHck(RX(wZYy395Lf z*M*fW$IM6GGL#P`US2!NArPXa&iN4Knkm63n2y3!vQfch)E{I+PeDcGvYm0(KFat0 z-1!!_6$?d77kS9`BJAl+{kZ>jyx`ioPI=x2zU`s$K~s+Q=!?7ngY2mJZ9;2 zJySU&MSyhWJCWp6L)^@Y7y*J*LD0P1gTbwYsy2J#7Bt-9lr7-Y$^G{#LhVd)%nJ(4 z)8;kU1XWGG%Q))mdERIgJ{JvzRNxS8`m{`){)Y5+`0uY12U}0-_g_??QR6FfM&RMh 
zb76FJQ_e}b(r8BT-|i{ETq>FPOL+XJ`QnTgaFVPU?1A;2}J#4X`vj)XuhOm#8&SM8$r1e8hkKoJymVN5GGh^h+S5nFkD^;FP%i)C65?zy;@{A(nj1-3TAp zuvU1tKK2go{Jkg5X6@7!>kPRl#x8!-e#?iC-LnwVYHqlPA!=ScdM1#t32n{nrI?{5f!j1W%iUo>_g zI?!P%GNO87*80VT^A!FEzF}=o;q&z12%3FcO@ z4~EFzDut)7fEL`Q;eAzZ$KRxKKjBtN<>L=NVs;?j)bIV}`A&qnKM+*D?sQ$`=6UBQ z41OH0BZQokXO?{(5mYrGC$auIi^F!fF>)ZPFe8!o^P&kSaDE#=hSv=2zQJ` zp1p;#>FJ2X_c)gGwhAC_5@5mXxQb|y{<2^S2tS9&K8+YetFh2uB(cJkI0Q`;Mv2(Q z;H#KOR%F|i6qDC4!t*!$N_%Ka9Ak42Ui$${$zzabn1PLWak6~zh{^fl-901p?c4kE zT;p)2hLjq{O>Gd2L%>70lz$!#AUUIi`xSRrN9-s;u=@@=)y32VKn@0xC$7H-L#~z9 z+&=1P^+1b=apnJX_jLu4rQmL8ExFMcLSGR;JK8Nw3MTkZUg%{q+|3uL13zK- zd%G``RJ6l3bHO3-m#v)a^f>+l1^D6#ve%qv*EpJkuQbnqB|^b26(-$tbfRk@68|o= z`vQ!mk;cO+4Au^T-cA2O>+S|HgToI!WDz%*uakb+TAhAf|H`0v zMMc@RjKG-Rxl;N-;brnFAXs@vYakHlqw+F8Qn-#1gTxAmz_AXE3VMb$Wxgf#bZ0jP zX#&YkX{P7y%0*ettg3I2dJ3bwm6G;kfrp?3Qe4)3^AT|f=43h-WQImtH{VANlX zJ&yBM+6v*FA3tw3>$X=yh*=No1XR zJxYYJ4BBFm)PimPzj}NY%k_){ppOJuB)z z#@X`A{LS(#^g_1!jql9QJi0}xI-BYQiizJLj1ur)QVYNYV$=Wey(1W=I2_@po3BD9 zH3GtU37@8_oy8GAzRVD+Q{>AZLS;I3AN+;E24ehWDs4A~cT=91gHoeZW(Rl|gl2xdnLHbG` z;>f;}a$lQ)onlZmYxR)<|LT@A3uKxy&0zhJg84=L>i#`BdMkAUDLF#lcY z#V#Od3OYw^=8y?i6@YVCqNG32N7I1g3KV@k_gljnkUr>tdni=rmIWm_W{8YS19!*Z z4}%}iGlj<&BW7vIat_~4FMKM5Ie+8wd$W50y&S`RyeATj|JSWipEj}h!+(8P$SR;UX58P<}iTIDEFMS2bYtmUJ0rdH#oR>dFIEug+jS6xxW#k`1GbG!M_KwVdQW!Z% z6YrDxhwCZ{BI!P8Ac2p9FnHOh5aZ-yPZR_7nm*oTwM7^S&-J&$k{5X?4_FbX4icJX zuG0Y(BZNh@+qvvpb;dhDWnrdXEh?$f(4aa18-VO}o1WdsGf5=ogD39DpDMJf5@xye z**Eg2R}wF*=G7DLfk#;BX#_aP{g!WCFqV`ya#v>8?NU zDX@B_pgHtvV?SatCEIu5fM?4KeGB5b2_fss|4&H4k<#z(quqs=mFi7(7rckBQAsi6 zecOKsIg*6OY~MYh1VLq2)Ud6nsU=(K6RP|l>z?`2@q=!SROHgzQD3(?5jF_QEnzz> zs@fi*G{#D>KE14&H7TI|P@6}nEbP97u-{CV$}AVriK{>$eCW%#_*ZV(LNQkc{D$j8 z>p`hjvO4_rCduBTI-;yEZMSoEo;a3?Gl6b@%^VCbm;P#u2R?7WoCAyJxt~;@pf%>m z_o>mb?{&6wT*mL@K~ylmOLp2)0H+JH&r1A0)xQOZ1nSD3#%rE3w(?^~#A0WrkFCpm zf`uVKfC_p3p8x^OTm(XHr2!z`AY|c^7Y#m3@jZRt5j60LYQI`QS^JfUIbq_{eH_BQ zKgGQT+>uU+%}v?ZQEkg0I)2<3h-X|iQ>6~V2rSsgzaiI)`-rq~Q8I@76o86@~od;(Vs2)=* z=}6bdXlK22T|mrLI9j_G9x@#-`o5!<{lZKs@BciZikF#MI8+a5ZrGUdWrFfH{WbC5 zF8_zdQbuRotpsT!?nab}HjEJ?}rC)Z7MajaspAsC1rIN01Oi zuB?Np>r{?T6(uaK119{Ao7rP+R+|(ZLC*^vrgs4FRp_BLl6oVTf$a|$@x(z)n410c zmjhwP2~o$l)h4NsaWbCJzG!8vt_PNL3~L0g5K*t1^8)ye!x@!~^|j-mo61 zriy=i$U5K$|04J)L>_?f5!1fmf++N9wAoL;lS3ewAO?exSd>IFvRdLC+CV!h4mIIf z8uF6RsMYclq&!bP9HYhn^oebSb4l!;Ih0En56vno&mULguFVVkg{A))qNv8k+<}|8 z_tDgG$c`nAYjlf)hM1DF%_!v09*-LrML?;A}#s{G5uldf>!LO=Wv-ie~}Vl`%C#8 zrvJSM-g?G|3P<<5UlH1B;>b+=M7(_?tkuMV0tRYGQ~vp(NJT?KtWe05bOSW}X| zM|&VP2O*QVabhRv)U5F7pl6zv<|AxqOS6FKqt8I2$mQVCc@@IXZmSW_H=KhV;qR4y z6Ac{f<&2jB2i)H`|IFGUqK_7cb=?b8cAoK+lT)%vhO9xlOK%_#xh1?I)>CyOJt+L? zr3`4T-p3cnDXx}_i=(V}W^Yt{HPd+13hLBuTe=Xg~ z1F#izLurWAiVrHfLGpDFI6|n_6V^I&2rXD!%j3iUJ_Hm9!A59dLbDtB_=bcQ8&L=i zjlUxNaH-Dagm2w>R1bIcQpu{E+($k7KCz&K9IjTbexX5j_tiQ()*h(=(ImRuI=;dHTrWln+D zQmd*6GluFv70u8FHAAnb)g~P;rcB<3vz@fwl5!W)2SgwwRjxpM9>98#Ci@piQ^ym$ zn7h;#Vh88B@}zBj2P3T+JS4%(k{JVLyLH2_j1m8v5{Y~A%_JhzZ&5^3?lkQSbe^?H zrCRX=xP$);M(cwo&1CEZ25t|ufsN=6w!i#7G>$@a&69w;yXk`_Fi(QYP>U{u_JbIB zkV0YkW1(kYa0<-FKsfIdw11nb?$n69n4Hw<&|;*F>XTL|A!oB>>b-6uy+{ERmqZU+ zAG{xr99GbKa+tdRN}?G+;U80TF1of~Gq5=J`E}hLxIMbnud=zX{rBSveNGIga3C=( zO#!|)`b1v1eE^t|pXwGTrL%TKaP|+IOy+x;XI)|T5#>W+l{G-MPPdjLX?6kC79|$! 
zbBOAw&O1XJKuvb;GG&M2|8Gc2d!T7tzD}90i))Z+E@Uru(S@m82LW`{Al`zxcQYP# zq_T<`5q5SU;5UL?ZRJoh@?O^7SpQiI3;ava`=NwmM%`!?ik{I@Bze6j9yKry!eajB z5}k@M{>vgM`IaN%roxetDR;XtAM>&{KQ9(S^JaAV_&+%5?~jzrW$nr1Jkb1QaJ?SQ z=c|Hzt70Fl=Zqdp&?5ga9$KjaZoNr_NupD7cjD>quMcmpTGJrufNWMjbF43KM|S+t zs6x7v|F=pi@NaxQ2?lF``qby;vT-Wx`R_*Xgqf@tv>#)tu%c5b)bhax$aO)p3*rDt z9@0ihH;keM(_w@NL|yKT2S}}C_~Z*{5`WHUd3?3yWg4=k{1A+N_IP*CU*4wUJxb#*K+=9%|L`G3tA;ym3s3R%)dP2Xv9FGxLHlOA>C}Ztn_^x}<<4 z>yAkJ;9K}*qOrF>h+7H_%8A|>WHPjX%xV7vjj27D$=^>k1r7kYL$@^^Dz<`FfWHR2 zs>Q|reZhtuICikzbsNdBN~z{ctYO4D&{)OyGfx-=arI5H@u+oD1S~?Q zof~Cl(##62Uxl&YtBhPPy9Zg=WiiBC^bwZ>BNEJMW8`K6KjG{EkuUOV9gUI(P`|!3 z@yNmO)jy&>lp2aEAnY2hXb>O4&rV{%^cjLI)#x3H+^6vi*GE07CMuruN^Vj|6GP;4nR0qr)pO;(>Vvgo0M+xQ$C zS2zc3bIP{yO46a=GIbH+Yn(!epQaBN&Rt(vb^HhDkE>&mEvt65>GP4~xh?cDY3biw zsdk(4xD5ZptOYmp{TqBl&Yd~$jQ=FUXVx4%7-NEueoG_i#VB%mr7P&pyJ$X_DeJ=t z4Z}DAcD4V4Bi?0SSZ+_aY38V3%)gp-j6VwEqa`l$-eKCumXLyc8? zx+zbyoywOSsMp+Ho%VwbDvj$dU!Th(_^yD(15fM@?7O+LYR_<%glu@*u{wAbn$0vB zN86zNY9C_8N)?w_YOAoPp!bI09#wwX<)ycK_dthUhNE>RiTnFg@H)PjCf=7+wY&D{ z{cF$PdGMwsH~9mblNr4UkmgNVs)!wNX2vu2q1KGa*~*ZcFT3OLe%k$jzRIUgS^O+D zBDXlOCSQxeC^d_jY^afI;yoWKdAOy&+aSKVuLu+N9~d~EN0+L-62ANKzD>r{ zGJr$!r_x*RcQedtb4>7VE`Wa;)Kt@O+Ftjl2H{ULz#x0=G*hGrJJ3+l zJ<0<9VB_#^Nltvq;1A$lS*nDS+F_!o1V_nbyzB&5ZCt{qv{Cm-R1Gj?8syHS0g*ls zhF4ZVg^za(M5g~WpEbblzuYSmTA85S?*JOHI8fcNO^!+^ODx2#}I3LHyOvW|_nk4!2 zqfWsoL<18<%X#`g^De+y!(=`&D>JVR9Ze??J9ryy*!_6 zFG7OIl_ZD7#jRQti~H-}+ZnBwcH|hs&Xan0n#J=oR;W|ueDtD5BRu1Q+COtS`Nc4V zAN@N}^z$HB4VDooSuhO@!Z2yrc$PKW8GA+d_oDAbg&qdU_Y(|KmCK2lx(;Ka4(QX^^bw&K4$6bzxg_VzrNGuLgcx6A}AuBAA)59 z$osxfCb=vRB~DPjIqj^Y4Vy-mhyJGwahyvBe9;INSv67E# zEY$AInpyu1aT(@l^MIutP6;|NNOTtMd%}Wt6>qU_{};*~UoQrB$?oTyiosiH5~D|C zrw#2koJ+sH#C58SKY5hMJ$r3?Pf)3Xj%^&o!$qa2ovEvqZk&B1Fz zUHLD+4}Qb*R#ZQIYn=b{)-bkS5^f)kyPuN^e(iK=OHU+VDbN{JgS^QGg{oopKM#cd z8^xQv8gND_*c;gy=19G7*aBVZRY@zZPj2wA9+k(tq)H0#*#51A z4!r)IDq2Lf7a`sMtBV%U_-TEL+16C@`nw#kMtG)zfo&YFd(akfnv?dqrw}3_m9ntA z^UlSj|1vSAe@!XR`WCFHQ%oq{J=<&eeQx4`OJc<}F2g_k@xPk$=${oXHBGfWSp0pq zf`zgwxRYf6xUp))h;;cLmYJ#_8$9kRX;lwiye2sx@`=z!%X2{ch4DAOYL}H)ROAo#=*iEU@qf%tvbGP|E$jV85 zVH2=KubfTk!n=(wQ{Q%(u&t!nhm_OsCVk}kKACr=o{)#;2I8HwybFFb+o1yG18Njg zh1nWmk`I-6uLpT9gCiRduZ8LbM?&Tx_<|l9zyH3B#YV*-0YAGr=tm&_1^uVsg~)v5 zKOAc`Mjm;GO~0!CLZ4Jd&bhe<5JQpkAJl7B90u&RQDi26@`7B`499o459f6$2(bNC zZgYl%BwDKBRk@PX2TO;;vh!%{FqgE}sKsIX?T*I|326q}E}4QN_q=QdpszY&$_>I* zuFgUfa#?&inAO#@S1lrq_-bxQwLu|6blL02&^z#vz;%@6aNAG_VJt%@kOk!p63&up zlc&Pt!y-3&D=J%GGOW@v>^5zU5k2kw`62Zjf=Y`F&_v{Iid*K8w`du8ywbv&=KHu8 zK5V!P_Fi$phpkRT=4SN_@)jq40uo4~)Kq;y#bcC;xXqbSI*T#a-E*g`x|wm-Om*XT zHHlXKCWYbUFl{ul>+fo=&`7KbBsxSj5~j%g(*j7LQ54`9g7JgpJs|>;WBJqA5SiyG zdu^}2Ee(UTr||hbNO+MGWC#%e)EAu$f#yr+TcXDUgPiiXPgH z=*r4`BSqSeUP#8)UJN)*IQ(9uDn%bMaLAk$f}Xo3RQwS;#2h0RkM1JqP8CbEcJPIV zd!2%p>T*f^+xz#G{S^c4AHhI(fv{pe-azSiDMMmll#&MfJoC{?d%^{Hc2hpD!H@9N z$F=ls4uxTN(Jv3aNW4+212?&ZKeaDV>$dhiuMcy%a)~{nm8?#s{qC4|ODfd`zNomY z!zAf$ue{{7{oA58dob}awS1o633FcV$8e!lNFhf_rrkoCx;Fp9a29|G4q;td}rUZKL6y_B2RJ7rA;5;pLzZFOx|^ zjUNqKuam{{|`WF$&1M+(SyeW+~Bp5tZenvcU8A=6R)}P zB5GxZn`pZ&7|5^!HB1TLlfgPMbQzH{aEdp&m8 z-zu@S$_5__l?peG0?iaR4ALXGIBP0P0pCFxEmusLA`B{JQ)0mef-Ip{`*#@TMdA7ErHusOgmBofTSMi>iOw~fRojAQPkun0`M;1{hL z8~#{){AUO!#u@jM4e~%hS5!J{~rg4GkG~E>=Ezd}bo*h0!JW zYv7Az6s3+qH9mH{@a~mPwOsAt<*HCmM4B8$kfUi$yG)MlnVbGhPYH76PE$mv0Zk)F zYPpB|-L3vbk84t>1zfrNeo9+ZvdPOkzyh!DXpyFpXzfRG>3=@uFC`(43 zb#mn0|851inAKo^DbUV|$opz_@uq7?D8aJ{U;tGK7XYI`(>AAH#-ufUS$7J^cm(qB zpGzQ~u>2Am5`gfKsxnvoSe>XG@Z9}ROIu~L1y}}0TS2P%YV6T-1I@MKl$#6a zGEp?~mxQ87DtZFojjK*m65x>mb*meQQZ;KJ-FS!EoQQKCGKD|v$EdsmWzo+HN6;0O 
zy_~LJEDF;~`o-evd|J1bVZ14@;*tpN)AVXy@aui8Hyt0D_NoU!0lcpqNdXblzw4*4ji%8$cD*0bkD+VzAk6 ze=hXfm{ZudSvxmI7(=5<$#OF zN+Pz=HH>SN>qi#7v8KwtF9bn-lGsGapNAJ`{9tatWXU}Di-SlLyZDakn*4z#CiZkA8*8~0Al2TKX>7u&#H#4D(AW*5vm)EQ<-6>w#8T+i+ z<*-ZP#-IW)FQv|F1F;son`nusEh#EoTj{*vc*4mZL{ZeuW;6VCj8e@26KUGW#Fn|* zaRJF{Az233)gv2lS}9)j0E|6GW2n%WV)7Ehx>nAoNYfL0-H&s2>i>C1(TX62EVBj# zfk-b-t&Ch3E^d0L{?%|HQ7K9%famN{w?HDD$by#@bvBc0`L1*{FifD~52FJ)b z1+O}tZM?#HmXxq)I)+?nM0=H3;;Z%l_9D%17^j}CyLUc%x>)VPn*FIn%_ea;BIUC< z#!CCFJ*Dz>lL6=VI_hHgg0mT)Di7G$$_$#Hur(>Bms2@>bkkIdRVx$ zIhi~AeOn{wND^Ki$d)Duhe}rbd&bY&Cgrm@J39Sj#PJ({Y<<`+2zCtxj#R}XV&0Ve z+h0hkJR%x$Kf@*2HBh>JS!&ySsUww z;8r{6%?1JRArE4|53cR7Gl~bA8hC+H(Im^*{&Pv(MoMWU{0ok)@}@2U7wjHUIzJI8 zZ?Ml#dLlqKwkVWu+QXA@S`|vLp;aRypEvkG|Qk50|+v({mNpdp`cEUDdf< z%dwmGV2lP)eL%7Q%|DtWe%2)_2+bd?z5QbU0_L8*-~+sDApfWu*c+O8B^AygeM^&;y|H z`(g98`+S7FNsXjz0LyoC`liw&s^j9f?!4E-*$Su>EWl_*G75@eLv5^#JlSq0l60E?t4BZ3e1 z9MJq~5<+dE=pn7ABZlFOdapA)>_fA)*!>groUrI`*WoPDvL5=zJi-M5(A7;0xP6zP zyxmcf33H!Fm#?KE`16+;O}^=lWvVx6=u2gu%$@r0rBZNPAbj;WTuqSk_&Zc#Y1|dn z3|F@vTLcVHQPCx*%vR@n)gMC=`NaKsay>6f$+Jd|yK0rYflUo? z06uSci#*RvGzqyvE0g)J zZ}l;89R33e+pqN4=b4c@Vk2OlrnO$-+K;4p9vQjJ9_yO0#!)CQox&irO{ihv4)Kl{ zqH?&qaDbiG-7<@tobg||K*kMZzQPzavLS6~g8BmA-h^188;er^i4}?uh~Bx(+I&37 z{0plS9A7p(5)fXtmak#e{l)ecb10-<3>;j^W*nfcf6AcU0~_Ie;YL*-d3`r|9aRZF z??{;C(R5)$Ld5DH z)WW(IGE;dWNf#t>aN)O7KxHvcJRiN{ve;5?&5Y7MuC15lT@~;LKu20<)M|NY0m!?z zap>Lshah9Qe#wK%N}~Jv+uo`xRzgef>)-y~HSu|CzATa#1l%ATKWH}(D1ZCkUyg-( zYE_B>_WNK|U0{N(w_P%K+h_rl;MH<7n^%Ow$=;2eBhz}&dA z!d~@JSiNBHit!z0OB&4RVfBP;aq7FNziFyOb+z65jRNRoFQ+~P)g_GWfr7j>Tm8WH zKCQj$c>9xfVO$P&t19z{(Xda|@V)_{azOt{uyWNLSsV9q{_VKGW4qAFKhi$X4F1UL zeyxMqXZt%oUB;WD+x?i$)_F9-DY)9R5MMLA{9^#^K;eMjKSnfBtNFg?yzMYJ&Gq!ozx%iT+RYiU4Zbu z)Gxtb-?!;n3II!u?qKD--5bZ1u)X&s5+DB%!g-VZ%&^-N`QP5vB$_1k^%u~U)wPlo z*J_;{I|b~NMd`I`wCFsRJJn$*&hz|ZhsbP4&^{Txv)PLZl|dXYktgu1RUBxSwAE~l zJCn|B7l#I}g#Q`=nfdILenx}rzSQ{vq`y>PV6{@;gvsY%3-m525ra!6H5ukBA|C%< zG(~T32{Xk>PdPlY&54i;LDm80zkv>W0n|Cgb%rinCrqU7VwdlNQ=hpsgg#l8H{{vL zKAQ&I43_hxzeoV-CEi+LIk(+(j;I{g5!`*olVXy&usiwkqnM zy&_o_&_N7v0aGGM8J-^5=_|UO0717Yor7FHA9F&I;Xi0Rt45sIh6Z1ORdX3ez0+z- zUe$cLZj6YNvG}hohlNiX9&uf@A+9df(f)zAbH=3R79LrvSDG9Ne{;&9*j;vi)?S`K z+y2zQ^TYC|4bOIFu3}viD3ht(!45zVT9Qi!~~Ukykl<# zIMPs+QY-9Xy<4go{mg9iiK3t4VZw@0d!r%pP#YBCY0~@^$=FC5$y6o&Zy7rZE6_d; zQ4s6%i~Rt(V{W>uGu7FR{j*5_jliF`(vlIC|=t=y9&Omy~264E2*+linj1}e*7=nDK^8R5@C^%9aG9`(ASR8F=$=gx{AOOG>xenu0tmglL5=uec-k~7!U zOvp2wHFRAj=PC@kQdmDmh=4pml9I>6SgV%f-&ypoAF9eDAB#4@TSA1-A23+uvRX0$*`0IGl9gyc)}T zyZrptef}I}b575ORx~V(24TMtZ@n(MweYj~IRr$OvJD7>sEuWo1W$>WNyxgd9tu7x z`HdOCr6Qsz6{NbTHN9gJIKyI@tj1p3wKHk31c$kU4A_cN&LQfWkHz%r2E0110aYm< z*6Y32#I%~aa*gTdMCHRvC9_@`8uEk_78x^v(H_pAc2b}M$XZs|_ytlrBYnK34c9TS ztUZ~Xa31=2q3;3rJ$5siA^C30?J5nH{pI_6G%A>Gf}ce?Cc75`@-lZz{iDAG&u2%s z`hJudw|3YVTb$CV`?>}#HO?Qh=Q1zT%;AbJ3Lxl!o%mEP7XlCAyrA@U#Na2rUUW$n ztaU$Lb*qIZ>tvQtaEqq0viu}OYHJcearWmTQ-0fzsg8A(Go)#$wmH`yc3{np>*UCD zuZ*wbM@<_$W>owc3yqr&x&qz{?VH}c&z#aId9{lE`_Sbus9ROd24G}@bW=Diu>Glm zVl!Zx)P(E?W`}jdJUeBal{)6GnanT%IJBDi;mXyoc<{zoM*~q>#1Ft}Ri6K~3M~ny@JqRRqH|{bgzukNY zddWzQftzZi6<&RV0S0kz7qTLg2b>t19NE?0C4a3^UP+~5~@>cu&R9h zGgzo|F_G8&LqLk@PfKn=ME~ z=oA{K{(&~Vghx3t1Hkjy2>Bnm4pn6;X0aqxyNU~aV@2Dh=&jJv!E;8I=p+U_YbZ0+ zm<;C9%{j-+fS@GJxA0Qz%Enf~e0@Sk9~& z{rSHqLZda_HUeN2%4)zV53)bv7hNf0G4hV6D=w2s!uIzRLr?l#aBxT{otO*6=--iM zX9~M2gzyQHye5{0euS!mx>Gftl?(I#S&irA@EbSNKHf4H+Q9*iTmQ`b7z8y^QfW)1 z2A$STDNc;%S3qi#%%PnPBb$iB*aeQd7!3c7GygF#a&X9u{~j(}t+Yk_} z6~9>E^zU_P95@X_XS=q{D|bEEEhp*agLKebbm1)3l6h7bJ4ao55hzE3m3|&d$}KL? 
z;Ywyf^jQP$eN6vfp)FDW4`Xi~Rdv_3js8gKZjeSm5Re9GQIReQ2|-#qr9~P9L8O#Y z6cD9BNl7V573uDlkVg3C_P(F@d)_n78RPt|Lt*c==9=@GSDXcL=QkCqg-G{KD%7RS zGm6S~1YMEFBvbVr!NKq##?Hn%_-E$%{#LF+8JOjJX@nl2NjH$wVt)PWDqD-`@>`T} zNDrgTx(8UnrhQ55ua=X$LyJ3Y^K^7fvLQR=pGW2b&3{#!>tyz1-WLMO=c6xu^y06Sq>`ler}dHSvi2CPq;V(R6twMu^r?iXCXVEh+3r55x7vx59E0!8|6Y>>t>Cl9pm4 zzFO>x1I4%hZtqG!w4cop-3!K6e49-Fm(916Gxubl3mG~4x>20i7 z(sEKXnTc(JsHb?Y+FuWpnmqC-Q)^k}b(z!d_jc!BwbqHdcx~}xI__&kj1fmys*$eg z2{Z-IbNFesJS7cMxH#rWntPCGqqu8HC|#yRYiixPG3$35nL{kCR8z5xir|%1uq#t^ zjYMDU%ZeLD?GDa`-V=FP-}I(OmhZ-QGInv)xEQB^omW!(h*9L~MU822IZ52W%Vlos zx=HiOPk`h~Rq^XYCj;$?7(O*T#SdR@#giO;zRL1f4;f&c9l;w_Mybj;Oq`5<;4zFM z{|smWBU{vS|DW%?E<+s~i7R&(;oy{RWR5t$or2bg+EC6A_N5u)!qCf?0OGcoFa(~r zDnOD*N4Xc zctu^ps?KZUpW)+2(Z<+805~`le15VyKz869Hqs2Hp|H}|R-c3yN6B5=j$p+8?OkA- z5Zd*>LbJHUl199Mepp7rn&bn1lom}0$=5Wy{u2!O^zvL;@Q(IjWg$ z8XAB)>ih0|xEIhgnd3!_oP%kVlm71+FP;ue7)3(VSc?Jcgkxxvh+OV?PwM(5tpTi&8b6EEnmdJj7u=YN zABK$eD#jL={_|ypaZ2Cm^Rrd2AwXcsM7roA57gmD{bIGi40|Barhp6;8r{O%#c}k9 z$r{ix22wQ+9*MBH7#eWI|7~i#^N{^zJ^OM-k*3#s>D!%`SI26-9z4i~0l@w1AzaWy z=IJu~ya--Dv(Gil>Q`JPsC92Nnvj?cNyqbptB95zvPG9qMTf0%U4U7O^adpmz9FTF z)Etn{N!5@^CY-@^5mS$HDn3zt_o`Z0B@Qg&eW9EAi7i>m&CN^_iH6wY-6`odmRkO? zW%B4BQ!#itv>~bZ)=vlJ%RCfd6p1oVrN2_vIvL=nU!a|D_BwcegX=HF@6g8Yq0yhS z2fL!YDEQuN9L0?tLfvyI0%Tspx4@@-b zxn|agoXBD8#0?LC9_uR~WISwC-!;N0~>rtUWRB~<~|FFo|6<&-xs z(fHhSmblxwjjwH5ur(l6Qiy!?r$9=;uwYJ=6L zfWR5NwyP=c%(pZZekulX!yyJbWFt;4TH`O{LKSM({0U?#Ex(=RTK(s&>F)wlex)jp znAH{w{&)!iquJsPL3v!Dd&j2Z?ihV(Z9ie-5Q}vvzUGM$Z<7N+tqdcEah0%k>xRYT$50-ToDD~aaA92X}SZZ zzcc9S+bquiVcF5xPupdE%Ld_)_W5YV$+g=DARz1qfr_;gm$G_}gMD{!d1HI^x!}v| zRvXYzl)1of=#WmUraQe@g<;f@CiV-quU{ui`;8y{*+WZKqyPoE{K5eU3sC0?G+HdM zL>@1sh*pZg=bxJZnjqjW3zI+{nFfk!>_66Z;;yryy-&zb&3d@P3f@)zr4p4@!4?6u z_O_T~H&I(LgGk0|JMaa*uUO!R=IEp-O!*n>Jt;uw!{NI3vJT_vI%U`rOIEXM4?=$o zB63Tb}=_Vx^m& z#aDpjj@O3b;2+9o+!2+eP}_@VUs<+oQd69FTJ+@#+H^#-?#rc~kE}qHrbnOj0ds(m zY)4KSakkdnU9B?9PEZyaQL?1B6`9<%Z3+@)i_d&SU9ctX-@x`WrV^;8aVw)qJo^6} zFm2en1E&@>z`$?KmI{!U`$u(?NJ|e4f9Kr#!+mmNx@CgtQm=q$y1lWl=^# zDQ`_16E|z=@U>LDiw3*)FXy37`I<(!EX`d15ZhfF8JJan0bHxys412q9NIYq1mDX< z&(r~!=u9LBHJ)P}i!*%323DXK1yDzi;UAuvrupV6*pW0%T;j%t;fEIg+jYsHW)TJY zILXl85wN`jv2>h&G71Jz#Il1T97K&f3OIR>?6YS%8d;elFW$@X+&t4SmLQr6V|+yG z6+Z~ER3q+p9%Nm$XahFaH-=U##xF9__GgWxpHa~6+pax2;hX-}^^>5Kess3W^!9u) zOV?iB$2y>2Qg}aD40&9CGW<~mUs|-fVTdfR<=|p|F96O3;_wiWgZm1I{r}D-#wDb> zU=UJ$H;+$Ium-JOOo#szzU52LHV*EmoXOGk8lHFw5nYs2)Q`AjS1hvB{i4N`-!HoxF_Vef9=>_w6ZD_kN?!j({Ik+iOhyapzC$3X>DYhw zzSYC&{LBJ`(TKYRK+`IM++ls|(B^tUj*GNW%vzZEh2JCUF=+R%#P0`5udb063NcZT zwQjzz=q@7{$u z*n-9PZKgD&)P{;OW?|orjkH|_At;^514{swP>eHXg00atGR?hbB9D$&I75wLr=RBt zuHC{@JGK4PIt6jY1?QBbmCk)xiFpFITrUvP&0mYQ51a=+xeJ}}VtqFrSRRR-8-wX3 zloW4y1>s_}2mZG6>I|FP8sc-e+awOghEut0HXV^Z!CxLt8wf2Qv$@U z8%|i0MCAB4T2eN!_lZ+KvR8O3NI_QPG80V7f*k}<>7cEqJ2ZPy@6UT46+~Tlr4!e? 
zD(sitiZpJd@phSyNd>B%=6aZuYWdR+1*O0b)(HC9?ssS$Rsk0%yv=|8L`CQDy3(r7 z$Q8)astM>{;=v%IRTc)tVSIr)A)T-bt!imyygXitD=X=W5R-G=Ha45FFRS{lj1BN& zC?g`neb$!6qXzyu(x6mxDBX^54PP)upWflx{kps_F(Pp_O%sdoDkvv%k?y^=pdhlc zNPF6We}= zn`}P;271k&80Id*bXhS57A%VoGHOUVm$4yJVrxvp<%Qy>UmZEJBzo?H4(CwSy`xd?(+1HaF?e$ybL$4UWx+<|E%CFD}TDGwF zU`V-pbC@1^CuLBlyZ!$s(+p~C9lY8Z_8Pi0$h*_AT(vYX_HAH``E;85`vw_HeT)Sk z@rac%hAC?<0YbrLIS^z{-S?A@I!4|AI|zz^fjuh!>oEq@Q$|^^n3B?jZ~=GLC6M!9 z3p}l=PXE3?NOBzjWB{s0UJ#e*^#A&K-SuUQC8x-3%7_jt&?p0K9&jGotM@T0|0=an z1vIwk$*WQTETH#ob<@ZP1|F~-d|WDzfEWwIp6u%Nb~$+vf5s~DJx4X9XTZFknbNH& z0ZpeMph^I!$dO=sL+qRLmCocVX+tkhqgV-_=w{=qDbYUzB`;8arhbW+OlcX1r`lq( z1>P1x{2^gw-5$sygLy97@I!S4tdHh|q1~7DKX~@wHS}6;C;?NA$L-zM3f{uh4wx{U z3hS>8gelz=Ydo-C4VwxK?qV+sxW)GeO(V6(P$#AcghTrr6;wv8fJCiKil&HG{Z33D zrByP6PlKR0Q~Cod!kYeCtkX)p(q{%Y$vYAAJ8E`4ywOidBWRv#@<$e6J}hO5(>UUv zF9%DCMQxk=%P6}p9jD!Q;0W%$3k)nUxLS>p#z3ymyr$(+#Xf!d_Ke!jleYazAsYW{ z`K(uc#PU$D)}ZTC_befrAm~CGk*~mp^F~QL!RPr!ZBUmPUJ@F2^Kx}U>tk~)p%%2t z9cW9L#9e}h-f1|2UzB|$8m{=tg?_uxzydcW@*dDJ}40;^WU2U z1dvOdWT#&h?79C_H{YzW+HURPoNM*#hAkOq+QgbI-DD979Efn5Ym0qH!g@>s0SMHj zyK{+#a-NSZfO7Q@pPp`7O-V^k+PW7#j#j9RVLGR~V}E0;hZ2-yDC@28Ip~gR)dY#+ zC4%>SdiqOq&^>5dtmFg8FNf_|NK`f`)nq=A30lKcE`u;)~V4FG$%5 z+rZBd1jZ%ffE(j%UX@C{NGvG0*m?h6R#48hz`zP1QIUA$mg4h#*1LYX zouY_8M42t{B%gy3-sdj7WdGFqu8R+?R?d`*!Nui8{mcX;yc7+-ZL6YuzHYx$Y;T+{ zJ3rS$1G@MXB$g1~AwiD{asX0#FLohIIGB{q|8rWl@^%1Q*4gEdl;w9pS ze>mN9GgnoCkf@}cO6RtL%Eha>-Y#Y{-|>tz0LQXD4dO@O*|k$rq~M9%ei4TF?V(Eb zUE;0m57&!qp6ChkJCt_Dq&BM|?UB)0U@8E><+0S_VRU_O>m)qCT)a z-lVN)!l=XY!;A0Y@hwA2AH$nk&;rF{NNHgoDrc$W+%LVe?^(B*@Yr9<>1%kldoxAj-E@VkFY)}QNtq?CwKD&Zh>QaE7o$oiX^&;iwJQzy-z;H5$wB4M z4dCM(ADIijZr(uo9|fj(`=18a8KI_<8GoQ%3>tZG4HX#9ovbfC{FV7m2aJpiHF<5d4^TV^@7sb$J#&Rrh{YR3{EM4=!!=wXg6xac~(%^c- zKxJFz>}wtrNX+kBqPqMZMXcHRu*W=_f35+|RP)(lkzi6Y`6PA_kZ=7Qu$GFe7wNDa zbi^k=xL*c-wJ%9Q3a{aJB^?Z8&u?Y3=GJ8W(;*YV__zoM$=eTIfYO-#_AUOiMUy8= z>E%ROpUe?&D4h96jTEm@GwAw{>g}IV&*ssZBdIVNL5V?rnKSVT|B#(r%Cp^D8YeE; zv({&iESjpE_hq-+|&AG&A6w!`mAR zUl?DPaXOeD%CS7r!f(3^-`iY9)~~M|8Thq)6R4dK38@nzclmb|u?M9k(%*e+S|D99 znMj&60#}NTfL>J9HBysnOquEh#0tVpA7S)IJMSuw>K?3(XC+cDi2Z&v_&Lr&QtH;5 z6LCA9cbFj)iDUO=1Wp5O4G6Co8OKr0H7 zev1$Q7KT#i(*rmVHzV@IMuhM|rf2xjZVlIsU6KO!u1 zVg&lb7B#%&wYwWn+Wg1YbH=te>`wMqdqmyCULX9$yu8??%)yF%Hi|ReMcOTlfL0L9 zEUqlfeU{8ab=eo}?!ldjh5eXI+8pC*VA`j_ZNHQXnN2~WMZpXdvEFIQ{{fhxj(;gE zlvqdv1^5HY3RGB?wQlLZEv zEgGg5$jYwrY`&G+*jW#83~#@asa@#D!zvUr?jKF(3*H_-!1v5h+;dg;65Z`Ra6+eD0OwEuiQ}#Uj>Z={gvM>t-gjkNBT{-!h>rJ1;qE0U$Vag zA9c0mN84|+FgLvtPxb4gjX&6?VtPy6bM6s!&v%8x`ei>p_M}+ukeZs*30Z@*X7#Ik7e&jtHQ}e0%0LhDRl1TCXbZZj*JPvqQ@kA%X>BpdC z;ID+u7OTx;eZ5cEQ-GEvv)v-HSL3{mTavEvdOfc}UO!ovZ61sG-y@`GMa`PNRKWI7 z2KdavD7H&?v4w#?mtv|$uHe*`!LBvv-%?QlfOnU`vTDfQIL-u4eGh+iy^Sl5#|Tz|WnT3c*$OFwGF4m+WHQ0TAy8sFg7$QY1 zeStN3#8E0Gd4y7=>7zfM|3g6x{8cDbb9Zku^`qxy5vO~x5mP7(&bhU<^|hqi-KpX= znW|4dETNo&VCMfxYIMEy!QO`pf{H?ZW5L*JbhaKQ#A#XtKGLWq%SRpPsH^Sx0SoslLsG+j~kNn-UTCb=6YvL;d#ccL?7NP6+F-S<;N@uoA5J|lLfmK2mCbe{UE+vGAH?sj0M zsvwza_e0Y#QdQyKnLIiD!>D}24W3$F_JvM-QJR}#S@}wL;I5 z#WJ#%NVbm)L3;&ZU$Egy`Rte7iv+EQn6KeL6=01ymc5;_xk~sz^MV{T$6quHxLE5w z{_(QTpo@nq&of$KR}LYstt_*RRKpPp1g&%&N`V&N%KD-=F(H}7#)Bj8= zXB%Fk_R%ZUe2u>Q1eTu-AGWttHcQXNWCrtye@Pbp8uDZ^{`+>kKxrs_^$YJ^xqV%j z@TKs;-y5C;!#4!Ib1kP?C7ysa)6?0iXD1DcQDgwT!%p?iaVISe_q1oaS~48BM+d)zUbFwfYczP^9)x~(PX4F;-atn)P^OEOeB z3Kf79Yy+0%UfKWOmgR}(DXP9}bqTIHhXs5eS@EEwfN(46Lx~fOS0EadDzZemmgzi1 zxt4v)%ll6s8dPHvc{RuwhlX9EK!Cpgw5_9i+ayIGN-Ug-^ zU?Y8$xH3ojPnl$$&TeZbgIUJitmf$k4F|N6iRQlboE*wYs;fC^ zd)zmRRVq;yQou7~-M5wj^yJ2ndR&WnE67_?akLm%pF;of5hUVxMDySxegEqf2!T-g 
zkRSOkbk#gJosW3pEEc$oB_glEq?CbUs`NJ30BhY9hm@a?|rKt*yz~1Q?(n!Ww z)O{IK69zuO8@Kyg*C43Ry$)`F-rw8bNGwY5kzc*#x3M>me!if`ca_d65BH{G-RguX zm50hjlg2kX@1-s3rfAIXQFAVlUR6WKGFT~|meflheEP<;GVoJo%O#QS4N9?J){ zVGQ!2?C*h4*IN0fRlvQW0GGdAz*}0RitElhimbYCkd^lpC-2)H7aNBKFE_0#-tlm6 zfnhm1gGH1ph8&nRgTR{}ivLej$VsGVk9EEa4ICV#W$}%v23a^rmW7aB2Ah=3N8I=m ze!Wq2Nbq;DPfL|$2dluXLYX=B3sgOe?v?v$KucX}Q|Nw{@o z^TgXj`qL}|ygxj6z)@DgC_tDLxO!1vr{7-C;Zmeg&&~FG%ENR}ysr3&kP<2YiBb3mU2F_@rYtcD7-cHcHJZFS;Z_^O5B|LS#}= zG9t~XCTam#N*1d9MY{XOTI@C;s01+dcFn$)*}%iV)_TK+j1{dnH?bDJKCy+*bkxHf zHAU^GLEoX6)j2i*3_@V&$@(8|!OLfneg)K?CeKO6rMQai-hWopuAkZh2GEZ-|FI7` zXEx`)2ZzqivwgS!-vS9|Ox;!wL%kc^JyA{wFx3nxRA6myZ`X-KKo!7F#h>@@zQ@$0 z3Yvccy(xR6k}$rZ!gGP7QWs!HNj==&?exMT%Rb~Q4Gh@-bq-6EBR6f!9`~GqQQ(z5 zxq}L3@3wem-T@&n|BS3a8skjV*yCo%U1~Nex_@MOuSP|mYtC=QtH$?&_iLQ59ez7S zqVf%vH;%2OmeFuEFoJRrB}b#MuKkc%KZxh`*0wfR>rClinA@+^KGCNt%W{8kt-aZQ zF{FsdPJEA-!3jw7FVqLl`z9NLCUlDe&b`0iJmDHGsz~y7h)zhnpG5Y|i{dse2e%YE zRdnWF(bUPU(UH((J!R6-XKL5DwOEt%a=wS>eViP8^RR;7>QuJDk0Ex-z&*xw!oum- z)wa_>;s}dLi>E%0y%LL_8ACpqQ@fVGlDQJXu5DIfSlqz7R0bFjPjQ{#87A@}`0^Co z0#58m6Ur+SBSs_g{go^&yT0(zZf%$lvx>BL^6dkFo#s3z)*;~wwjp&lH>RM3f{>oh zt061mSIH*6QW`a8lQJEe*~pF+=aRVzCtH+#c4x`+G|n}eF;2CYiLReP+-;nfpX$$j zG-lvdE>79`$sK$}Z&j2gN}vV3%DTnq^LDz&&D-)^t#tg7YHIvb&I>m=Bs?6GT9hxL zH5N$dD&d^>oPCVcrDQ+`aaQsXvs z=w&7+0%LzZqXS-B2c;=}XuggY4J~Mou5a6CDWg48UQv7|6U+k7S3eXN7-R6^j(eOd za`xKLcYQTlT)0nP2wTMaKO9F+STuCqOE@*mi~HT7{z<^cP> zpUzyG@|48E6Q`!#mGqQu>LV)f{ybbrxmTkvF1Q^N08Sy{JbIWr7y)Hf1V}#nzL}&-l4q_MTR0QheFT9DATM%)x`z<=r&2?>a`VK{)|xi&zm8VAKOl zDC8GTrpH4HzpBmU{qV8j@WXO&iWsj3YG&zOH$0qL*%;4za-((3I8kiK`0p&SB?}WK z!DHFagPz413>L()+@Px8{rzTFwFmujd=w}L4q9VxFD}to56)HH_S)tDn=|+|7t+XFear$)9Kb((}UiX4)V*V{O-(r+Cgret~#K@v3(Iqu@v6 zOOu4Je;gl7`$^?WZHUv?v9!`kbjb#Pd06^455H8D4aMB)y93FGnG^XFAWrAb8Bvx&Lz32@l4(T0IE%A6znYbk z)|WZ5uiAAh1JC#~hCb!FLepjCgv(SPV1ZnwZ0S;oox(a3-P_Gf1FIbd54fK!Atu_!)#|NvN-SP2!~B#Q zAk*DOX4~&_v8Fqb5jA$VOIdvq$5QV%8|xgb$}k^`x}QajhQv!znp4f?r98c?YTjty z#+DKsH4YiKr=jFmbq!w0o*lk1DZ)fB2ws*`N{Hpji(N8Ge{LoootyFcCw_2}Ho9C+ zQ8ZZvGtPo4GTpBwXk)8)rpcr5zP-^zuyP_jmWGdsAFlnp$#}N}hlJ75NwiZ6IVLIc zy|gugL$l+aVe`f76y%~@ss?&DY<0jqnTwMwj9M#Z{e?ApBa{q*+ojldv6Qt~(B)zf z+)T3qn5tAL6bjPw3F$=M5|j?DxlFTsbQOxJd+0$yjg{p%URC;Vf<^WDtY+P`27!n^P~UM@Sn@F(U4U&S9;!36sT<9!29 z#b8%%rT_0_cq| z^urINc}6p{_-Ah2JXyxYeO?)rKi_68@V1oxY+t~gN{onfYLTgY}f#ukPgv9 zq{)`!7UQBiw>#)~)gO}!|4s{Wz1H%Hyj2Ez4->5iluPO_-tl2!He|rO-l2Zu1$SUa z6wNAkUb@1zixDOwU!sU~JV8iq>JeBl9&bFy*2l{ICbx9sCfnmHhr2Gg!5rvvIBkHD z`RrI;v%?HxXk=MdWjf^}=8z|!$Nz+X3+ce|6tE19dB+}FEyz}*SNJLtrzSTbMuDJ_ zuz+}g1Km%IU9-bLquF>EyXV4>=M8R=7Ndf34bvk+E!uazaS{Li==f~b^{ZR57|_n( zrz##@RU$%NiHP%=t!ih!-@X?gtQ%s8wVVf9-bY;u5M&o=wllaBJjp1JBI}vsM9ive zO5aeurJu@@M{IxO5*nfB7!^FyIF;L;wfm7<-4<|{Sr&7Rm(r5qDfNLSb%M5_9u8U% zN0RTINxY;g;dcTH1+4{(KXE%ZoSUsvj^=YD_fSrMz`W`BZAS4Fd~N+baD|u`|%CN;tiN-(>D_C*bO~a(R1pmGDjmYZX>k!?(On^hIzj= z1Rp|j5;C-a$Gi^J4iDQK)E#WBa9wAkY}iZcn^2hvev;s+)#a!!Ny%!>J` zeljH=%>{qR4?lGkmLSfXkEe(q4Y|X25f|cHuJSxVm~l|2hVj-Ay1`J~VaQ{I#OrQ* zbnnQc&GG8tmWwCsP71nK1f#I;YRb3!EnHLl)P^wcts=4 zW)`@@w&K#q7zZu9>gMDGYhpHYpN9T9+z1WuT~zmi#kzbP2i|FMC)3kD0AKW43f0g$ zgjX<%=JH&OkD(bU0!=?U8gqOBjkPsFg$A85VL>`vho-Cu6%t-jL?NhU z79faw`5i^Uw5TT#C3$0=6>4?vM;2KkMH`9T<+)Ep--b(?_6UN2-Jf_9g5;(*)7Bcp~Ot-TPf_T zI?@Q??5TaYxJ!K2pmDNGte)yOv{yOVzZ3JLS87dl^I4HYP8brDjO{8UZ|(aCE65>b zGX@U{|9MRk4Y@effRjo+8FPcXB{7J9Ka^a3OO!sK^L@oyBL`2V)p!Yl%{Lb=A~j}J zH;i6-Uk(vaL@X5vY*fbZDN-IIjXrU6%dkz!SmW9U!qoF^m77`qr`LUM&Wl%O05s|p z!d}$EN-|h5Ln|sfIhkcNt&#tIn0jrhyeo7s$(-Q5F1p0&>bEI@1@AumFjiDOmuJQ6 zHI~F_rxKg=ep7aX9mjIH3Gdm{>Q`8;TO;IXawM2MT9`rPJD-Zuc#zPH!Heifk)rM_ 
z5cSe|U@&lbT1kY~=rH0Hdxh(>@x4zDFRmf}&$S9hOfhb}`p@~sxYYXcT$j<-6)(-q zph1GZ5(5{p9(#&|;E+%GH>KTho?+4W&>#6_mRq^q7#soJ>t^6|<6gWPGsX%p3S2c) zOhnf3o7unT`^#eq>^_29pBhLX;V;ey1;gmM^^58*tKNnnGopz39Yv4ZdxyV+HFiXb zpkKe(jINN)g=?*EO|j&bD)Xa0z;Es0OA_4crIm-Cxb9A2awk!gSJGQv9f^A}qQ4Mq zld|b$D5?#dY22pf_RC%9`bLX7-EBDkp6>e>9vCF@1}l?fi|c5}uEpTSO)Tt5GEBXd z!S5GoIaYMl+8U9~{BxdU_*f0ACBOHV*`4N{&0}`Hx-A7gF^5OAPwrx)y@Jht9rNYL zu*t$Dm7kQGk0*a~5`#aB<())%^M)C$5e=XUZ#qhZAaYVXvEo^7ay=Ay9M|l(A;UYQ z7Hdu>O+F{XS}}izTk^i2*;ms^g~TH0he;5na5eihF=wwSNeCK*4|+b~_syriek{k) z$Ja>qUKy2)2+_E2**fVcaiKMS!_{QwDhNr`Bzl)IQFVK+yiX#H$M_)=S(7prT2MP> zpKWmWZ^yG?(Y~Bo&$PkG;;GiIh_HQ458W!$c{WdOfu!)M-l|yi@6<)(cOiA)!A)W$D zRkD^|2+OhhqmD${j(8d(${lZEB0F@m-}t@DVf_{HJ5%qqYi?Us($nhGoi2V%>8(en5z$v!&H|w%Vm3uf zqcv*i38&IxY&_R8rZmMAB~wDSI4@9Os$d2&$J6@(W%Uyd63l$B#6$^Q&g|WF8Z_qU z1T!zIo34DfzS%4j#g*Sn5m?fDmx!3TtkX@0emfzTZq%8rxvq{w|LAI&4ff(P0Xvea zV}U7y{kSg(2Ptg<%JFoVS!@zlXsWo*g|j8K0H>9uJxPzxXKqxlYWAL{SDr;Fn@|i* zNd|e_=5e#0lotB^$ktg*NoLKGPp|1a^9Dw!B-q5kmhVuo{uz4~B z^Fulga(MA|31^4uqpq!)fQWlpyWe}tWNnwtDn)izFMfKD$wzT^qp@y_4hIQkea1Q} zjc-+JY4!8t!_Kefw0+5lnL*1N{y7fv$U8qVgOmZ1pdR_YU&etPem=SUi2IF3=Gkj0 zHn;S#y064tSo+N`MWk95+sDq`yAHYU(4EYNy)G_si#I3h*wELIBAE5Zd1V;xUT4DE>`eEV#(y*F@nl@ky6)Nljj0qbg;{ zt8v-wO1IWxl~j>*Wn2Wiy$Fpxp>b;=DfOgB`_-h>S+jMY@I30!(R`1iW5hTYVn&x^ zXu8?^hF1dTs%tKvNqvRShv%P7BUZZ3YuZiyO1d*V<;O-oFnSit9`3X}^0QJ8f0DgG zn^$e62abx5y05kyocGFb`n$v0v6>Wd!C>&2e`fjC{j6>}7~fzzeLTBjU1akfEW7JG z2efWkrS1vgAbK~bSXtRJ9&p11z?Jp|gh1&Z4~^#{UcL9fKhW5~R`4Cc|^mXGvnF+NF!+)h?AzZV_QEQSmQuV=9UuMR1A= zO+Oz45Z1K@IQ@=V^)+&N#Nn>`pyRI#?y13WWhwp88$qqoy}AvF*&H3Jea(KWD|w4` z9$Dk76wYD1e~)MA71Z%8*$O8`z#v-ZGOZTWdFZ>CjZEc1OttS{l129DjgCt%g z5f01WNoj8RxcmE!BZDEMk4_6B-{$2IB+;+=l{C;dx~`)Nq$q<}c3N=D`tD=VWLO4- z(+WdOWRHCWy_^V!*b}2vlIBY^)?=MLRTmcOeP}H|3!VCgui>gNnI809>pg`J-!>2&-)IHyQ$xyH?O>82>i|Te#6+YyTWyH?50Njp8`>v>NN`D{nV=#f>gZs6MDtx z2r$rsyeX7aetQm^M|=jO1=>nd`p|>>Am`P)@pSbGUyW^wi5CmMNDI)epAV(T;cHe2 z(j5RiS*S#v_=)8^jwoe#;KM6kI7raQ3TPH`APkN<5Fxze4e6Zdd%Fl*K%i&Ls&~HrG&HxcU{C0Rs z^TZ3=Q2Fl)ZBH{>C=?Nrf?;yV0Dis8(S;*{2^S60|lccFM)TVhjr=$(db0Vy40GT&7@f-%@P)oX9|i#*2;0H*Po9Jme+= zpo1AF83*xUFu7WiK3Zetbm`^Q=j|Wu#@a-_Sq^u;AxElgfTr(7)Ri28khGRLYJPf? z7EHMB(-0e$c)=Vy7Iy4R5qubz{Ijl>-+mEhwndFO>g6Wn6d_SU_z5lwxMA$W=SWp& zs8A-3k$cA$q@i3Rta|7qxbiavhjH$(w1d{T1m-JL0Y)cB2&~)WPTn@%X)0RnNnT(s zfxZ>WF8q+sn({5-;Gs_c|9$oiZgxZ}D^UDB*>Hv-0&DU;&H#J&#BKFoLmULvl$SZ+ zk(Z?x&H_mXr3z`gDh}n-S2WmzG=q-2hxlpIKjPmA#UN%%vUG6V$bbqu^uq#}lYCiH~LYOgh{@N#%qHnYkt`Q(E1 zTD~Sh#;S0ek1;8#sIw$+3-Km4ljz?UJS4N;l_sSD0Y*!$hAQ@?Jkh(d%&z6v^Z-#a7sTl`0^^A&PP3!e2e}RHy?)m zVP6?=VXg%F$W?)iU<)Gc88F6EaZ|z+zyo@N)J1pm>bCx8is%TZc)>>X#@Ug-=!yX- zSQIUuF&nTVmhK)I=L5lQJ#AvI8u-R5+E(yGBXJI;5?jPUn!GL%9;v@5N z8#*@+c&ZAve!qDfCqIpP*;TheThsjOc5H)Y3As7~R2JgHIV_t#qjSZZgtL%gm|Et2 zEUlrot;e&-^zhS~|7O2=%l$I*@@6jpMhZLz&j zPPCxWudr#*BUu#odZLn>>ij; zWxZFys)X45dUm92M_2Rvw5Qb!&`F?d;K*>1Bh0H`1d#hcqE`R@=03(J#;+QuA{D_n zZ!NVcp?kaR@~7ud`NyKHv7MzI$3;b>!+6|pU*;5`c7XeL7nUTH>@#KeC~3T?I)lY7P}8z{v;&8fIuxj0hW zR7#(FWYCcJ9Ov|v&uLgsrk#?VIh+gv?#t**|1@R(kL5{-8bKh#fyYABt81W~!M_rd zGCMQG*tMm7c4aONN=L}p6vS;2C#TmH=N%=TbyQ#liG=+3^EmkFiJb! 
zPnQt`$f3XoSJYbbmT>l$D~6%`8@?+e)am4|E7s3`x!I+N^G+y~4Z83!Vg}t0+1-C3 z^Cdl%T=K+ll`x z$2`~>Cg#BaY}%C8+7$>iU?|X1N?VmWLK!GXAC%kMOiKXwqTqiD3nR>+gm)QX+O1E9 z>OGX8S__J9;VHm3`cz^uRXev+i4RTAFUPEt+F?lniNqk7A-P>lN0GHX=iX$XxM9Vx z5cWU^Gsqz1G8vdvyn>D2mlx3uk0lXZ9!8NPuaHN<57R{LkF#3Ngf5-oc1Kb?@0nIg z!g%GcHEr#Mh02VB4-^N1m!Nt&f+lv5DXO1`~ao}0us5t}ALD?Wee z#<@LVr6N+Iu!hvlTxUgp3u~DIgSPs_4d=^FnZ(+q+KSq@rWMUVNI@dX9+4W-3T>sp?&Y2(1^kKU^Gj-Vemr+PoU3NSAxf{!$Q9Kzvqx{DsFTNq zm6NNNIJ;c1UnQ42nt1-a1Am=$B+i{QCsB05uYcI?F@=1^?aWMo__9tQyGVJ39yZyG z(l2EtimRE#2v&gE!G_WQ6W%CDHInE}0>4Eai3tBTl-qVi_?5OPYqJou@hVr-3oX#G z8<-!qRZnH4?2f?N5mZr=17C!c*b1nZ3d!(8O;4$hvX6p&_tz!h_gD{o9E;hj54b+p z@;A!o_?`%>;ZRzZlGVgOs;woAO&cgS!xcYZmti?mz0eWiDO9+Am5q)~A#qtY?D4%_p@_o^1E~MIHX{mGdtm)Es5x>J9&Ox zvB@kw9BI`ih5sFshY~Xg6;KULpNem0=8Y%R8G(x{yx$(Hh^ZXg6>9iB`1ZpW9ZLl@ zV4^jW73arDF=;Zy!6pz!buBm$)9_4x20(%%;xKK8i#hpewWdlXSh3wMH7tF;S=!E6 zIqud4V|wW#lSjY&=H&5E!`VlW5dB@c4MK;8%E=sB)?hXPQkBH*3_xokxW!H3WnC|F z`bPN4L84#eAJpzmCyX}23V2B4lhtTLxx0oz#7Xk^2U-_8&oSvy^79BrUoM#e<%lnT zQ0fE829RV;WeDWc3R(9PbtXr?+VOP}A{mBLVaz$hM?KMJ`f$QiaoVaoV+bcgK>7Qp zD|ah8m}5&IRhbzqaHp}Tjm`e}-Lb77hc^wvKPJ52tYkbnyLq~cD%H{M9wu7@%{qOy zBc~N+YscJ$chg-LX&sJ_%RGiv#ekHr;;q?~_6MZgQ0>!{qd=#ip<0-D)uy!#Qsz7D zgh{V&441OWpx5%kkz{tdOh9=-f5c(JLxT1=ezskf>$P>c?bfM4Rd1Fk_CXq zVEX#)Xh>c6!I7eF?qwTZlbYXOd5TrrjYT2WF2)avt|Il!TM#p3?{cyg>lMB%uWXiM z`)UJ^Y0)*u7Udu)78rBu|{=tmisQ96NsFNWJI(Jo{DdmWH>>Fs!h3Ptsd+yz>KUzi! z?Bo3%#G@JxXYoI$Fu)!+>m2O!!b65%$PL z<+}?x-l)kN@i?HF&mBdYmeSNFYQ`R4OFl`vzxXwiGfBE*(t>&XY|^`V+O-em+*ZPE!KbKYZ%_k#u*U z$X-upI3vbqYADIK*M+YBI9^mc!`|8ojD=9qD4F77h6=BjP7Xs(L8VR>rT_iH>guc0 zwYD_m@ZoZC9{1EC$U{3=K1klJp+e3(QJj`C~ zm{mw%`GmP2VKSL`-4oaXjFgJ7AM6c%Kmu`f$X=*{_$rI!?E_BB)%b(@$1Qur1wO96 z!>A)^zMR}+R1U5}8sPDU*3^4@=EB2k8-9=Z@0zs*8qX6M$#Ol`4rLwR%{Do`%`#?h z7BLi*e<);I-{ZW&8uHfR)O*oySB$ZH5%Zf`QaTGu5aF`qeN8lX?tK%3XwPf~6`pn! zD9Q}Biy4HZT&J?7OTK9(l78D9=gT!J*ULAlF{3>3DE?lw^m$nu5fvE7urx*es zQise#9SqRv1kF;Cn!rh78;p%~BM0LpTn_o3IM)Z z2=v_4el2yN4{(dzw)|t$w36eXdq-VgFF=v^1%v_l<21r3K@x_2NfFdf)w~~91!#r^ zPoX0KCivWC>=Gvso~x}}W!I+|XrK6@pwauDbi{x(j3G$~(7xvM+ma6{10F&l;O$Kd zzbreHDeB?xZJ$jQUra##v+q#lg3}7>N+wwek>wcn{@y*2UVT?E+ZGbiYzafIsHQ6y z#SB)xEqcuFCc_ZvnF#ZHxO({3Xs#Zr)myodWo6%;iG<$00dlTCpnl8SfK9(c@gU9h z^_md=qkiUAR(R4X+Sx%Ew`_T*s6Wjlvx2sLH;sG;2$3aSpX4j0uVL8v05osdKZdkK z6zY@CVA(*ZC#rfR`1d_fJiB@?xQMQ#xT{-n$@mQM@7-_#uF{eGc&>`b+X}qj1Ai`O zZS3RMuU3YmJmoT27E1Hio)>n?dqNjb3Rhhukw}LQeiLT5CQ1z1YB25;u-pG2eq4i+twO(!Ke! 
z;$afUJ>Hk5ALUDw6ovcG$4$LD{^i_xOf>=L!U`L}S8D2qoF?ipg&Jeq`KQv=AK->| z5Zl4Oa1AygkZ8XB-D&kZnCDcw>if!uaPFyFa3F94V-W)t3UyukueQ$y#@_Z7XctVw zu3DFmQq{yQzrIvM>U+|ki5OINSXP3}PNr38sqC!=OiVui?a83S?Zrwx29^8#iP_I( zotumeYKW#FozPXI1y5abO2j`JGt~;?j+W?Z@UR%HAw;5Yjn|n*-^vPpc(*}Ex!X(b z47Sp-G8x7qw?Q7>XpxihlNR^``koZp_PT0;zeSXJiDLORDX?sy=osql#xl`yBO0Y@ z4^%pL@IY)-@o1i0&NvjODv;99G!43&kZRg8re z#ki2h>KhM8eBNT4Ma90;wEL!&62hzBTs6NqLYu~~VUjdYbtJ#I_!E|oWF(Sm;SMph zNT;c~j6Az(SZw`XjQ$>B@zXRF#2*4Pqz99S>ZkU057tH^uEjUkp*ov9Rlms_SpZ&= zyE#HQnR?R(_O~EFw1lioc$wo#HFfv)Gz~B&Rl=o?vX+Q1d>dAkQ4W-g5_{m$aCgo4 zJSpFBvQCm6zCpE#5YMs`8}?@jY6ScldI?+i;Ga55QWdcZL$T*&XI~u=(&+JRql>#$w&VvRlSN{5CfH+&+PMj)mTOxRCiz8`V5@; z=VH74fgwF%L*LeXZR)9~sIb_2m8c&!-eoq$ZU@FI?~^ydejwf`@P4m_?ptcW4XyiM zhyKduKZmaP^64M3GKD@B+|Vu?$P{2ua|m-lZenHHzpwQpB~ll8`_D2(<-V3tN_nmQ z*9+GxCIokcUcdD9qQ;izSfYTR$GO-3)aP6=n@KHPtk82^;I?3l2P7scGpu$ip?%8= zaXO`HZB=92b5xbQy&Om)tKzE(+|c*csvew(-S_>(S5@e(m1xnMW>K2>Ic-V`KbNRO@>NMGp|4ul z?YUHi1}c687M0l0KN!7QQmh4g0>#)bLGEAwSO94?zbd;c%YZ{h*VrxANnwTc4tl3ti&t&op%RrlCdyB~rH4Ic<#qo@2;%XY83J zck9gMLcJdlVk7m(fIGtvi;ISX#N|)TSM3GOTht%ETDkAsl2g%fPw?dG7mW#9mWkAm z3cYbON3Cq5ZP#Y~Wp$2{OoZacux(7~j_f-sVw_jhq%~mMwxZvF<0rQg;|>WZ)%h+# zqTl2w{~fif&wKT(&m)yK_LPYm)JqNT(n+ z4~yeI@n}~uuT@yvMq_{ig(;|RLw;{bF$Yrs61WYfdpIp}fjy{7CFK5^5rlcx0yCBdWPa49ylq-#yG^H2C z+oPpnD3CGYkX|unZRv+|V zaCN*oCyISX7<13KUO6-I5H^BkgTKNorzI?!-`^o#%aS6IPeJDB?bYP!2$4!IWG8ObFkA_U`l$uEZZ^Xw@Li$XtX?a@eDnyzKC03%oMv(k4-)DeR~_SY~(xu&}!FoFNt}) zhPs}@Rtecz0C%qDwc-S(z%DETrnyrIe&ky-pOliKm!>wWv@;a1%O*MK0kfj+4JmajqDd5B=UTwn2{pZT=w;(*Wrsjtn}f|vAg>6?qgpE!>Gyge4Y{_ za4Yw~B_kVKyp_$Y@+=Z5G!D1$2Cs~$VcCZZ0bk*h4V`l~dwPzpofNmW9m9-AmRTFO z=xt>ft$bgDqa+Qn33e2lltz)U@A>-Z{XMgO#$=_(aBCUns`}&*E<~ z!`?{m+Vpss{yPn|le*nFn!*mz4r1DP4{H?{ExV~M0~RWdRrr1~ijO_I=>GCLYw|Vf zgbME)N41OVKN-&15&mTN-n+yLVxooBf)H!ZSoR)nRodU9x-m4;K>X2vGUx>F&Rx>; zw_s}2p(!x5#QAj4adx5EDd>VX(e=?_CQASC2GyUmc99zk9IB@X=D#&X`)y8KhOsQl z2nPsa^J#xi6Ca}*npFH?!=l<2&`JR))0ewDP|`u$lVlA76Gap|aau>l>H zwH{G3lRV4EmqC{Vw2^=| z*$PHPuOF>{HMm^df388Y%}?5PG%~~=O4Y}5@>E;Rog|z{cI%UwJL&uv4ilI?u?u9E zQdGfGRzcvZy!xEO!$yyNzgGnOftdS=etntzqLfxA$FHDTE_vt&kt{8O4KSy>eZovw z$cE5o{&MsYPFJd|xm1iU#Hk>Kk|a`w4&AMK>R9JE8V?drpYOXvLdB9p!#Df^oN`I0 zcmrTu{oGUg%32GVbDu5}YT4K?+)VL2bTBIXQsPObsWnlqKLb{~=x)0?PXeNp$6=|D zw)OE+SZpp$Kd6yBIT`FzkI}i;VS0O?M+v}zV*JbWwT}z3*Yzkvt zcE3G0%4iI=JrCe6@u{cybbJLD`WTLXXT&8ubOZ~O&5SF`3Xu8_llP$!?uxO63-K2z zB8FJBuW&g1f%4KrjPH=>#*OGq2A#7Pq62zym>Kc=s~At*Og`oNUVRnRwEU34WBSo5 zlVpp^&*edEaxMgySUjHD3^&w>3aLwjjUS`vOZS$4L86O@HlCkm?xV0WsT7{S9mA=9 z$<788#+ZNa0ON0alxeGf0qBId3j}FQ9c9}mE;LfE$3uSg*W^^wclDaC;GHEHZ~Kr% zi1gy{pNoC~Oeigy@Vv@!34S>@Y$WmxzzH$oa)2g8N{%;qIcNynhQ@!=ATA+Z1!41k zkPC|GCF||UdY(&o_q5&&&pkt0lM%{^d!lDObj%>KBF}+n1n=@^H4}UbV_EZE7 zRv{@7Ldklyn|O}mCzAkTPj-N{&#yS4+(z-Mt~JBZxqM~4KXTkZ%gL;c3=ESz*4ZsJF9nA0Ttd(KrLgxt(P1p-j* z&mT=T`?+Zdc=A)FZHq1*qnJxbU5vJ7{zGn^duA-YivZ9J;v)Uj(g|BQ=ZZ50u-NJ< z6N_%&qEov=xx1Y|4eFEw;BJq`w#N|+-KUR^D-T870jA^u$Zp_UxbX~hVauOFBvgT9YHd3pDm`j4nTt_cbDksT7r^^Vn9S0jm0|x?uJV9n(Jg%`hk$=-~&*@ z{ylK@43H8<&f)z0JyFvD`@VsR-6mPVZv52nK5(1+JaLuBId9T0UD~gjKg5TDX+f= ze`J$=cJY?@OYG~ue}1F&YU&}->gNYj_AV7uEa}oh;{+^gUVJ0SqGT>D)=i!_fS+OY zr~(WmT6}5Bz*eTeQbm)9#W4eNwa;4?(sGK@*L<4aAVwBz)mXe3vT-cTBYBgSCjh8v znKij?SKh9Qlr z{cUBGUqWb6i^G{F(m01rI4hVt^m3JDi&V$tEw-E(t{CBB^RdF8&||jw^riFLx*$hWP(eQGQR8 zglSxB@Soe&9J@lh;d*^Dbr{@VmK}oCnJ*hqrp^2JPf}Eb-0|bAoWZGdw$bCE+;aOo z?XU>zROczBTYxJd$6`(V zu&GlHRys84v11>+R7HBm&Y+8{J@+qbFysAv-FKFy^W?t@%d_cX%dy0Xlm8T!CAQ%5 z7-^959d63Prmj`J{oQ`a{RkC0v%wNwLSXh3j)|_LauN>!SUMkZa14Ho2iWiq6%~|1 z!e-n(NheU2d#3u68KE8)GKm-b$Y-FI0?;%X`*m}MtZlWVg|Dcu`|3>K_8m>UqcMu= 
zaM`$(%Y0aDHUGpD@O)$ux7StgcUO8Ry_nbz8RL`aZV3?Ws&P%P*#F`uki*l+DwcGf zsp9C6*~zFks=kL%G+GjDV+myXjuvaOJW#NadzW^~7 z^7|t2^?pj%=IaPIl|Ts%s=SU2icSMgVQkEOGqq9yFa%gS0de>jxKL97I;{^W-gFzy1uItl*{U#1HbnG*N^dO1>OPfzpa4U;eEs_9XX!TV|Jl*2O!W6)=HGUPDdcEZw@(ibi!El5dnKWTjVm9*-k7&vPPdTMJI6KQxaxF0(bmIkYw>0DgD*J z!l(3Qy>_}X#s3qZUKeuFDVp{w$BJWx^0NABcM|)<=4kC4imH2v%^z#07!}1ZH;-fX z=*}Xct8MgF3w=OQSZ-tVwK01U2hd9ytM)OLj2+{LhvVRz>#Li{JJV1-Nm#aUA4%4@PU+F?8yQ zNy5f@a(cPujJ)XeYOst6^FCj4kNxoc!sY-;AqE;|-aZ2yMQ4^cNZd-UDXx1M}Pij3~mMv)OS%Jqd2gu}0%fV(i;h>eBK ziw_o_U35B1)nH+i_jm8(lDz%RaqoQD9tNBsQS)Th6A#-y$C(zGu_@(|9!L2w*||jF z-IRYZEqKvoWo$93)u-h~Kg55sUw@7sHS8_^K2ggMN>)mBsIH~0BZX`sa-v;|Uk zmRjBdCrS-BfLLF8K9b}7D{#hE!_9Dr0)TTKT*391JC)Q`JyW|v@)V&zrqp0k-YI~C z1Xe$z1xESwyIThX3|g;Wz%lfVwV&A+;Z^AD|B&PA8xE5dE;`*9K^N|?&-`Qhm?X30 zLrOAPWbmNymS(dRRD11r>kEVT*D0A7{-oOu)dp&%qjuQn_nyp_57>h+2E2w+kFt(4 zoc)BQn7V>A^*6106p9`(Fw<*4KW^y-wjH;da$CN4kcg;(itjNl?Y%-(>wQYn`_ER! zWaow4gSw5>D88X8T`tA9Gbq zQQJXa#{0l*qq%0PJ>mEUW8Gdo&B5lR7i=)qzwkX{-WK2N=lop9AbeHsdV1(0 zaglvW?w=09Mt}I>9R~l5c=y}bups~PjQto9XB=q}kG?ggyTU|EfEVC+n>4AqymM#Xv6U<3W{xAvdD6q5 z0f2WJbsJX=Jvq+v8p2pt96;GbW*Gg4fNnqkIH@BE2lMg;8=*-61!5+lDdY=azJ znnGE+$$ZVt!2(iJ>w?#b^{~f?j=I~Q45o%eJkzEO#gDHBe~1ZT91z`GwJ)GlL)?{< zWOW~3bYj}4zwOz_rT!@u#ql%AaVi-to;zj67J(f{s3UnZ%pi|p?WIuYD&24L&r>jO zo4Pf0{*qy4|CwveK*vqx)JV&ITPP_o4Y zwFoS^b=f(|q}{E9J(`+%Ue(RBOaUB@i6EWIXtk(o+Ng`rigxG{eXi6SL-b+iuA00< z=9K^nEF~CzNPigAa|a}=D|~ex{6rhBgK#p^{^GUZ)?A&5A*vgnPkF!A*li%VIsp)^tZ!F7lpwGs zwY`)Wt``KiCldaD}dtq$8HDFpmF(lKD_z2<~NMp zTssaqs+1vhda^HDhf=F-(n1>&q&mcI{U$<4pQAOo<5Z(qnb*O~YuRro-0-oGU~#Sb zym1+wk1&`<+ZXIu z$2i=@gz>aOpUgFa-_kjTzAz!{h-*x`o#+v_`KaOmreGw0uV!a{7&9IV0qK9P=J7-9 zNRzsxC%0|f=R))j{H8nzH2jMtNbU0ghwdl``IO*eqRa^>eqebBwd7!piBQ{mXB+A5 z@rk@GaUjwN7rwG7=fARuTfc{t>)K|o6P9W(cecBQar08DmRjD~nP5NVP5$e`ud7265(VwdJ% z`XDv8<`B(rm_dgdv|F9IPf@i3>;;pBcW1Ar6K$e*JHn8$^4wAzz4n~$cWl+fGq$n8 zV}GTr4b+?ZGj}zSP)+S$TE!>xh;*3pS`us^_sSfF3%Xx$a>Ua_TxI%L&xC9Bfhw8I z5)g0G={WB8(E(u)tB}_W*2`0oY^w2%Zq&>yjmSV(*fR2kZO3-v>A?HJCk3M-Gr2e- z_`=6sQ~?1mGoz~<&L@)EIL{41_P|`i4DZr$fBVarWe5+<^GReD)y=+0C4!4I*u>x1 zAGZB42{=TL`2BJtw5x&VMH8qHdm6Ji945uw-uwaNNN}zES8^O;r*#Lt4M;A#rt2W? 
z;W12ej6UzQE4p~!;u}we*oe(CPNZJ@DYY>Z&SrK)6=4e=3P4@Jw}Da?c`O%l`u>x5Fkoq9qq~O`a-1JZPLM9i&>|HP zy+Qr{mpn=WS3KHLJ?fr(WFh6b{An3P-+qxb4TuJ-?{_cPPP0pbg?{?%tfC%Y^h#?0 z;rdu<7z5Q^D@C%V!Qx+0HcVYBVDp&T_4|I}r|NTlzff_zD(kCjOEPd*bwJtz72dO{0fZq^SPn8QU%cBkV6dL}Wt)YKl#N3eYgu z7mKw0y(}EA|9x3P*_Lpia0<2HXOki12Hz%=*dYzIIUsN(-COzMka9d7XfW+B#ero5 z!VAcStSn@YS8=#M@$4u0*buwHqTYcg9R_>Yw~W66rv#dUdxXfv(U$bJNKg}> zGjVfx!hJpOfTi=|$x~fQr7CgQd`EAd8a31;vtv8AkUX={dgW%tZIjNrr-F&g|G>3u zLa&9JH?do@!yhDH_`&dygHF-lpDXc0O+D&v^qa9Y zKnkyII00*rJMcXKVQKP;&$|Bt7dI5V?wZ_EDeo39jkSTOrqBIt*qa~+8x2f%nCP?x z1#YzqAyP}5FHJNS_7}~C(R8{R?)|$vbFk=`n=E28F4`cD$wVkbLOmM25G1YzBba*m zUBEL60HO39k$wkn&bMzg_zV3XnB3pI!_srU8KTBvaym`JQqA-JWMu9u8d01}qId$*~a>Jwx`VlSm>6tkkqZY3-udYxMc+A;*4f}}{aN6GCo#$>MEPzja|v93aM-U=MH$FD@C)j@Jk7wImht4; z{RQLIA5_Q$*|eO@i5E9q1lR{q#LNBz@37J`06^Hw+?Dv>|0>VBEKvAQ!ob=R=xU=; zqkBUC2@qyBsbNJA$i(l=ec2LhV7>D#^+2!lP5X_UE~I1$JhBZdAaVzN#At)L9z?PO$4 z4le(EdBTHD_ka%fHUAmxP^s49T($Np;3;lPotb|We{MD?esd*SW~I(4>don2k7=e9 z9(LLX6jx~dnhc4P)y^Sl1UfulFpI3u7qugEj;S)h-O8JG^+mwZL9|zxV?%WnaxA!^?ZY)S_pYtRV;p*& zJ`XCiyI+j5N&Uz79WJ} zgFY2e(pbyLcz7Ces3}Ymu2*TGcz=la&AT=v2I~x$QeUQAeN}oHbxs59k)%dXtEP&{ zv5efZmG9YL6LVU`!BBfb^o_|qwP3?)dcBZR`pyRY;xF4xt1JilX_F3-Rjcx(0>E^# zFkRzBwSN*Ie)+LWbVO{2BdIyPr|@_g4)~QFuon!`Ul& z0M1%p+H+4A-piQz$rXv|iluansLUCHLdT+Iw8euBVaS+IOx9@ou+{VUNMqy(5B&n7 zr{@*nJ%bsN|Iy(tuE%2m5xeJj25<6nVZ(rJfaexDi@fy1OWO9vuo(KN%O%`S zK=fP3*`8Ng-VgVAw3f-BO%c6*AA^SU+!rc--4X-+x(HiJ#A$^YDh$-8XS7VLR)H>? z9yB4{`6_kowq{%=0gA2Rh6ahE<;C^46T55(ZnXW4=SQ-pq;fhYpItK|Fr^xL5a<74 z0V~O{B-Nxvphw$4R3j=K8qn&wti4HNjjaUmWF^iGV*?#A+{Sfk(Eg=}M$nrbKy56g z!OH!eBxtzp!_Y4OY{FDeMvGe}TA3pWn}?)Qm!M`JuxLbHG{aLV^TO;7TGsgDt--7TVA`-WQvZgyy3YPm{rHfa6K5*4_u>1U!n!hsGvkce>B_Hl5VRfn$#U-M*YU@*j1GB} zyaTqe<2pzkKcSD@c8mD+A&n}uAc-&utmGBjCHHXoW)dedNNCjEIrrBKgNk{}auJF$(%*ia|L^OAv1hWj3GDX&Iu!!Wis;e@K|SHm z#sZz+FlUh^m%r^Vx?2o55Ve|j4R<$tIFohL8%0e}M@2NAbm*ytvhBJHruha?BTqkJ z$6B=e-u1cfw|hcm!Z2~xcyxlI^>pxr@DC_0q@2eRh0;`ZK@!})!fM}f&i`4b%gV~5 z;p)|x!G0yg#&^I&pFFml^h^ncYovMTE<(W@5FrYYB=+A(V!J>=Z-=kXYsOFH@M2LJ zAtV`$cU|#(%;0;E_jBzm9_z^GT3U4=%rT&O>2M2RzOPW?%R%d6rQv=v*n^R5L0<00 zh~vj9T7C%$9;eJ_w?5zVz9=JId5UZ|d`+{944})TEt$hX^Jn?VtE>b9*h*ER&SQc< z#z7hn=zaXW?sI;A(lp!QwwI}RIrNR(e;DSyx@Uhs4k2(ZP{k`s0!PMNi#*F&Wk1LlK=2$tLP0e0K z${RCZSQ0x3qR1yLzY2k59%kl2Yeg|!G18%yX%*H`KALy+3}p)D8e^R@2$;EPaO~x% zMiDs*W}%z|iH0TLg*v~bS}e*~M0!sMs-o@$z|rHk{;hUU)~g@Smv&?h=dGz{KDt_D zU~@wOhTv#lxzvV7vHsw55?w>Wh)&ldAr~|`I6S9ak$=n^A(32SVCBFa5MfUR)!c}x zcBOnQ0#zD3?-T+y8$%M_v$%+tqY0TWPv?;Bj4zJW&gGUZv=|U;f4g<>Tkf&+@rY7|^CDA?GVi`&eQ_Wop_9SSJJ$_E`Q}0pp%keI2Eedsggy3w zhcMMwQTMraAUZm|FVWKT5kL8G3~=0(d*>;ppDjU-^b^ah{&QF-K2KUwWp~$J zdifO;eVy+WK+*%-m%q-iN^?PQPAh~i8B{`+Uk1N%D`AbF93tEg1a0B8t)AFkyc!an zA#^?b_h!95l&*{$Rm+%|S?>>*YL;Hi1~+ev5?Rie``nDYn9ZGE2CLgHUft^Vt;~u~ z%M9r`$(!@cg$fPWnYC@lw>!>G$ps8X4ElJ=@6_x~MPG`SY~4-^N-cAJ_0wLLA26of zWHs^tS$S5j4zAU1aObG`;M%tX8dF-4kg60~X`zKK?LqGyXInE70vN-hYW&`%+Vb~b zic!rF&LYtI z1kaASdwF}c!Mc;84RwujretBBUQwjG5BdUe?==#8Wk0y5jJT)A{4#zX8@%tb?+M8_ z17~hRc`Zzj=Q5!fJJRlTyEDdx&jg5?a!|pODIU3NU())SqJD$^KGH4@J0{@2h$Trk zP-Ppr6HoR(3A|#^3>#1N-YRT=&>K8w7QNX;`UvwH+YiH(I(?Gvo89G6Ix{+E+{EqV2Nh1cFt=}1kR>j`@!&nOj2RYYE z1)V@IjGVmuV!5jMR(q(@6E>isaM=DPW{8Jo7ucCD!!X{HyOw3V*+H`RN|{ z&jRWJZa|Cdx+5o5_&x$S=bl<3((*RE9p|kED)Eg7{`%3u|8?@hJquSxsEEhOW!DOM zqb$Ft@Qm#5LX&4WZLKl=4J*F{y33`A6y7R2HEhfl?7q8ZDE^SAAc)E^5iMV#RPM?I zT19mNS?-N+$l}S=j=P__wc?-ioL@j&t?n%vMRIi6h-8c!X=Xb8P1jqL2>gY10MS2{ z+`+%iiapp8cAV6nuoN-3s7EiYj8<@mK0^hl8q&UAm8;X`%o}Y%!W)nir(d4utAIcM zcRtGtH_Mp)HrJW)&(ED>{21uB0Lf}`zWC4W2qUw-l15piJU+^(t^@kkM{V?NIQRdD 
zU${)d1#`P3j0?uYZ)mD>X=Um|qsMh`9=i?$0M`xx5R4j>0RBNm)x~eQP=eq_IPx6* z+B+uZ_9}ZC&;km@i_@+cjVBGkKPDaC%R_vB!;UZoZa%49Cdpg~#UXP)aM67USY$;y z-|OlA$~O^P*HNk9gJ-J`0_{hoNosDZpT0;#(i1+Du(`(Q6LK!v4jNW|7Z@bLwjPwT zlX>TIQ`I?tyxQs^ZL<2Q5HXhk0E1gMnAuxTjqTe${&AyuROe#vn4+D(p1%PUE*yz8 z*_tpoVOiy{PXe8x20OB&OnSkN+Pl_mqOJ%K*<nsG#;Op{rUl zcy4o%dsOg}_e&FRT&BpyooSf4huX}?)D|h8c9_{Lv z)O5QH0HSwE%Yd%wuOr%nvri&4+)s`Si=^@IJ>;khWf?P_9eNH z$p9;Z7pS>4cA34DdhGkJ+TK3C?o@K;9Eq263uAFWRsWq?1{}_gS;VZ3wusd~Y zpv=vijWM@^{qbO@OJ~8-y9)2JQ7TxcY&`waXJiXJ*PPf8|CqlAH+QMXAOko!Hr(Kz z(pW27EpdLd#qWNW-^AyyK4VKj^lP2?&-?ri?=v0VFi|J&3c_OaK!Dh9$L@BuFKEC- zh_Tv#79MC|gqaIt!|!bejpkF)x|08p=HMX`B2jOF zG~oeUYr4mO#D}CGe&f5E5Ccn*v(rk@(oB_I%8n!Y6ENEW1N#3yt#1rJy{`rBUL5GK ztDc6n?6u8b`<$yn_jtDNGjmwi-%>EKRo1+K6*|>yeC3B>jJ7f_)I)!dX%J+QJ^Y*O z0$Xp0Jt+be^}sVsHAelX@UR`jM_6kh#QG<0LvJl;Vxof(v2XHt2UcL%F{hQFh*a9*@dXrLR$|nE{$PCkc(1j|u-M$Kd`#9+kBz|GfB**` z&&Ty8UZoS&t^7XP7vt7g_g-DRpMx^-7D9oEmnR=^j$HH|tsXOrQT#;Xi`0ESIFggs zH7q(8mWcL(z&KcHSS9%9GCi}8VH_9-{Ix}?xcaY^kt^dn`D*-36p&q7wL)TiWI)nCTm2j0#Yn&9NJ=yr{jVeQyBq!$?zrlV~HZry0{^}icEfBMw7D31gLv~;smDanC z?(9C^ggVX_i=S(+y=|w1L4~nxAF3x0mUhg(uqU^HfF}wTHGGtj zdQ@o37p+;PtW9G~_$(=7M6}MCT7vC=lg&im$OAI=#<|U~T+i-M=We64dC2#Vrt866 z;Nu;s#eTfl)uo*A=wGUt-L1LrfoLYesE^kraGiHH#k!I!qc8?q%mQED2I`U7twICz z`okB|U~ek;PY#L&cuFa1E!D*Y7x-cr@<}HVdjbz{Y((jaM@6FWux?L~-*Tqu{Wr99 zeh&96<_u1WgH;r6mDA;;6^NK;Qv}1h9l->KDk>#coL1$12$6~wG#H>zMG##6dj{VS zYeg`>aCrw@ivGs^a1!CrR*fIb)y0L$*Yu$a&(_(jm#0lI0V}AM2Nwc=z`8R$$`owX zVS=J$0h?MUB1G*{DpQsSN7H}gamH}mmZkv^z?_G4hF))ipM|G(2b2<^D=}Oft14{s zD)!1*eq*skdq(J?J{fK(&jbHr09#Q(_+!&rOUO=_x)+qxm1@r5V(s7ejFff(E1|q= z+F*9UB#ujuqgW9mkM|%M*6QrrtmWm}))CevXptGmf|sq!TzlBf`y(JFG=C}z*A=|g z-fJQ$aq6*g7v!VYX4_6+b5{Jg!qV!Jz5nj#Y9c8*AB`2=Ja|d)jKtuaW z(*37?_|-~|v7b?T=$uU*NzUTljukYf<@6ic%WbHfVeEJUGZwn(S>|b|AeOZde<)z) zvp%883e5eW^#|jM5zfgPLR>^ZWkS{U6N#u?EKq2}$Qq`Pu4|fciB8(6Qx`l$F;!P-+L{Ov74A7@E@$k^Y*bNf4pl(#9*RclAj|?cJJBkK zlmHz`o(?@3f@(X@Ww&|rDal)!AHih7jDvgT-+0=dc6ibH>`w^3 zry zG$}MIa3?&jbS_1648E*zpzF*|Kjgj&E%V`jov-g<`qYx^H8lce-wBNQ&*p~~&@;#r zA-FvzT9fajS^4{Yr6_^BouG~ok{ZRb^x9wKM2t@VNXh+SYrMin1`kiR@KJo-F}Q!G zH53$`)1$I^vT4LGtU8si>>8$een27xI*zJ~yJ*FyCi|qk2Dga~njm28OJK2)8nDC> zJ|4YIII;tT*KchXU#sy@y5Scdq{hv|@BaL_)seD9DgNiZpXmnAaYIqTHS9-0smnEL zlt{;zF@ag3nUzNt)C5Tu89OU@@?q`VR@JRW8C84ac;Pg7Vf@4`^e$2@?O)I|uI`pX zaDHcZO!WcTyO@1V11^KrDH@zWN0@za#qf7U5>eaciYQ7=L4^hMHLa9E{(!Cs91X_zQ~gU&SU2yxh_&>CB4Xle?aeTa$bj1Bqz)byyVD zACIII4XbEiwzT7&24zvf!PD$?>WsM}pz6Td7|EDd@e3k-Bqy2VS5mFnev@lw!hjsrS-}H_esiGfuDbyz(QyO*PPtL&7yYv zG@ekAs02_UD?n+6T}g6xaYOTjOpPn32vI%Mic(abpRYha+l##C^a4Bt9@#jbws_t1 zqMzh+r#zbKu>aD*jW8HJ*M0-Gb_OzNHg?h~Ox2`82R}fr*8zEVCL>np%m@uSTNMER zdhFYlxa^eRm#pw>jq~6;ICi`MR#dtR@}(b`23FUcn*tR4xpr7LQgU67Kw${cY7;_; zz~u>J(@4yh(MpJ0J*k{Tdq5BC6B&{gtxx?2zGp*Sbp#{qwgmtkal8OX$$H91*k7cA zd+Q5p99ThJ0H+dQvhOnCW_(_T*_DAK>dF#yWj*W4^XNy)#uFg<>@0r5ZyTYR4GwC9 zyqKiYoab2xng>pP7MyrcW!bhj9WErqMO|$Rz95zI%^O@mCndi!dli-KLn5W>E6Ds7 z8*XcDAOhAa2g)go@*|B9uD>=elKnqEtZ^xSWK-H=O>38fi{l>bHL`^9JJ$vP&#d*X;Ty6=+5`Rj61{v%rG)?YRlln_(be>&%;U^+wdIw_fM;adj^aX`F$k{3!n@ z1d8tTk*#4>@;BEU4{KoAU4@rIdmqj;~LwhJq} zVb_7Q(S;M*xjmodJ7cdD2 z4<@$#ihVlPRVzPIH1dPwdWAKP{$E3;xmFb#xk)kq+M}=Kkf|Frq5-(^-ujd&mIumj z-hI7^yO^=)R4;O>_M;A6R0N26K>6R^&*9+syZJ@bdrL9S|BRGwIT*E(PHe4#4$&D@ zhVvHvFhSFk*~x|bFhihhzf%(N@wv9HU095^Buztvg=^Cd_^eREEdXS;s3!|F9N57&S3qVy>3DA~L>l>Y0Z5mB(;Uij6LuQe z+ojdKWF~oAR(ZY7&8cJG`HrT0ch$%5-JYfR%%jCh|0%)xYjm;(Ji8^*3>o}EIY$LJN0TDv( zc37iKFKA{XpmMe13U)i1p7=hhiA)TFn{bJxws@o@a*38(lh-o)Md3QJa>CYYa2kS~L*T8HUmL6@v4Qt4y~Z^61|05qUD z0Wkwq^lAp>%onp_TmN-_dbRuZOg5kYF4*8S4yFlZceR@Uo~78ctWS^YeD#%Ok)@wF 
zMYACykreOx;5lY#N*qj{4rICUqRE(@6bxZ_j*i)ietz?TX`bAT*Q8bi57eO1X$OVJ za#yJBOpL#-A!ha}8{i9l9EfHwW#l(Xq(ZzNcI={lOdqRF9b5=dF?J4OGvoWvtQKuD zPLgcu{4T$0|MQ`48<*^D+eQw|ZvV-s>^BOX-pQ=yp$kkkX;eyWeA;!wbiTh+nGYQ? zSJ{cEzs5(+4P5~^lw&7#@l}+!*uEiV@e~rg+OCrb)@)5qI@W)QFY>IIb~U}bR{8Gz zxKdN@IB!z9WB9R#%25~aOXA@2$x6xM3u~83)H~_4F}pxSrP3l_{>+W|rx(nhj->di zE||(curn!|@;Nah2Snh+y%RlB_vY14Id8q8zgFVM!gAsbs3)s-#iA59XQi*ca$TA_ zOeTq!dtVB7>aa=MF9glAqvuysfLciJekzdhvL_yBA*+SD-{C>PStveXPwq?a^|aU` zD$y9c89FSsaM>;1myp|kr4yi*8G5z*9k=Ly5q$Bm*;&xy$itE3jgIM*YESCG{A;tD<>)Fn5O8IRzQcx8Q?fbI0R!O*QOL>MOk@ESt~? zj_o|c0~oLz?0>%9Qr2X}3cX)#l=}pDvF&I2uK;{(e?Yu$0~pm1i_?s9@qe9JfE(Fo zKhlWaiSMLxS&%hRq=v8(H!whEoR&6{zYC1OVvntc?Ehpd;UZ{$oPuH`K8p$)mH1+k z(WS3k@zU_$&`9+lxn{UE(68#H^KV}2lp%_O)+GaOHiO*uEO=WEHz5|I=Yw0h2~pma z>MPWT8lWD(({N?@+^+f(mmJKAtfZXGoN1g({rIzg{~xa2103u9jUWGbJoXl$WRJ(n z-m-UP6WJqD5|Su;lOvmiWJHpzBq@6)5gAb#Pgb@DGJf}`&iVfSzyEc0uIqHVj(Fam z&;7pd*ZmqLZBpKrCS8G0=%Ey%QJ1PoUv0!fb4H-!e{9?5aS@US3}VV-{kT3{hLTwQ zzCe!GsT4HgK^|{h#2;`Me*J6#-V0gSVsdm-=J^5!`m_}PVd22YpH#Rb zvhn3Pj=C2@hEx9?2*e!_-^%yTqh(`z%<&x^98(9$Q^l1!6{2R=2d|J4`0 z0q;T4lavVC4_KtM`H;7r9!heVI#5q1q0{>&%Y`jKwG1uktRAq9Ac&H-olRo`q%{CA zZeQB;L7fB60A>A9c!*cvxbd<47JjkH&=%OzG4}WxXnQ-c} z^womzX^vc2a6OBo|~OP$wm_!T20tmt}y;vF8tHe0DQVaP{f(UAnjepCA9SE9!2q=?)f{zhN8#7Lb_lf)Sh8)u{&VfieR=}4#7ICz$aD{bsSBh3|EeO zdi#z_npQo$63fCo+ip!4Kh{3F(Lyt+@pi(K;f=W`-~+eLb(*l$uSHVvd5uyG$zmUz zAw@0}aN@ZOU99v9l4k&Z#?dcU)Im{Efq23-r*3bD5^|#?`Ir|q$Uzf(M%5!q z@7aH3AtzkjhZg%1|13fLibg{^4qk4okR2A$&&`9je>aTOF~5$RqQ2PayWbfyWV^Jw zjKDUjG+F-^kF*xYJsT{-5>%Fh813FaV?vw??7X91OFNg=a6$>NQ3aA6)RRpY2Qx%> zya)u08+%*tCVIf&Ry9RZKYqUMqRydkO90OTYMlCtDUPdE!H_pY-@2`^FV`SX2DS0Q zX(UyXw^67y(`zk-JpyG2pVkUSHUw;BS8B~pgVrnNNds*aLDtgZW1Bt!3Mm~81 zLvp0<+t-?FWavS=jP8KT5E4`7 zYwv*>x6^^H!=2!DtLW)QyLvB9kuPY*qs+!N!14+WeGikiXdz!@h*^N`<^pHiZ13f2 z>1+GYq0aW)U{~2PjQ*1L%RsOSD)AG`TZ@?~v6Ibfu|h1w$<6n(nCjd1o2ZW!36Pgj z9+O%CGa2$d>799@cRI>boYRW$KO0*(JT4fi!6&`etScsQ&mzmKF;zow9A1`rixKA@ zGoW-;PWDefAZad*)r98K#~iS`(pm}U`NDHhtCgY4ur%_7;_--q`im5^T{`GtYPpmJ z&c|+t85)q!GYVN0o#}>K$dauN>!ul}RDoRf^ z7mRH~Si{-&&ut14Lh4~B7XfJBev?({`rf%NC_qEGFWdS%E{zfsnn`1`c#ak42Xi6A zSd-Xswb)D?_CcCIY@SOs1?0!zF1BbSMZPOKwqoIY2}!3OW)$EZy6zxi&YhHrr?PSY zNuSTjp8BXZ)%mp9`_*5zg!dg2XyiLd`mQ^D#H}_0-EjU1wD{l!lUzInfPRS|K25-s zl6~XX#k!XpDhyjN+eVf5*(iZ91z@j1{)UiPx9GkoxY%PR*M<)$0&VopcV4DGRLTg% z$8sl3$xmN%%V2)1}P~{Q8mj zw;1I!Q){Ct_5sS91Yf<$o({q}gJ;}t&<)=CYFvQhrIjOlZXzRKF$?YYIEY4i`)>b? 
zrXhNV@ch|@TRAPhFE&rNF$Ku=d>$5-wNST>WckHOVv1BJ)1>BpDE9 z^QSoKd>~^9coIn`&LoMBom3`hUQ?mE_Gx#mPw-4DSx8r{!8+mnnUM3)3n)@UszCVE z+tX+B`DSk{MlZ_H-2!{6a?%~!_b%;EjSD-$L|MdM>O8P8s#o6Fy1&h~wlKyyVncEE z-ue8%>e(bRGy#*I-Uog~9An<`oem-g%Qf$K$gRP+Hy(nRa_N5-1#^Tyfg-z!W)Yom ze7??dp+C0rzRZ>EAGeA&A>VK@EzFg|6Lm`xpuQf~{{)DItEjc3;pH&)jh}GN-s`hj zr@Uu0=VzdofRTX@197cZJ25c+j)~na+p6Epa{N>el!~0&mN<~|c>4D>(unDi){hH> zcE6hk`soV*LUV%kxK@t^INlk`+~IdU*<|pLwT}xgFGl(B{Z8*zG9&KBM5Uv~71j%+ z)h3TNHR9CrwV_W0_fpu9M4oCzA$%0`e4vR&lgs&D{nTJypewr`&h*mh1IXn3J6YU{ zR4s}Ko4&t4{RZE15iZn>NLX~dmHl$&LeDb^9t8Yb?_4!4Gv3MXB}0NWg!&&6Qp+{( zlRh-VOp*R1uSwSS)8;$K5NdC$hS`#pw`fb=|^E@nXWWkX>@`2+2Y@McV z&A;nOPk0vF&78idTCcH+IWX$5pF)Ed}+%TfiK<)R}v2u5Y?QC@D5bo|8TO zlBbo{gGk70*pF{hT7=e8G`X*%}Yb9 z%-PEOgOF~yH-4}%@HDkwBXHOsB+m}5lfyF-qtc^0%^IUSJRM9fAuj|CRK#P!7%u>iQu^3q9XeBZPv_(H|D0C&R?h3v zxSxOIEO^nVp20Ue4C6x^5+=ASkcv>*r+8y^nHRwZ^C%F6UifDNI7sr*L=U5iXpzRN zM9LKhUCZr93=<3x6q(gyY`!g2QC|Q!q-Rs#r&lpw=+5=2ROzevMWSG5nyJA!Oh9^y zj$(z~TaHXPRkUOJDA!9X%upF>Y``iFf;WYJP zCk*h-<29wWfiS>q@k+1nQawQ(rY$U< zTA}*^=38wZMev6b5$DJNa5#pMMR!1O6BYnJV!tG;Vxn2`8fmJR`ns`J`4^4#X=cmL ztQgfy@8|}nfh$};*+z`(;%SwuQ3#gs+2L(v=tOE(-mnB(WLp>iDS`S;Gh&7fq{vt5 zU|lbr*M|qU;?!yNQN^iw`#r&S&~Y3b7ExnWSz%9^Lzt>a6fda+bEx^It`Va`y8A8c zOH!NUBL;HQ0QIEdmb2)PSQ@W8r;4VYSUmX%+zKJ8BELI!3vJ*VB-Cd)e{=2gD9P8fK*(3{alM^@+cmF;KHV2avP>Pj#)SJ^P z!JG5>7c>a$S5n&)ge6EGwiiTHIR>;`US$UcACV@sodRBu{wY)%1j-Hsx#?5$f{x+1 z5j`ByrA-qJ)YJNJ`Saa|+lxce`B5s)BmoxFbUNn4CtvUXv{vVZ`^-0Q^ATSV>~E59 z@xo5=HthyW9l9I<+sNFo(W@7E(?u;TGL(NQY!Ct*LH&PAUKcv4&d|UcrS{t4Wfm4` zI+Pl?e%#-U+L0b&=>AvT3=q;~9Enyba6s@Ln|7SKgtvzPbhvxPzJIFb{iNVO>f#V- zWM2h10FKyP)rU4RCG1(&jA%-P15>tTxH7*ye$EeQahZ4yP1G3r;hDQ!@?+xMh>;`J z=65KqD|>OLs4b-U(MKonNKWxJzFAtuB-15LG$?bE+r}~;riG;n*c7cYYKj4o-B^pio7Sl zxy(P;0f^=#>*2`SkE3Xs7fRuf`N7)bIE5ZfR4W?CqH^Hxa~f)#;UsLpfK-~ys!uHO zwMOQlhY4iJhk!`{I^F#7td3D_;(U@~=eqV6n! zS7>K(40@4}r5m4!Ki}2J(u5II^=#+UipWKoK_ErEc8}n~-_Rpmdb0w!1$Ht7Tx=2- zm?yv|zsY;H)8n@f3A^pJ$5LpKqx~ubj~6NN;-LVhSyaD(Hu61c5e(BvE2W4rDG~PN z>y*YhVtRq%He${(LG7!V0tBJE_liC|HGiDZ)^`B2*Q4XFfoFm4YI7dJZpB6Ureh

)FL4AET^((>jZuem0L-Ej4AvALLZWyFZzSQ+aAD=_cu{w9TZ9X^kOk5z#+HOJ!GhPjhN{EO z%<5BU$O3$gYmLI{7WALZ2+rl!01MNJa_Lq_gFBJ3$4IHuuW*uF{XD%)M)}Km zK|jYO0>l8V&St11*w3xM;P7>FbKVBRm}1zvH4n8FQ+R(hbcb~0WgQdYYkM5k}+a& ztZ=zyC)nGP4P7Y!q$6;M80|9rR4qdQ649HuBSlN}=(~SG!FEfV(&(V{bNxai08x!! zJHeXRh&H2rtEB-Dxp3PuCr5^F$*hqWClI$8(euWzhDykSSx=8zWg%msj&ux87$l3Z zZ)P-~$k#;qTC0ruvn|t|oJ<=Yh8%Po)q__E4%psq!)#qhqYd)6?nUn)z^1B}*D(8+yB+Ee9)90XK16?vFc+F8v5+be};lovTi+tKMY56@QavQfpF zBVZ3wG+&5CIo?^ncIvf3osWIk%-fHb>Ue|nFEyU*pC~o}-7f|@yddLo&fWB|ng-MlJoTEtml_yhN z5{55K!tmt}7}qKOi9_CMmzD7%kvoR_6%&<;TQsSq-%m@uOt?03rFsc>hK}4<@0RUS zSV8xbr<1O3iRv5lNI^&kio|j}G{}t?S%j*Z4#OdqBQuW6Co=rE!4#rPEgyPQFU>E7 zH8Pf2){~V}Cq-kD6futir(=7!T}0lx&K3Z4Cven7{URqv&9JRJ?bn$1cX;+yfu};T zP|sKf0o}T9@ja<34&G=IJP}=B^8+E7ztFnkGg2S z{F-6S6T=YF#4C418dN}}p+#?&K#b3=pMQoe0qh}yAp52+mc{QbJrZ9;M;6Wq;#|rn z;eAtjSnLEBAqhkVoJZBwhf)!zJP!2#Uu%gXKo3**wvX z1+kd+<#|9iLs+nH*$FRWor(_$O4z24qWv^^4NCrF=2xW1$Xehx*}(2ze@;N78N!hP zD4pemA>~Lq7R)Bj(9( z01yXyir(RB=`p~|(EALxv~CCfgJsZp(BCu2&OteY{0E$^`JWUY-Mt0*Sl+pEk|ps{ z_k+NyD%NTKRwKwp-i{a5r{4M-dX*2m_TE@aa zhnLC!zU5ogwT)WQy!OmQ7Vo=GA-In{vtgG9c2f1fx0WjR)DStT1B-h`( z5!o|vxNywV58HhweA1Ov$;j83O^I|pK>7K_5@=$_!})WRLVyr(TQga|QlYB!=*vt< zUI>I%SufMv>P3>#`q&%TZ-aBk?Cq*I5lAReo3&rurBuk5A)DMFT+wwTzZ89PX*70k zg#u=;a1KfT#g;0j$LfK54_FlWWb6Id9vAp=O$aU0_qkRy*8~<%+~6Q+mX^|zs>6D2 z@O1K7Zvcf?j0I&jg<+Aao*tV$3PBAb=nkZtkoT0CKU{RB6`&77E@lOrTWn9&>>D*2#4xF>rclSoBNONM`Uen zlZMf;jLyHk_X}k#66luBJA)>?i}hL`3?n7Bg*h95zdIjcdi%kGDAZ;U_8u|}it^ad z!&1bK>of17C`?U9E(b%311sE>_2D@a>jEjYc_|6?X{4imLCla@5GolFLpDdqGSnuA z;BU@<0`+L>E9R%nD~h9skPQY4^CKkm*%}~Aj!U?qqg4aQ4Zp?*u=M}*$dv#2BUCd9 zVgx!BiXNWw%S`z0nEeZ0uJxjrdtBE~sdHByA^4KmCLMu@d5nn_OuX6$`dJm2b;|*H z%(m-zuwwh|(W81MCqt)B`{>AD9hAPB{Qed!{1UKV?nyGYea}6PqDiB6x6WpXajqXH z`<8rX3*qp}j=D*3J*@jjvpX?$XxWUB5KEc4afkyD&~hc+5)W%w4zAjn(sHbSqc~pD z^tMuwkli+-`2+#|BYRND=7p#Pam_^NBseR-djd7RzaIG6aau{NCS-MUz*{L9dsSIW zubFaDttCu;CYHN*zQy6Gp?3n+X=$()S;_SmTLDSx0NoY?(+JFp!A>klO7boI8;LxRrX+WPp z*qjO-EJ7HBUO{CIepN`eEJ>tN*^4-X75XwThNFQU>wFu9Nu2}PhYU{IXL=)-dLCfG zF-BlDKJ9Eg{XF)}L>w0~CF*MjU+SC7sV-;yo_xTjOHNcH2k|Zlgw|tHwyc&VN7h~> z>KYiPU7I>aJx*wUtf!|uq-2_|U-`;MsnNR)0Bt$hTtCmJ|JnORa#_5XFaP0DwUx*V zM`YPIY8-9dB2DDk%hM3r5e8+ks{$*O806|(07C%ya$L9J6l02EoxQ;~6F+M1&P*vg zrCckhuREK14^1%}`HQG^=FTlDo}Dh9n+OD}nLSg<@7-Owa@R@xUw^8US(Bn7%qK;j z_T(BW^6XJCB!4@cyR2?8X=r})-16$E8?&4CJ;xiw$_;a_wgfZF?E&we#}Q_ET(UY< zyQ2bmf28t+=Y9dkE+k(>4<623tL-O)MGD7ESh#1PI=9 z5;3HgnS7N)iceB{9+bI@DNqnyaMm4BSd<1+^ z0NcQ~NMz(RW=h6`wkf6PfNP+%(GFEewSacH^@2=+Sq46d(d(V5#Tg&kY7B{P>?}ln z6>4(Ax*8L@r=bp>`V2C~?;qDuPJ<~Y-&g4NF|@cqApOIHhTlC8%ClS?26@yvse5rx z8`|Z+!D4!xYrCns3!dA;&3Ao4Zc0)yZDx4oFUYkqlEm}*+rw5p`9@68?)Pa!eI7-T z)UD6^+kdzxZtxAv!YrloMC;JQVbntAYr-cc$Ft`82*<(kwPO+U`ghn)<=nHyGy|!d zS&2vLIg!a)s!v~8sbj(>h)Gt`R(5&?#!X) z$()p*F=U;MQLT_6kCI?EHUi_zCK*_M#edr0rJU`%D7oC6YQT3Q(LLhnUhDn9Xf-=I`N!IMF0na zw|9AyOrMX*r^iB8t@4dxeDcj1Fqps@QBs_%fTP}~%E(`;O;UE4v#fLo!=vBFf9H(n zj+`kK(csviNxmjXXMs@y$3R@n-%v5H;xTyL{Y@Xt{&>*$=$mXSSucg4xX*s!9p%lC zj1yn>kEqa|D_>7L-~RGGn5&`7(w*W-R{^?zQ8}7OpTN6~9QybAMy!HlD!)NT{O3rg zI?%wG;?qn%^rjiPeZ6)V1mkwHf9k|{h$1ts|7Y>6pP zx$4qjLKUUaH@H$zP;IN?`TDE-h-#>SG&$Tt7j+;30V+Ga z)54D2TH}_kUxA1Q0v>)F0GRTSyxg9)!rm?RFo~2-wO6`;Mffw7o;}&We?rV5^f$k8 zzi?4{#k{Z1(hd3xjuVgm!gX&8F+4K`a*C&=X0586h|U#+zyaofw&ZLs4CxS{2C5wC zRX5RVw|VNZ$Y!57(+KOE)YBg8Q!N%+JN`U(9*yt5LiT|>uRBUtf<{cuM&nIcelg#a z0t9wQiZWCj6*L!Yb}eP+f?CYPnq>^d6I6_h8$-?%h?*0 z?pwpXmZNr{GIkNpPc>V+{8GniF#t@$_u`HY@9k{;{WV zG$M6WtPepoG(@m;zCK7P0y_^_Dx!RHY7Uv)f-_Z{F|Ny5W_uH02jw6@+wcm&#hSZs zyfU6|@*$d1buJzoS<8N(u8)eo-frEgfwWo_4>})jKg8j!LEZU0yzA*aUqP(cn9{2h 
z2v-3c&sIZpciL53s3auMrlr@_eOMxAtmVeGX9?AMcNc)TU25=>J=EtI!s%2VL#c;_OgpDm= z&nOJT6AebR@i!lzkyQT%rDfi)VQz?zmPS?)4t#+07fmZtj<5@K6oLi}1W8n* zfa#QikKlW?o2y${h$TUIyB+s`^f^eVZL0TdBM(ig?~Q&Kk$h?z`U^v6QI54D2p77> z^1&yRM>J@-VN?a8T)ty z)s8{>q%wABqCYA*Otv8pgUhr?t6?aX{}>XhSYlkof+hJK8O4K+u+}nbl20CjmmdAbINJzrqL9cv9Erb??gnoUj;5X^D2WlU>>E)urEX? z!C!qtofFiVgVm$-;$`eIdWt95m;Pp`85sBL)=9Nh0hg0BLGGANXd#N64s|F5#Cnv* zxIp>>L5lCYcU+pGmZ@1{aSFS3yjYZ^et(UYLS%x9OiW*-X%&ji;-tr?-_pn^Vu|jh zysjH*ItbN3N${DM@+0y2IYj-46iaMV#|F(b$LgpxgsKEiRL~{>v!OL_vf{7iX5_D0 zpS4?bLDpz8kAN|!G1o{`;g8$+`w)l@wv?ATFgfk}eP#sG%qSZ16S|SG7(Vu9$7P;a z!%4yh8UUT^`)B*l>hC(=g~)KSm+Pna{=OHk`kxm4z8-z4f0O`pFlo|LFDXZS(DY%n zG*ZJB|7I$r%i5bnGas6!4yM`V>>(;=g^SiS>*x_x!9tTj*%{O{lwz+sN;)wIQ%I*1 z9me2Av@G$Z9H>_3t;u^hD4N#fa1UwYW-vs$Po5L0?L89`$S{+eKc>|no*7lg!8tw z3S08i(?pGnvnd#B5HnB2|HG#@XVioHC&*~Vdlu6r^B6vqY+|~Sj1;0b zAuNr_c1(dYwR64^YAS}v!;7s=gVZGTu1?KL;1#EIf%r)Lj}Lg_Qx!hOOn~iKvgv4c zQ!_1`TU{P;>rpf47wF9Fm`HaYD})d#!e^A)#54vG$K3#y01WB>cxWuRl`IVNNT6*Q zyKOyV+DNpqqrFA?2JHObo6Tfu=e{@;;t5iVHq|yeN%4AWZ5=bu)qp8RZ{v^WSyoUs$1|d<;@iM5l&JqCcSM z!=k@su)EZ5APOn%6m!0@|u>j7BENzlovLx&8|{DOze_KgNkZxEi_cw z3(Ryv5s|#bala?}1fCHbbbD#K*P7QXxTuB&VKK;TW>hD8iVvHhv2wnfazF)iU*ukU zoqX2qcN*lf_@n&qH>4ZV_%~C#_qy90lq2*&z{NUw03}y?=_d(Pj$ZPGrd_I_-54r< zs`VHf%YLoyLi}QB83FHgOr}`vsA4D>lW23?ShoLh#^}HI-?gFJx+lo1x`vlwA%1 z_96WEb9%W#0#mPZPFC(-#9c%9PD^eSMu0L|0Wz-D81A^rr|qdApd7Q5kBxmUzHa(K=^UT7qlVCKLYw(pK_ znN{EV&uRwKQS^>sVF)6)b9>13I0Jf{H|UVIu?nmmZ34xggW)3@bpL2<1Vp!^QBwQ| z93U_|pzwt^l!swJZvV3)&Z3)I23p8fQDyinY6zTWYG86S{s83SB?&h+;s3cmZSdjT z^IZuP1TpmS2w!q$j7xhZjl3Wmjtkf5{iAdd6-zuNXew8syW4!32g{|3VQ&3)qEedR ztDmBq;6lb33Z#tVrI_O$8aFpbEcH03sx*YI>Hm1SRX7qsG}Yy2G?DJnbbY*yEeJnw zmIWp8{PJl&=Udp2(b;8687p&UgXNBht8j%OH14v=n>;GG5Y2aBvk*&4T@J?pJD%eQ zbhw2RyJggw_l2&a;w&X+a7|fHKk&o54~6i#nwpf$_UPaJ**B)|m*+mIvTM2>`@+y7 z!OQxVX6iUmJD9w$PSQ=}tsbZjVG66e!Cqa!Eu`5c9?oLA~nsd{XvFGxaKdsPLnsKK`;VEf^OhnSpA$@Pi1WfXo%P&FobO5(Ww2@vrzp zUI_`Ay(glrIHJ^rviRS2Ljd>c6&{Sby=eI_+{tAWMvO5h z#eU*P5|Dkw;F_CWnFoX>eh)2cvB`)zt&8osuc*Rp<3lbOEUe|H&WSVxJSX8ZxoA6~ zg4#*Spy_>!pX8-5%=v{WKAjh>J>Ayg3hY&B#|i&;)vGOjYCuT zM(wqGEXhpJSel2G+nlh4Al}+i#kxW{vYHJ1gO&92228Bu^a(c!jnXMws`bitBI9LA zwXvdCv6uOZsSt!v=F5aL?0YT#8}@8$K!@*|zWZgpL2t&Nf1M)lb8jYS8wgN49vov4W*EZtf zuu>RjwRDm$#F{X+jCYtPV0RH)2;xA0WHJW0h+--WyjPy^f^Dhk<17=~wkTvI-D2C; z)tT7^E<(sPwY;WFsSjWG{?1)R{d70v2yZ6m&u*vNohOC*lQS%BC`+>BITXEDJfC-? 
zLrI4qd#F%iTU1{|^}>eqa`szmOa3&TGQf=;H*3cm)?BeXra+GC!Fj{YV})TkyPv#b zXM?E)hy1LJQ$#>3bd|VaIYU+!_g`n_a*1)2IzKs$a=h4oKTTN8IY~}2X^uQ zT8Sc(S^wg+xiL4U!aE-F9~q~YOUb)a9L4%?%|KaSgD_T7+|Q$oa-ERb+k@<)9lNxS z)H!zlaUcv;PBrkA2nqAF6;a&8BwkO;%<(aqV>mfgPYgBt6itk{$Sj8xdi5beGAW`R zbCf(pf^N?1ft>(J1jk}RioLoi)y#dSN`IzIlE5 z>ACYX3X0>8=y%Blv_0ZR-78mL8RMT14iAl{;rs*UQ#j?nb|8BaAkEl{_@dbkHi5SB z@jyihJtxd2gs5ot%t8;31V8JIKx>5-y_T;ZH%tNvk7iXQpci0#Pq9#e&B{(UNkT8y zO)?j;`%_F0b09nyf;`9^>SgKR8d;NgwrwRI{0re27T|2o{nPXySV@qo`rmwq@!)yA z_CY?OZR6#a`(=++&^`A)Vr`Y{7+RUz4k5OBI|r^s!J)ZcgCSJW*jf6zyuul5Kqg$O zh_@Q!%W2fJ>>q)QF8`&Cj1smT*0_cxBhc)NOx#Bd&BXD9uEgNe`?Y_5;LLp~JI_Ut zHnhxTEfbvFlqF1vl#?mSbP?~ot_;jV8I*DN=GD8P!bo3~hFjwoA|>pdV^pmY#HMR< zDMl2#%bW&RalGn$orhc67f!duO;YpV=t{;fiaNm@qpKR7f?=6}*TZb~@L&ycfw-({ z#w%bYn-cX_?_0b7E6=69lZ(`y0&X?3tdlL7w0db(?aP7$dMd$`hwTrm@?9$I$YlOQ zoqhGEfkC5eGzGq*n6{N?S1vh*Bz_S$*R$xRY~lX!i2N&tk$1#*iHEPyqEs*qba7Nr z55E!x2M)E-@Y-TC&UK5=((9r7p*WT_BsdKjSf1Vp+KlH7Rs*WN%dH~9YD7qx^bYSx zoWV`_>Mo#N|3i5zg)@CN{U^HAesqf&u5Rtz9d-5a z?}}g_jRtv=dgM#zQbYUWFEVd9hhCMUrOvDqmV!a_d(QACGhVtO zneqlww15(dlaqcNy=HR_>}|itGDCe&6Y>_{o%#z3v-23Q17sl>IY&j$nWJl5#J?BQ z!#UmFbHvn{g(322Kr&y<(MbJ`5XYPEntER-BldYFojJ8WHH|)!gO(L~TW-2gZOW)K zr$@{>7TM_rOV8YsPb|oXB}JBNNg`?Ja;)3XeOxd^I88ToI}N}Q#Y2O|UXntLN3G_w zsF+zt1x0;FPOK63MNwthg#AxSTR2hjNVelr>0GxxK^SZ2hbEsA&PCEwkb)Jx5{A3A zX!=;R340NF{4VDCA)Rc1d%9mVeJ`Ak1S-2bPVZqXSqa&7B26G-B!u;~q;A0-SDd1! z1TY*i@DF~(yZ`7v6R@f2rg8p<*JHUUB+|TInV$RH&+>1pu$-iCRN&{|Qk1@&{K74%TDbv#*ug1QZ5vG(2!=qCM<0A%C)Z7e3&H9j&77S8l zHtZEl{6l@KdWJ2})QZytgrD+pqy;=&%`9>_-7QizMv6(t1`B7Nm;qu!GMALq zj;W}VN{KYeYS<=GiWZw#B}>PT+KT@vKRs8~SLoO=E^(9N zgLUer20SpbTr;cnciG({e^9wObcjCi=`lVTZMYS5P`snZy(-+a7>uOB6$@_TG}Wh- zX=L*;MzKj3U=LsiMk_hY&!OvzKEP{(Ym6J1naslal4oA!1L%6T8oL@ZsQ|nwVcm8BX zpfs42hM}3&JTce3=~?Nhzqn<<{(Hux=44~!B<@kXb%JTk*(am24jbc!Y0O5V?s#$* z+W=#SeycWIVu9ClC->%omFd>mtN#v*LHs3VPda^8e2M_Br*h+0EkAk1UI*?U7R|gG zD~-iKmie+m5B$q%H_ZtjO)}c3cuaw5RtDsSbmskU6Wz&1)ypjo=)d;YK20rfT7*Kz zGLykHcfbGAF52l`^bsohJRKns+{3S5zJDJbMi;s09!o(A56RZxU-c#TbZxE?lPQJ! 
zA>T(|WEzE77}^!1G{ar2Ij7k)p37rA5WVZb8C<~P5`laU{yd88m6C8LeixH z9Nv%^okc#e_^=oGaw>0B#>IQazhP6!GBl9aod*zPkNoYre#49EistVjY0q1L;cQPBqHEjZ)O)&_!#~slvyVftKnWuO*cV_K_d(`A?|O zZww@v_Wb@te)XXU3R;{P|5g;@s?7h{II0ANLLsFa-4t`R`j(ng8|ttaR|T*C5&s3`FZY=Ti8kfOGA#s@tgWkFA>Ns(h{Z7SSS4Rj2_E(= za=(A(Ab`jD^V95!L8B8aF%qeviWKr6+gdUA^0Agm6XL3J+(eSOC~XZ~GO9 z_SYw^vc3+Zj|0NkQmOOBqZz-LG}M@tjJw3F%W4bS3>2%(pREn8L3kh|tfl^kIeNXFQ;Z0Mq>#aEy)B}0?hbUE*6 za;xtsZvHY=Sl(h!!q~25XA*Mp!KA^TVL~Sie9ejpx)it}Pt>tzg_dvAj7c~k^claI z@V7#E&*OS|OJ9NO)!`msx+Gp1U> z5szP5d%cjKDomE(BTa6b?yjaaEBe{Qn{3d}Ubrb{v#HkmL%mSc?rLJ*(8`d5 zGchvffGT@$=MCet;D4AP!}Z`{zv-&M??bL}u$xS3Lzs~UPyUnI$^5!V;v zZ(%=Dew1JcVboBlNVn!yA{V{D%fxo-&O=xuNc(|*ryg8s&LvzSJ)BsRD21I*7Am8Y z{8DK%&M1^c5^eCbYF1JqfW<@dY?^^cUk4k!k@Q%#*m@TmYw5cBq*s4YHw+dr@hu%O zigfVgV`whZa6!Wz#SX1fyZ7b-wcuN4I)bwq!C-niVltX4^5sJX9 zYJr{qkv$v*nW0-2(}~Mt(##cGN`>YT)s^N=iCy&xcbP!rwzNV)+_W^jkbVo=oNK~0 z!heD(6@b*}<^=?c?fH?f4HeA8DmL1$Dhz#qY!n5vwF80& z2%4guNIx^FTnI8u$zWU0@`S^tEn~wLtvQIi6y z8+X#OSTL$NA=xwEvzjp82Q%jI;rv0{{405N@Kr(4We_S13zXx57bZyKJoD}W+sFcK z5H~tZ z(S7ip%*aka6?_0}{J$KfX=`cAcY1EwFMW{kq}wvKn49u3YK!!TYsCMqs}Ui1nr7Kl zy=LDupEOFIX~|cM?Ge82vkrpgy=|_Dhqkmen?Mk&#{K+Se_b%*?D_P&sY!h#m6VTt z8Fx$8nLtX-A6n9|`T`b22B%v2;K#wZ~X9MkB6>qi0P z$oK|x=T&|drx$r=GGG4EvUT!ih%jd0bubH>fTz27w!k&NFtScg{^be%nz?mS!mxYW zqm4J^r~Z+24AZSp@?<9hy=jn0n^3##qD&U0J9m^@%J#gU{3$yds*}`nJiAi^ZQOL& zwt($W8I9QQr|VHu+sg%@`Q>fM_7w|+FV=E1#Uss{J7F*$M>VG4W56$fjI^fbgzhXq zc1f@r8Tj34XJzK9sa3<8LI$)}$+)4E1Rn-9DI-mSg zpO$sslOac1H@z>Q(Qq)RLO-R}U;zOz7axr|Yd^47#ZhIEdk>LjA;i)gg3eLHw6gI-cBQ1#wlr zpx9GBL|=W6d`zr#@U#Y0lsiW&2NyS)$d?~cjbTrbP4hCHnSL6cP`i{CaR-bJBO=Kw z=%EzGCE2*zo{d4T8DaE?+#g|XP)>#C4y^BLx1T0o7742eD987#P&=xE$^Wpx;Y@m{ zK-RGRstiC~kcox@FJ_DE!T^4)F?0{t{E5k_#zB^53! z+|e<`b8Io&rS}oWkseZQapIzV3wzX8 z7K4TEteyX-=Oj^Y1zG1d%u#o`mAiMo zyH6@fc})tfO<(A^=53)`ZPsiXbxR|Ktdu+h3to^0qPWr}OpU6AppIo^96CWVZ=H@2 zPMg*R?q*x0#=kJ=T)0y2i2Hzy$Q~@_a$fGLToiwn@;1#K$=Fo?L{G)26OQ+A;W!0O z_)Tu(CBh_sdur{Nm@=n9cM@IN9{Lmr7DTc8sX0gdhV?q5GNDodD>o4PHi(JO)|o4j zK7Z))h-o^Y5_ty)d`tLo&*_G=|CMh{tbcYcCJsb{&JA@7nyH^KTCfl$tb;wHn^NHu zR__kAHNz=yr3G@>79g71161aly49ZN z`i=B$7r}F0=1FHCl4DRLIM1-8R>D;&>*2Z$m~a(t~(cZ)`v~tQK2!MilXjUg)M^8|TBCoCUSz{h>| zKVQGK2bTAmQdt1+`4@vzTBv|^=MY}PEP4Ityc*Si#ope7DEb)eUQM3}c+M&)p~yII z0}dI4$=~@T>;x614JY&==wsQmIG%6Llhm)W07z#I^hLbgF4C+}5u}M6|MuTMqR7NQ zm$yt_K5*voJ7K0ctTJqBW|Aff7q?Iyvy*JQS8*bVyDvp}y%C_Ie-y6XYt4D1-ScCx z0Fdb+;p=5PDQ!q#_OaZ_7><+bu}BVbm36}@nM-h7TqwL6oIL|gyK5~Y8>i+8GDYj5 z)kFSV5+Ua_Jav{_m<%xSII2s)bhW+|Sr^>MP%8APt^;1KG@7B;lP`@xZ&xGO5 zC!PzHq|IEKtp>U#m4<}e$pAYKHZYDA8IlRPmrY~q?Ktg0G^~4}^{?vwMa^I|uvS(0 zRDQandpL8xr_v#OgW+`>S4!!#P5|mHiOgB?i_K3jEXV|lCU{hJH>uvcY>%b8Tl8>4 zt2)?aZ@w`L2z0K_vZCsYwoeNHT=(L0tb&v_84b{IYAXdA4}gc%i!Cz_@X$jIm1kyY z0#S^!>r{ffYOOEne0bIWXGwub7zr1o&Sim|3bo8)t4eBhR%9Y8J)0@+SsNbb8k=NP zZ9xFpc-7j@KCz8}PiB$9>{-GcR$JulJtSyfQ@s($0_HT80Msu^ttFvK1UL&0=8-a(S8e`ZACzDx`<=ZibLII`bIwX5`c8n2r?`3m` z<=k$c8>TKWo7E!<%l{E#%k~E=q~*ZUdD=c!?0COtm41lA6*M+w9g*^xiDyw6>5xJs zQ3eAGDpL;{_-w8ztKLC)J#=c{X0c|4xa>n|tP^EEdJ~e&U=cP#qJ)EW(lOWxl__(R z{3RBlH1N1Q30cOh(5$HS2s9j$y~C|)DO2uBY6EY*=DYsQJ(Pn^RgmBdgLSnS6O9fk zSWqT8ZL{2GF{MS;qC!dbPPofHG7WLRNtk#YTv_dG0$&k>>&$BKVJce_tB>R5)CL1U@L%Y>^oajyW%wU$ zD8pHGD1a1K{ZX;TxmzDjJmqAp^l5lIWUZ#WQEU}0q7oI3*)I(Zz%q&(n_|`%&;Rum z8sN6FieTa+<}gJ($Qh6QQvj-9y#4eRw*$oJt&-#N$BBibuNhi=`PU^3D+xK+;%7t+ zDJE`wLGpp`qUiOnK!KEh?9Pf*PytjS?B6I3vK{&|k(i1^%lvcbo?tsfbH*TitE!l4 z5==GXF5Yu?t1Vg0F@j>!VuA%obAp-$U26YUvqIt<#_3_pR2L9L8s^z83MwEnh0;I~Ac#ZZqVm<)cz}v(RC! 
z^pFZB;EIzO$AzBFv5QW$tC`l{QT3fIout>%vP zYG0Cb+Sne)rjUOTVt`{}t^ch;#LMjn4dz1PZBGpoz984(YJKQhMdQ<~3MUqJSv-eP z_x`JGyp8JV(ZOjB^mw}+_tx@NF=PH!!nk+O6yvSbzSY0AdSQMyim_1pyJEjYJH^1v zs4~EMpy@}~+y4Je3u77d)YVEZ%FWvLP0k}?KNH(~DK_X2nRo11^t%SufCvGHMY@H- zWlF?06+e}p@&BdpnH=De5m&p;Y*rB^+-vN`T068%kt5-CLFNW9eBg~6u5l$0O;i84 zb{(%#3D>eR$TUFS=6fnB#MJ(4I{4S*p*exTBckH|xmpN6VOUX5fFOkqPABZ_)>;K! zigy6?SE(f=_(=Lx1S>&Xn20Z$NCW|+OO4m%JGH7uQBU$pot>jl_(kVd?v8xL-y0>qNlYCbET}KW)x>xoJ^^bvPX)b8+2FfI{Ef^09sH8a* zoY*@w!= zNG0lvXI7R42%_OssU+VJ=Bc1)2swON?!l!y7+=y3rNWJ8_CdQx6^TRcnHXF^-H)hK zZE*@DPA+1?7DAS0yfGYgIMV@VrVJTJB;_e~J45H8)&s9wnf3soVV-;BWAa0olq)1a zwr$(wq6KL-*L%Iks`csgKi~db$fdvAde5Dkp8MfcPpe(Mw7HDyGV>o*kwolJ(m{{@ zLPCi2<#i1fFfu~K^m@v1f0hOaK)0s?9J&|A0Ge60I5fIDZxwaCNZI89Bn>(et?Y}y=@C$gG$ zAKsL_7cR;5$x+Hol66k-LK-jHvbmZ3T_ZvF^IMK*TqPG_^5?m*-nVJjRk!8in9p}u z+_KsX492M`DIPAv_e8Dun3KaC;?^=(NW)qI4h?$jhRUjkfPy9!% zK)i3A5`{`Mv#GTASXaUy)3L#z{QjlHDjIzSFUn1=`SR=~fhhyQsm(^wIXFQpm_AH} ze+{&sZ6>u=HROJ4WD0+2(2X6qOG{&+_o+jtkL*#nG70>OHv+tVB;L?5(lyUTQ;6(D z*`P8a^=QSciiOq)B_FpqkV^X$w3#mXR5+1N)DP6I^iJ;77EJzxPzL)#p^&Go4)Pq3 zE1RSvIH9`gHhPb!@z(d-=|8aavfg>P$mQ z?j{qGCQo21?d5=S!ocN2fHQ__+atS%G#Kyyc2Ffhm@oWm8rZ(XEC83zkC;cnCT`X^6^J7oDQ{Lt+b8wGXSVGV%3!!sc8}@tn!8U`*4&$0wa6%s#ee}Lp1kD?efBSP=-RUSs*=H*LPohvwW^hFSJrS6uZp479_2|HS4nl^ z#PKRGd7X|BUQ|^=yoH?+C(ic55Db1` zdQay(Sop*Sl$EP!@6=Jk5I98e!&Q^?$ z%~|X92_t7T4VtNciwqgAx#d-EY+3_uhBgyaG&y0b3p=gy*UCa*5 zZOx_~NhWA*mpe=4L`-E3M^XWrg>g-__U(WDON z7f7Qb6lkR?3@xPp$b1OW$fwd|veeOr;c}?H7U&a~7|UR@tJZk_R!c9`_Drh`tFyJXL%lyPFV*Pi5qSz86bz zM;}7gnP6j6OO2hII}#^p%#5IO=U}%AGi{1>4cPN)RTFe)cIAK`Z*#u34JlJmY-WWf zJFBb1Q`tDr!m_B{cf>jYFX&WBOt$yAbJk^sF5%d6Vodw>Rr;JwXtp3~1) zI>}VsT|6~Z27TABaI(-$y(tz8@HeuEIf`56a~M-yUq{bsV4qHW0aA4m!-ALwE&1I$ zhJliX6q;Jj%+s_%6;8mC0fCt^cG!BE^=?IL@x#8m!ktHsli0IJ78cotS%tCdE^Vlq zdZEJCuu@@mpqho&W7!>l!67B>&TX#n>?Ta z2a7LX`h;y>`tJUJq~%i#V~x%@jTK7Ll6As*20t4Cvd|3t z5C$-a<&~``c{I0GH^hiw-xqhWsjpuXF z6CMk;{Dbivs9pACfF7)!AtN{!1S~~nn9YdBPi7|gWSLcn>LLr|^T>RMbGcJO`BjHi zD#bQg!H?g3r6tNHTkZ3wlc#hg{*f2*xHgI_KUttZ1CuMpdDg#`o1~H4yV!sA6}+PN zHQcc~QD>7K4xJMSp>oIGJ^ygQB6mPFiL46 z?N*I5=egr9`(!APGty$tMTX3kT7Qho*5ws@F+%lfJc3dgpb~;LeRqL)&=a;f&E7qb za=E)6htwa^R~pM<>SX$vADu0LNLq#CkfRXP99)E4y_=>U-wt)gGD%RvJA#zx-+O3JyKyq7*Z_<_3_yEQEvX;Ai=N z%*u8DP&f)y;g0;6TO+!*bz!dDz3~(v@2ow z0Yap`cu@q9-X?k2peWSBs50EbJ(NvfZlkLGm*O$MEMJ}G2kBL+1b#46I6n9qtu|Aa zTFMr_cl2T8N9jKr+PyB!57jtvM?*j zKKOolD}fE9?1O3`UMpCAsrv;RlY+@gOg}1efSFbkdU6k>quITwTeqb+_(|XwF?3_;GBMINE;JZv`c3?-Mh1!sfm5 zSQNtYM5Og|({Jhg%>Smo-?h4@pM35v;-=j>qeZ`RS=%Z}MT7)#Rdt@P0Z%NyH&MSXGQod@l`=rTa$AejeVY!E@FLT8nlzNhs}VjtM8L z<1!(>!b-u=TNzdu*LoM(nHOiEE%;(srfjetNpiuB9$>lRzE?a9D3a6jtl2Q=m_+^1h7aD5gsoEN= zt$6yeUbO2#z%~v=aeq>O_t)5Euru4?6rui|9kr02(0#h@y(KVR5&3(I8sA%u zLYOvi-qVPn*m?=J_-j^SZ{4np)5u9@L4pivp83b2$Ums zb3Y5+r_xqlb&CTT*J^*fIzHpx3f34#woDu=={qx2wUJZ%H}UgjBCl~jE1EGh@LFr= zp+yXRy*@5!F-zB}tr=;R2@i!$M{sj*Zn$ie@{lZ#5Z6s|_mGYMYI=>5CVU1-FVvqN?a71jLcFiCTp{Fp@H??iIg{2P+Y$z ze`8@VVuD?Xpp07|Fz0tIr_r#=&l za0!|DW>V7Gz7M~nWJDz_S|-{+tqDt?40KJkJt>ltFrK^ic1LWioYGZUsk6BQ@hKf& zZ?zk!GV3<@G^c>teInG6%6Z)wkDY7}C-I*xsqrNXw%FBT|0cOR>{p%w3M)JON5Plm>O3bqnjVROB# zq<^SU>L``i7!8PQ@AKs%xuT@#llg3AXICe_5KBPRk1RWy>CJY_H7Xx{S4&sD4`tZ8 z@Uc0v)xAx$pkx;tHu(~qT$*$31f+hMI;8KgB_i{;dCarT?BP1Wak>gZ=EU_7pJ$AM zDjWb-IUuq3uzL6jssSrL+ObgD3kqea68?f6MeSJqafq3WrrmIHDb`CV{j(Uh+Ehw5 z&Npdk2=iq$KRzED*(#?@L=czz^J%XS=w$6dWHu2UhIEMWq;?2sU0lIEneay9rP7rI^PEink)S3vIL0- zPxjbX4Ssd4iwOA$e9xIl%)8+wwh^g_F~W`eVNLMm&_{7XxFxG3Ja=4Pu_{ zhln+*eiu4ksa6P!Zww{$G=Y|s5_pG54;_fT- zNkaGQaX$LR92a}WUWCIqqoHL5e?@OLbX@>KoOn%kwg5YjDxwA^nEY`bT&l-)M-XBe 
z+g0O>u|y6f^ox=7t{Par$RWI+Fjw4Iqc73O!6Rltl5(7t_(U)BFsFktgJC>pGk`~2 zvX!+(oX9C>(jlmEj8Hdu(e?nY3dNao)@OMlRNWy6^x}aU27t?5TAUF~_XNf{U2i>A zudgT3PgX_TBFh)+5BzWLT|!MAaoTb*b0$F0ZHQY^I;h<+l8&1sm|zWsxYX+8hdE@l zguf;(QC>9$L~`JkivRR@POXN_KQ1q6m8##WK2Ku=ppYxC1@S&K6o*Z*1g1ZYR*X_( zNlDr~DH)(O_n#T=`B8Yy2#fP3DLB(}sX&NF>pW_ZunfhkInH6A^Yp&Mv;P5;!5sc6 z;;`YV^5!Ff?N@wWnvI>Uv#SufE!QjjVYBPJ&2cvK#t%}B+d1UDEq!4FFCS<=iE&CZ zWc1l9GIAE_OU9Cn?gW*6Gc~n>@kgv+49KTIgA@cy0|c$ELHkMR?}iQW!swf-BHffk!wD`3ozq+y6P!W zOSTx?o%m(?j8I=$d+Bnaago(L2yIHn_p7=?Hp-+!X;y!EBav%ns%m7Ah-BW5r5G^m zrj^NMrP*R{RicsZaSrq@ewsQ7-v|DBcFnAyrMGtK{5eA3`oD;q9tmcwyu2=#!)|8W zFi?%2ypA>efNAzF!w;kb59+7n$0he40h{xF6IN# z&Te?4DBKmLI>ml>6*p^B6rZOBzm(d%H+aHPUCyqlIOs@>ZYIUa{ygG#BqUb|i#~yX zV)=fk4k+AjOBcnBbcL`Ph~)+*dZ*#aSC0yL1281ow%$Gb8oP|Hj@8-TJ^rF!)U3nz zeV^N0cPq=TOZ)Kt7`ou%ky#T(0TX2$lM3D>ADFli?=%a zh|ORlVmRjAJ*=)(jV+Dhcs# zk@LS?5;6A5LS^*?q=kGx?PX#h$PGo(GPQo69CM}jD_1TtK3Dj>$vD?S%rUg^L9qWbJR z-L&`;t3AkmsKOPL`rtK`Y|wV}>A;_{GL^aag%LhERCYNZYMj~}BR*u72$C2??xW&D zbUxc27#6*l`*qDWFD|4@hcqI^SH*>xbz{M2ak|WrMa=pIAJxjT#@v(Zk=%r}%Cfdd%k#@_-f zoN53{PnHI5rSlX4uE?yTk!LvTrKE8dCMjX5^aO8 z?1c*6N6x20`y2lRfr$hMwIq0mRvrIoJ*~sbPM+vkmVaMvdKUmGCr~Gn<-rya%lCSYq!v%EDO?92fJuvmu4v) z?O7=wJ|^8BU9lu`cTNDDX?Oe{Vc}-sLcr-^Dxy{ba<{_4SHAlWTrMU!#ljf5z^}v% zKP5t%)bgEcuZqOHZy?3FRtLq_k!aIH?Vzf$y|I2qkR{T0f~{Z*ujz~Y09Q8fT>IKs zphVny<)L3qUp!qn{?|_BGxGtALulJ=enWKRQm%F`M7Fx&EIhA?pJp=g;ce;V+~7rp z!I+f%-8IG-`g@!5ccKKF=8C2;smTZ+=DVvx*E`_96geG{1`AV#&e%P14V$WtSoJ-@ zGo4)Y>D6!P>ojOYEwHqY4n$)?oS~LuxGZVQu@7g=ZA@Tz(Sz-!#Qu3aX{E~)A^T8aEEK6J%{^@l5&HFA>seZ5>bQ+0aRSAw{ zlb8;NgA&~A_q)mOM)*ugaqJyRpi!yGBRk6zd;|8Fhq^>L{Jd}$J0>YfvI|mcNY3Qy z4mACdc!-9~G|IDVuYT;I-;r*dfHv1G2@k+Vuh4L!*x0=BENy*VxrwXViZ`xU1AJlf(}u~YtRwt91nb~QbUQR zElP@seI=nEz@Sgyy6i|p{$U2|A3ST#r^+BTERYbkePz=SqpC<=*!2TY643LVuf3Re zY9rQqK&T_6eyH?ppF4%Pk%pR%)oke#Q`K)3etN54lV5gI2schr%?156GUH2>kQ2+> z$K)J$*0gneQU+0YQAdr>es<#Rh)?6eJfRRhL|CEaXBD)K!#(}-8BrIa)96I8OdNfc z51;V~HUA24w1rm$wf6X{_cskcpzm+4tE7${O|!5m-Tl=|%%1LAd!s3+U5l{XsClPv zGLsEDZ9z*1>Fl&1Kt1T!N;=Qrn{>aW3wKl4l5l)+?LEp|VY*)hjD=R-dy%gMoAd@} zQBKVIYnRhbKdX)*+0@rk`b#>#46Ziojjnb{Ob#S|SDmE4Qm-PzTO{t?!yBsMgJ0-75yydHX`A7x2DQZ9%BWM-R3k9 zyc%iEeumQBxZ*k)B`niRIR!(-+Y{-iC&S3bgOZ>~qA|pk8yF0JF(cUwe>D-v7v6Ta z=Tx2;C=-eZrq>dlwrE%?gdZbBZXqV&W-Z1F|PB7b#xb!cWBEH@vb1VP8 zoWLi2knMh8Wx~ksa3JCPM;r27-Xwp#-T@Ufc=-HE^bJbDX)Yzdf&Ns-1uSjFLcli= zMyRe1|1DQ9_z#8KBy1|rAL)!#(t7@5I=I;-8t+KB`gpb3mta1zaygR6@kw}t8{`5t zin>J>Zazu+FW~oW-k{KQ&He%K2eM$|?NKjD&&divUEhc;NJa>1*^5+IklvSSr5jde z^)dH37zn#|+nS8uKQ!IDXmht-a18a#~K=GolBe`n9gnU{#6Xk zc(;|Ad3&FI)l}BjaZNfn`*HJj6a#|~gI~y>MpF8<@zd$MagdFvqD|D09%uIRA5?RC z)GLg-YV-3zDM31o@rz_j&c?H<3djrkQ|~H$Eg>u)E02s4818&Q0o0;A>N>6{JP#s6 z4zVKSw;~u7*Mus^TP~zFeVj>^9gx2t;C05db4T~48{?cizMa5-hUpellu;RuGtL6m zOq?m#JSrOQ%7>`>11eG9#s%_ zX&s=$z|aEUg_*}9L0su`jaR_~EVnpw$mNasY$Kc6&5?@isQZdWdPY7ei*5EA)io6I(`Rk!7`#?f!$D}U zw_{-RYMV?SY7{(^5ly@}q=Ak6oJ(0D`)+Sr7Q%ztiPaOk-FOSAQ#oZ3KF&#dyUtjX@l=8|I^ zMsIaDiLIk>7BZ~Rc$PemSCn9{eBDzHQwTxhMzm>MKSi*8=lEBKdt|@|QpfsbsAx*o z@W`WJ!gDQwPxQis3+b?RX|b0`L5Rih#d?>0xyi<&aZgm4CZ#Gqc}U5pc1ArNxl^)b z7wteotwKNLpbkx#Qqgz2`}KDIimsINmEW^?vy}ubDv3&p@D&X8*z96d>F>2Z2(7sy zzg7+7@;`&&`GtJg@@$dWvA)z4_+@JY0{3gdsExR)PX@8WZTGLpEifaaf?1dnH)4k< zRMPqknU-a)KxW^Hei3klao|j=S9ISd!hO`o3a8&nqt8xAeuaso-|xwUADG!Z+7>0z z6pci6$%^T_j`$i{aODWGVjiR{5`jmKN+ex~=`MbH--iXWOthkXPI{Yo4=Fec#q!hC z*S$n`!^0yw*{pO5uOG3?tMUYM*XB=N}Si zso;P+@1p*ab0n9I?IWdqE^!*Z0CjwC;>B}62Q;-t@e;8MWM3hmJJbnV>yty%BwlcP z2>XrI*qpm>vycXHJtF1zsF@h=!m*?3y~hUy!Rz`Dbsk4<V ze%TF_rCngy5x??o;SO%dvHv7iW0t-9l5~kHbcdSjseJ+Uvk&~cFfOKXp%brHYah4; zt-%(q`z8#1oTs)7&$<}(||I*3D 
z4^k2zlu@9sW8}NlQ6|)BNUGST_kTgx%5ou|XNK(3GYgvo&g4m#V|p zFC<_Vx>&yX%St4(UtA!P!NXU4B@$#3aCcU2opzn?82#TQS3ouk zklYX8jM&9cJd#bU>R`LGdb4#_wx72hN+ zk%l0%2ulq>PCO;yi?F*nuT8@^u{sUiuk;Es%aJWl4X7ola~|YDuv)CZ-H+pV@DtRc z7zAf3mZqErcNmbGlHn_SsXFbaj4u}D*q^xPqzSg`M!APv2ugf^R>msfwk(Gbd$GNW z{&ryiaS4|Lkww^1!X2r|sG-1z4E#0!RvLkr+#QCIJJ+9-F6zlVm*%c`Dvl8&l8GGW zqMMw&2&7W(<~b|{%iJhBjw`8+R7n*jN=N%bqaRT5I2k(v1#>XR7v!1p3q^O!sxQ|8 z&2&f#0@xK}+-tVf4rdh?fe96ZEak0|c&;OFe^2gB#Ii=)<^m_{Wm0631&ftj)cnq; zW34ckZ0(?~`lp}p!jW5?EPTorlFxOGz3I&{H`nsMn-xr?TRFcL)FW$sQs|+8oIkpkSK4;pHlmz& zmSvXx5t(SEELJc-@ui_cY9mgL;wWbzv6c7mhN!7@@wqSGM?gtNN!nJ=j+9U%9PVE&*0Go{rYL2-b?N`BGla zy-~<_j&GN5Y552DkrHYs^q``~FutaqwDZe$QM3V_CS*doB1mp~8R_T$*?NAwNCQC| zm7d>RMsr@a(8fH9)B4Dft?SOGE$HFEbL$*ztr{#|Pfedoio^5M*<^=FBd73RFr`Vm z4w@XPRLWe5w2rOtCC!u{U*A&a`wz*GAdSf?>|8iX#K=rd;qSiudOV4Q{*+CE|AW?D zA})?<*ztW7?@&AERduAaV%II!EuJHk@(L!$k|aL%P+qs-b0TmlYa^N#La=hGvstdF z`|PZweB=q%%cX{t7jfv%wWgYnQnZOB7_M)Ll&pB*PE8tq(Ppi^Hp?C2tYVebygbIz zkVmHrKSmwed26KhE!TIi7V3(srWq1+@>u*|Z?6(|d>p#A({Ma=j_FstEDKxOCzQ zk2+;gv4?a*&)_;@;Buf8KGUy!pH1VXP|Li2Pt3N`NUQ+&v9Wrht=d!F$%SlWD#5^t zzzU7K;PiRoPgm>gPTuH{BCALrT{f<5eITb5+yI*szz!D3P;8=?tFRk_@CD28Kw39J zQA9{=JKb9u>!FtVQd3Xc*2;JzlD{$a;-6Bni-fiX=BjUhSmj-4u|*`*UlwTwR2hYq zlwMvZ$O?J{iwYN64WF-k8S!)2$7VeOL&DXDwtc*VoO%TFo#t#@>a!>>+mTD=5Vq|V zxOW5)ECj|J;Ab2*!a3jUCVEWNplMpsP?xI~UAk*8a=C~$i6@5gv8d}>YjJIXeJMxS zPpet7Q#Hi}`DxFJ!gA0_&NR>)MR~)4{<2Ir}7fHM6UD*#Z(tLG4cou10Or@+VB6=NfFFYY`@orh>qEouc&b}B!@tn0&$s{+XV z=ywwNyNk;|?hQ4#eTMNUaX zGCITCkbDf;$@l%L+RfC2VT)H+ zRk+XfKeq0Q4X&%C(yJJYoN48&1Aw9npCxnav*wad&gi8d0~;6Rb63iE+OlY?_D-az64i3%$OIda{Gklm5KI9O?Yi* zPRE3l6zDiT`+hCXv)A@8+zh8`!gMLm^2oub zqj!cO>YPO-1m}Tt28){AAOB||?=h0$^|;-03a}3?eNkB4^Gt_hlNvfNHUpY zi0uQlj?bXPxUrjFbYq8*chiXI>90KMPzu z#4Gn03qy2Hk2%(!hG+?Dh#fw6+E~bHlWTZpykvN!f^TS3C8xGis*Fek^}|c057RFU zu3)S3=Ns{nBaFkL_Sqm3b4yadgXZ$9QxCMsi5l&oQ2E=qq4?~|hn(|3? 
z-eg=Y9`d)5`LQeNKprf^zMXT)(@Fu^anZF{=?Y+cO*UfVko%|^I;SL~9^;oF3m?dX zjOe-UL+czc3b3n;y(WGQyP>gGFcNNy#gaJ>8cP-%g_AstL)AJ{ch6TyDmg880`LCn z4PK$n%%sBt7%oK_F&dDaGM}BkulxiuALJdViOIW@pjdj|$}DiW`n>Bdauk82_ah0R zio3F%*70t4B4-EJvTZf@o%G5aC!@r!>zlFK4MN6r=g#M$*?@U}L)Jnw^V59KU;ABo zfII`)xQ)i-dRo>C-zUUia0YSJ{k6zR*|(dvdaytSMR71OLu_kLH242)gMgHhcs~jV ztW_T-p9KL-*5@PIkL_pCQ{pCos02}z;AY}9SBz3SGzeh0>XE=o8^`8SyEXMJl8i%V zUSiGMWo+IxI_SH?W^yd`L(LOHd2M0Cesxp&G^`4>lnra=TnD%9O&hOBRriXi{D9Ef zM=8&$pPn->(E<)XBLR71KOqU^3@Ajs5}{u1 zMCWX_dp+lWvxgtoSi%<0ZFzh;j=u&tT9tUP;MD9|s(l6VuF>ty;==-Y z@gq_0zg6QN<`1HwhlgMT8(*;;o8SsSAHsueRuMfH=3#xT)r*QIzxr6nny9X)Jq-i^7AL%y5f1dBJ4CR0x_^M;BGhJ zV}79Rodvztsjb;U8ZiA1v&VVNnEuCr@}mQM^n%1>fIVwdHvomK6SW?OWZteZ-quBJ z-828nnhPBXI#E8IxqTIanBI>qyhXLU5r!HelKJ)wpWbOr!Y1jo;)62j zW@B+>?1kwwz6s4)D3&olQTCxnBYwV1IPvACLcyDFjv( zJPe4z)B@z8p`kYbH)yJ$Q^s7D^u=&M_5K4eXE%`m0VM4rH^RbmV}uoT&w<^FR0gfx z1E~Pk*n>Qh8VN{z+Q6wr6}+RFkoGTWTi{)P|FHQ#%%k~7oGXFL#`1)#WM3TMrvA(T zLKMO`jb#7&15k?8-O75}sXF=LP{rb(JN6$oGR5xx{-Uk2`M9sjLl?fisPSP1F;FN0 zq}FHw->%47F4M2h5O_fQuWkRGDF6ZM^i(Xa#_=n^8#k+maqX9r{)(-&a zEFC?za;}a}hQ$Mbe(T+qoVd?k?R4voc4gnneLo8;@6{)uRmT#0vbL3fVvJqo!M4V zc%%U6<6U+OKA&)G+LO%&iT!b(@L#HAzcoow+yxlG`AH?m`SwU|VPs!k`*`jAb2c7M z0R^UbXfxtoYlduSJ8$NdvdjsY^}NS4cz)OT0=Q5owbSb8X*~RxF&QtI)F1& z6%h8xb0Ouy7${}|sxYVVK(xs&{$meV9i{t-rwR0ev=af5d=1Me>f@xn!D@& ztMyd1iCtd;e3LwQpwD}oL=?+Mj}S%UT0jZMp?Xk2*U;q&$ltHls7-# zp^|4Czy}(;0^R>L`(@gOBb{uHrwjF?Xe*ODX`4YbU(b>=^|FM!2$q}JT zUAYMSqW}F#=850?8=pg9*5|Ry=24S*_shC@4M>6a?_yE_SDui2_c*KU|9RE`!U-oq zd0ExZ5h>0N9IC&6VFFapVa7$X^~s>1T+HwRP?0jUE6#UpK9cocw8@SB`qQrT@p|75 zxp;Y#?L9_;Ky}a)_`WAER6ke*I--Jve_EwEJnCJY+yJ^><3OS9bB_Up77nyAn08Sh z*b?s4chE!bX?WlM_p9m02R&+SwVZr9FtkdsJiH9(RWc63F@Y?)_On-d585Xi`)qHD zYzn>j^%)`O;rUO_fUEvGpkw#=W^UTTfhbm1IK2s)a1_YFG!=`gp`1nAihKlU8>;|g|Du!s zN!k>2X{F6(G{-T(Ap{LHfq4m#+AanOw*J{kK<#Z?||?AFzs~ zYveJ>*L(&Hk>IzP08AOGzCXtcHo>3%&h1)$r3Z-s6x`E-Svu{&AHV{3=(vy7Y03;3 zWnVCdh~7Vc@fT0`n^+AnjL+dry@2#Wj-&1NxYF}-TQxw~x9!7!um1N=;?djuw?F+4 zwh3@Gf9q|4KlHap1jzMP|AH|8Z3M=w84q&)XrEhW|+dvN?aV6ww=%Te?R44j_5yz#(zEz_z%Cm yWq*c)Kv4ACc<9ei4|vXRzvQ36)Z=r49*8oc3|IpJ{}f)nlBtj~4f;Qgm{iaJ literal 483521 zcmcHh2UJwq)&&Y5#nd98t%76~6#)SyXHZlGBswo!TX8Wmnj34H zTHBZyGiaHa*nVrEM37VY2C|CQfBrdg1ny%WQKqb6)ywv~TUXN1>h*fDc>~MP#}5f2 zZ-_f)8?k2uttP|`3tYPu`y4rOY(BYbd!Z25{o13WzsuQTv`G|qvPu-^_@_Lj&nKjk zOE8HyybsQOKXc!YoF4am+j08WSCvusAKlgSxUn_S?8S~N#!}`t5|vJO`H8f1fFBc$ zoNajk=lHmT?v0igdG&63!cTu2it|fQr&GSMGN$#cD#&TaClt6J4YC|ID&79nB~Dtz3dIaIj^s>$Qv1*7<(7 z_bRjxg(37OvaTgg!pOfHx9ZrGqW6Jk?Wka37rim`M>CW zH-9y`kd9jQINi)ty?)bwl+HON615xWMPtONIDX^F&PJUHGCgC#Euc@+i|*~x3G-)( z6G=MWMoKvtI(x9yO@Bt zyB6&~J&8C19^i^!)UVI&?LmbM)Vj&OEhkd;Z*=Y~zvDQo$AM#TH)-BZnE3(g3|+tj zC+)D)PiaWy2NuTHebT5%OO$fO%9(VJX;>G2bolm#tGV0fZ99b;x8ZGN?kh$=vR4?n zl68lk=?)c2yPGAGEHLm5(AVD77tm{YVYT()`Hgq^=N7vCBac!wrWk%ZU7*fp#eG^Y z(sQ!%#P`RmlW9M7q=V|z=5NM&97UItRd7FCI6*Qx*Ef=kCVA6ZApN#hhag6cD3?pM z;ArEc;af)J9HL4+MJEe$^FkP@VhG8OouslmvcRzOn`%_al1FsjkY$CSSK`Ps8k(h& zY_b~*DIc#2N`I^Wc9XjIV`{_cBAl7e+QO@B!*<_<3HMN;x#nhsU6IA6r$YSr+?pdn@LnOFxq$SYsVqtzG~{;&V{>- zetF}BP58;#zP(EiJl>ex6I>Lou?fgnZH|dL$!+icQttSph=N#KZw_fQCCJvJ1;Dt=1v!-IiRM(Tpz^VO(GYfoeKLvH$>)X-C)bK%FJ`B z>7w)tCnNld+Et%VQB1ub_ubvE-dibp@b!MiolvDG49pX!u6DZK?I|8hWK&dp!=XZR z)Ta}TM|fv~omG1~r~SM1(G_!78k!DoSH~o9uqHps`5(OFA^SQN1vR1 z#7(T}63zYU4E=RCWcKvEf|(fyoy2aOvI~yg$=Nt|EQ8!5Kd6*Ba~jk`&NV-GM&B;= zkR~FdTj+Ra@+5rS5-nEXo^s)GW1MX98}$ZR)O87N#VNcnx3rqhXAAaITgPQK2^d~b z#>H$NjU)Zqb!WEqM};suTcJt+2|ez|B&2qll^OFxm*4K8_3>yc_q@1xE$#AuADQ)& zzPI|2`tj{Fl#C1mp6MC6#|0rZ!LpCtXR{L`8$X3>_ufC2B=k6I)<>4NGvujMs#YG^ 
z@I|)s$ucAlG+$+*NiHc}x3(r+sxLp6H_p($`PQ5;wm>S-HRZ$6lUk=ZYI+K_SjF_0 z1Fb60UcE)8IK1J48MfCn8j<2Y_Gs6GIK(KE`Ndv~dSNL!D~3Rbd}&eI=1$hhj*W-k znx8CY3CDiMF-gxX*sDQr)%A_gO=ButRPiDRFIcKxV zA5UHoNqWDromfz8p^A~{iVL%JVWKK*^GYX_HlvCpw)OY0v*m zoEFRF(&vso_2lIQ`EXsiab)D~*6xRO=~K7YpRgRybzu#AZaJi;chz{T?8#}vRn{j* z_4}uzkvqm!5=XX4pW(A#PkLg9(K_#5)n<3}NpdXHZwx;kgt~L+i#)DsccYf?5d46g zPpzjGptxyF9YaBPhFXB~W=i766Grb-V*HP49yPvI5{)?AREa)HD$z<8)936-my%bzusPr7TYpS= zWPz8*ZZJ&zb&Xb$=|G0WsaP%Z-jIcJ1jjWW@n<#nKHNywC#mgSFD5IeiTM3e-PC8d zasE3)@18C4?nIMN-%}wQGFVt!6}MT9O|Qzc>Y^uGT&>eb?LGK%Q^DoA%+DTXD&CDt zrZ4zwH=n&N?fKa~7koaJe^6CR@0eR-AP>LW&wj6v$;>E**WwwpQO+B*4tvgfJL5Zp znE_rKUJ}23;}7TEZJD>}kFNiWrYS(3#dOcTg|~83 zHO!QsD=@YUFhv%nEp&x&E;e-L_HlWZG(lVF_l+VzV^q(te@ zU*tFKuP^A_{BZItJ!@OPtxDLvsO6V5#1pnoJn+TI7O^!H2Pg&s5E-~R~y&)viG zzy1tCOx^!K|HFlAbabaV}&-n-d5xThPTiN|8>wmYlg9j%4^YXOsUzy4OG$yvm`*I}dI z?*IDxD~B(!dHw545!2B^Jmkmx`e-!ye)LxENsnW8E+~~cAMz3&^ji04oV}geR3~3& zWpyRfz9X0axW(Il+=BfKLMFz|WiDx6@vLQsb1$DY`iANLPTZ~rWw4u$k_ZORej;?J za;(BlRwt8b;mtNF7WE=Xvf!~_?_^F|A&kW2A{bU4Y1NKjeB55fh}4NYWde{XT0 z&9rZm9YM31^jy<8J-UItU;cw%KLk?vv(SkYc_bmRxbk=~Ft>1beSeUV{H@0JM9ie; zF`l(AZZ=Lkp;w-r*AwdrERajRycWICOSQE~NgSSQJNrHT`Sa)R-~T>(!ZR)}kA;y@ zVvp5yll1|5&bGwjMdQN4*Q97yF4MlEuV26V^6zb8)@Ey)*BGg(C)uYreEQ6exbN={ z?yF@z`CK^YYQ%G{a#y4r|L4!2%jny#D%d5X`^S23Fd@u2bAhhhwBpI9u%zTOZEV=?S})Z;LhUH2`DH%0Wd&-Gq2Bdgtxjt-ooM?2-s%obna5*a!b zPc4W?a@&3D=&-7Jdy$mQ*nYX5da+r>YP8n7s_LFezSG9SM&e+JMa6N1&1|4)&aG0Q z{DMk>@tyziB$fqs4Nh%T5*MRH*YSjt&#IN+l!0)^qVr$()N6^nQh_UzZL#1ntySAo}8TA zWxU~{(AroV|L(MeNl#u|Vuddy|6W3f`upY5Ql9R{x;nk_h9GHmdHL@JCcUA|dg>Lf zWw3?J%quH!C-j2haM{Md>_AzG^0$_j%o^s?hMH)UTy}PL&kXPEhAHeF;^6x(2@zNu zd^}os92;5p{vKHtj5vrFA$?e`c9g65{?5k8{%$yiubDl;g!SIxCxF z$Y4dn}jjxG-PR3YY#7vH!^)) znQSo%XJPnsUP(zQSFa^_y(?WsL0&%bbXSI&ilk(gQCEh7y2H3P`v8a4#5-Dki;?^G z_V$zCB8eDOKfHf`70txVoS(Qp*G)D5<;#}{ZaW56R=M8QZ`_MhlpI;EyBMuHA5Pcl z@vr_=ZEbBOc8kI9;c5i<}iB5x;RGN@qvnAxdM62Pv!P^^l%_{V zb9I|ZuqW!{$B#<6x+;aHtbv)T#mk>hQpz@ZAeA5PXw~|V&n6Ej%sdWfYrfq!_`vRI zV=(=|z`&h5cT(lkXSy;q_V;!h8ym^d{x2wa9p6##Dt~j9l?{Xlukj+1+tsZ0;N4lO zrjt+mQD_z|>$F(5AuE%rkZIcXv8L)3u1-LpD7i^MbaeFGrf9TJOiYYUV=yPmDfG7H zy$}Yqr&?OR6uh+8X=rE|OP8LLncchhJg>l@;|5J%fr*NMK;_Wq7YRB+r&~9il03~@ z&LZYQwk5n&RDyi9g4G^;{iD-969vU&Bl5HcNAKY=)| z&xJ+=eT?o{>`f_a)^#%4l|EQBr-=*(2^&^eG)2Xm>ANe@`z)sB92x3mwBbxlOlQWU zotNJP4t}y}lp;qi-FflpjdwZfG47U@{N+3<3rYWcXnXQ$n0n&IE9Dj?d^?bGuZvUaCQOjC}K*@%j@#!tDj!9 z(>nSUbC+{i(e8@x-mdrF4wX_A#lG_X&|V9ktN6?%I~>)k*tL*U_-E13L5AS7?rqly zVmNo3!`O4XjTaXj2B8e>U{+Ic2)6jOjb(+Jm-pv#Ti%hnhNhVf(@m^9QkuBh+lX(oml;5zFL(1$eXZZ$jInuzU6gpZf+VHgP$WcpFe+& zjg3XM(jt;2f<^47kRtzvhKA#~gsanClk?t<%^HUD`Py3dGF{6i>S{z>05Bi_zapS` z@=Fp%slueUFu&Wlv9Llb>a4M=NlPCZJ$@d6Ju59MV>kZsq)&2}8>6nSjwxP#OIDtl znW;>{@46E&9H2d1vRH1^oih%}&NNjy9fdMn9;=6~#}Da`Nli|EL>CGv22K~* zC*pOH`}be6?GG3rLu2&;?VTHgZkhA#C3a@1h(I)-OCI80PQ}Type?!kgJlK3lICKRL({&je49ke2mBLc+npna9@G>O00ivi$tc zy1bE{?JwD&+}{++M;%ALGz8IcTThwqV%GLM_iCu4okympmE`2cvlcSqM;l@KQ5D_)fG&D7J+D><-1bly9QL%rL(mBS&uqz|71F4J^^bBJ$ z_((2pZJkGm__5a*{O3t=M~Ke2S$rw)dohuj+Y!=vhV&vIziD~)l}h6Oxub;i%B#9n zDwwVE^STeajSGaL?DKBOyiBUAU~}3th7!l^w4PR2xFO0|wu)BJ=$>B>XE%ebR26IG zhjecI*2kyZaSfG5NJuCi#*$()`#pqFLw(T$C>BQ;+5A-R*0Cy+0GR|Gu>C|zg2(<_OK zh`eV`G$G-!UwVCxs-dw_hx5)IDK<_#gp9|&wWEWXpI;+bB9aT8C`L+;oQ3zLpvYK0 z<3Vm@acbkl5v0&BDr#Udw46rIYF0Dy&|D}wmrmWHd_@7da}zd29cQ)tF0mQr^0>eUR@;*WBs%JS}rSSCW)3SZzrp}n@q-C*p@80o1 z-0V15U@rJOu7z7;R1SKGf6JkjriBFLT-Mb>;i~~IlW7r))6)jI-C?X|-xCzNlD0RT zr|!7nv+wO3zD|1n`N@;qk;SvJ1_smPjUoGMnQp{@RjyyZo-;ks9A1mZ00A&Q 
zIjJ(QgH}^g>drUnI`b;u3P9M}h3D?>?rXDMM&p*^r*UzOn8p?G`=+1B;_~nB^Sip8+-z4f3#qSk?7uAR44NQaPjuWqT5oV+YI$~#*gfr z$=os?5{KmJVhRchH~Z!6FF2%mr)l&y*5`U>^?RhWtTsFs%eTc|DKF}ageKge6FfvMb(4xq=`EVWlO$d1}gj<+4ESw!I`S6uI9E|C^a9h3Jkm;$IQ$uW740i=bLx_{CTT| zzT(bK+4P-AyMZ=-^Cz%i&@0O{qT;B8eC`VgEds)ikJnzAXl}1+3S~}Um7}Jo7pvue zQi)7k!*3cgrd!mJQ#|keA1CRcg=M0pHH3u{B=QNP-Xl?`jP519qr3U?&3SD`Ex`?M z6bhA=mKJCT(2>cseE%$>zZtdgb9KE*4YQbImbA_mKA*lO+N@!Xz*p+-uHEt!e&ahsnb59Hh+$O-s&mfl{ZTc7VuGAxHs$jNeCn}H&93r-1}aBQd4 zcvVY_gl#7m-s#iI1;(Mu!C~ibJf3P#@FE+6`;E`;SK%_IsTMO`xpG&zH_srTv-<$) zxG!};$+vvC9wS}X0{+}wmuYPPB05nIZ#vZ}GBTww5EP!GeP zm^I`YwM)Q!6v2yzx|^gy-s%<^&T6Kc+da^~Qoa>#>RMlZ0P8wKVOhSw2Xb+7(UJWV zwE|{{Ju@A>zcV>+v!4!417NwH#~A1vwzjq)LvfC^3O zDaD)P*mrA?)f-;s@S@|ZJSy`jobwM479>5fN=OhK*@LOxS9~A+LRX zyV%~$&CSKf$3ypX^5jWR4-eEmKyvf=pUVy^ZSp^Ph6+tY07xdND3GLHd3tWlb?3rz zEN9*leG}LyKZ*}BsNGo-!cV$pAECJo;4S%KTws8k7@h>lD`zpr?{4kG$P@=eAO(dA z<8jETsHlj;urHS#!ql`e^9TT)4^?DkTLRO2fzP})Ag%Y?~%F#-l`9vPDoj- z*jJ}p>@O*lLt-SN`7nsPe0piA>fUYJ>GtvT!hxP5i@0}V3OGnU_pyzC#|=9LN01mo z?0RDx@#32g`IFqLnr?xtK!S&tHx-4_ zb{PBW|Lobbi8Lu;;WxmRWNX#sy@zJdVP%i^8Wev(OmDrs#w#J=k^XDREgR3kTeQc2pr1x@cwO@~vA<2hedy{jf>CcmNUqL^?&#BAFkQ23Ytb zEqJXh(}NhLoI>A1XqK4_^&7e&kpbni$hG_C3bNExDPZs!(x^BN0_XnQ_5)Yfy#&#a z^~J&cQD6Rs@4!t#qkH`1$HkaElnT}i(yB)ri&-&CGTXb%1t9Msl? zfUfo=sIZw0zD0fgdcAO~vfoeNOLsoPHUvX*)d_&T`uyS~FE8)7qV`s2iVT)&If_V0 zNevGV)3!YMkxJ=|&}lySSeyZ`^hZ%*iuvwgV#T1)Q&mF8 zzmI*7|3dIf$B-gmgcd#?629lOJlvcY3MLT_7vab(sbNN6N@^!!QcJ4JT#W{%h%YWSf8G5*I6Itq%k-$RHiSZM&rY9VJp^h_J(EY0keJ;3RT*jy)p9pY} z`uh4sq_w-)iu$i*N)}oK;e`8dcU6@zWG~7*iU9BT7PuxXz!eo0wdwU;!iI_LgZ!5r7qNDglCpa#~G9@j3x@h6q4!31u=#Jo1xi|0hsIXoMd>=5bt2 z?g@*IrX(eGnr^?I9~K-O92gisZ{!EcYAvbRBbncR`wa?2$ooTVOU7w=jf=~1W1%0V ztgAaQn_0mJITiBN?!rlYGQT@5q0EyzTPwUy>zya2^U4h!fbY#LU4CorIMGBb$3S)F z)Tv9ipHu>|WjFUD)=&f&--~&)X!qsV_&B|Co_<3hjn(8gUgym$d7(F6UNR;mk8my9 z3(N)?Jd2@Q0t(bHyeX+`?Z#20f*Lk6>8~;+z-xI9*&Guvd}6-uiW9YEwPR$&C)T&O zCA3|^cWb{PCy9;-Y zNEE}LMJj{oRe-$Gdiqp8T^aosnr-=TM^IVvVlabRPo6;s`=GN-x(^V!-_w)~yPsge zdS_~8Ic>@PhYwMd{UPNy9)D%sCegWREn9dw5=fZoe!IaShWH+MKr+H4h54uX zZ;;Bx-;N-aGzT>vo4+J|BTgW$4Evmp>#IrI>xwydH21qa)75kG6)FZSa7Q3fd5a-7 zzo>QYq}ivDsb(PM=6+=3LySFm@SwzYK5zLibPJD|DVHigb3zANnWV>|u!oFF8Nlzs z8UWswbKymIhOru*8E@|bP>@lD_{mglr+`Ek}E--n;Bp2)#|D>XhXmd_o8NiK1&_I zi+=^pwqYtjCg=ilxXSQnnHRGz%`ZZBMeq+70a^q}-=-t(%Ws#@F?ydNzqtOhxNJgr z-y@`xn2qZxNP*gjWaclFZ_zoTBkupeu3Wc{BAlF@Y~l1Exb@zlr4`w=cY|=s&B+0R zp{%r&L8H7cUiiEcpTlx|nqltNTKVP(t^q10rc#o@rKYaVC>7Q7j+mKHJ{)+mG}o>1 zkmBNE4}Ttz1ehMBVo>APMT7O%r~pokjCAIBA?{mEQ*?nrL3m^wOa4L>SmD;JuP8Qu zDK-Ap%a_MYK9Gd( zE-^p-{QS6wtvlC2y*xZAmEIh92#6G(o}PApVcb#JeAQ!!cr>3$Zui@_Z}a+Lm#>z5 zt+Hz2Ei~@VL8mHszIv5{h8~$-qg-`AZrc&~f%Z0v*~%YSVsg?cZwceLfqZxI|u?TLBm zSYWdeT0T8JjY<6PhyDjrW;n>OK&U_mzO7>>bja0eLpW+G=70HeUuexc9%4;I29$e^ zyaq?WY`}|8l!SUr&p|>Apa7*KBX!xVrluwbD8FFk{N8zxI&$LVTs%{BTn=zJ+2Mny zmtVaOjJb}Ej)!GZ{Hrc?Coz>sklo(Z)lA!D*H1rj1@;~>KWvi#o4Y&46Ckqka_epx z`|rJf!*uUcM(z};3@)$*ELohx82%P6|MCo8cJKVB#*c1vC|w33=;%O85Ra}h2o4B{ z+x1isyzREP1KbCR=xom+hX-uzJJN;Lzl+7}oZfc(>%VB<=2U?6PZCh9uC6wK9xc`=@nAqC#@+bSBAA1U+4bCaS1Yyj|w-O;kz&r8TEkyLR z8MnsVZ4ddRWdrrd#{ycuR3;H5+Mvo|%c&jJ3C*8WQj< zRYHm(|Belz72<8)E67l1SuwNe1GhM(l39An(UOWIF23k`gVHz}Lftj%_Jyg|Rjr~p za-$?$wZEFvgH_+5nil}SLf?At{sBYSpqraNJG5P^F$qdYK9$kN^8Dh<3bO3IuevT` z4tQi2BK-Yh^Mj+JD9)aZX+H}3ATAL@=bs9ALpY1U$nfwT>nX{#eX)kkpQBirHJuhq zlkJ5p)q0c8@VBW3*vc+?ld`X`ta!bB+rIHM!Na?2W8VZgb_3`REnP`i4X=4F{m}A_ z#qobpB{*SknYL9nQrZFj{3qKgAAqXK(1e%YD4XWo0U&PrFd5gN+w`z3Rc6w zo;FlDR1{m=0y1%+6FmtiGthZl%$US)5u8-q1^R6tWms2K-MVRUm@jalRQyg*I*gs0 zhH{(;GV9D_Z<=_fm)HdJy3;*G;7;F6S)JJ@w72fA+9Grvl5eC+s6l}6rc=2 
z;({kwfhGt}9!LNX%f&s`dpizLuBN7@=oB)(&Vb!wJBfccMJ$}HN9RKaBH-#;PN*F+ z%eAX@C;bsOW5tf8g++GWErk!37pZq$vE++mVXEe+Wo(h-nxU2fInBROtym9&ahd|N zr46k@o1u~$$!yRt%YaM80V+OB@qMqo3=#o7LY86#_o7hy51(@N}E-Ie8O~QWxhg#ml=*AfB>d5qaKEk-U?0onS%H3tp|xW zzp%D&R5Qk?T}5IpnXYj~-cxf+Gi8}wbewHme1u1h)A@#r$Gd{7rs^%aK1~IKR6?xM z(bCF((cjtp$ymPmbeOSoXzgG_#J0XRHwT45;13j-glD(U8(Q{^`^KKJoNA3dg!o_x zilLasL0a%BvJN;Px1rX{AlT{7h)zuJO(?XNdjl_rN@)yXZ1m}2zPT0!8D)Nhmc_E4 z)n-N(E1_lqNz&BxSm}|dDDK}-Ye7R|mxh}QAV17t!tFw4orVkD3iKg?fyM)+oHuUh zPBeu&tRN#d^C7+wuuRq}S ze)~=`?0s7lx1IQj7?bxBulAnd%ywn++s=I$4N>z}IU;@zG3O{jr;5cEd>S_iKI{V?M{==VxiSnvi)x*Yw5Z0@ zw_2Qem^++ND4&t9HI?mQmrYI>#}u$Iwxq+DLPLF*f2G{RrQSd_SJ(T@9%zJ+e+tO_ zzdyghz#vZgVIWgH+nkGM<=i^Fguoxxy(5kb zj>NBZ{>mU;5+Qv6($>7OAZ_)CJJ<{Qql!iOlLiZAoPaa?j!8J!TT6M7mMC6186fFF z{*O@&C)UkwGz837VA_vjw3d}v48|CCPus-U&KH{WvK8x+f`|z8bLT5nOH0deHj}ZQ zIWxCy71%U5h9>!5#gP#aWSkKoiZ$k_j+QM|;kJXP;vR5z;M^KB|6bH#=ri;4k(j|g z7G$4>N=}=#kmqV)VS$xFn!|zu16g3ZpcPAg7~xZzlha;TCzNqdj=YRsrNG|Fsqw)a zN-h=FwyVO;ZGKW%n}^qN^%AdR9t&2rI^@ao8^K&d;kv2G`O#&2eVQ>AFas;3idD zae0&!8!fIrT5JA(4T+(A;o3dzq^W;5Si^NoCx6cRwm3oeOx}5HS}HmT5a%Gne%+6i zsNjl;?ifsSV1_MUh}vlG*lo^q(I5{LHBDzN<_p1>t=J98jAy<404sNj=x24J4@bJ%{b(FKXH2}ZC#FHAl=1n@4 zT5tc*hb+b9^qDgetVY2FDE&21*~UkT10X#|b--v}BNyN>5P#`mhIcat#{0L1UJ%F@ThD;{AAnRs_hY)oAI~5j z_dU}q&fMbEVM}tDmQBYCVQ?t_AtGSmnA$Z>Z^mrH{bR|{PrsbXH!kdjR7BG}pgumBhOv|_ylwpBwn;v07^sV_@6Ih~yZP@n80pe8*EUVf}mEdePIVZy<_=i$7(t zOZ3_GxsoSW=x&dAF=`7He^gO4!K_Z_V8%lUF9r=%K|s3vT4O`QSi|}+9S};%TUD(xT*BYc`L4Qpl>F~-o`{K4 zf^~RAgKxF5J-C1Wt6yghlvh*JjJ!lQmSON0N(CSS&d#Os86SMRG`YN$D;RvLYOWlb z#tt{52pB&s4Spf1&@pmgnN5cMGblDquO_%h@%7kaBG(tP@DYyrHmMgSKNueB=zgHM z?5*E{L&iGQsA>4woW1W=*yHRhk|>8Wb`D(w)Bc9dVK@l%$pn~J0A|xP3LF{ZUjbnW zsJwngkbpPOc3wH%!n5W{lc%vwkKk|EPtnPL6cONUKZ%^c|4luYU5Yik!;FHg zEOh=bt31@|5;BRZSUwPFDF;O3`u*n<(-u* z|H}+u@hyc@fPR|GeC>-VC6M z1+qV(Q`<}(W^pgB6>S5l;u7n{JvvdF?g$y_gHzhDnlTZ!?mvC{G!G3nVsNN&8}@}% zTgS5!ARK8)iaS4j-DFdLUGxy;lStYCZW29*Dt_&Z-ywg#ap@1lR>#jhU^c`I<~bF9 z(e?G8!MGDw;ye-~#Lx}=Tw6agI-;kH_C~|0UU;4X%YvjBw$%X}5v`}1q~v9V_#Uby zK-Z)5^YmR$ENK>!U7tp(=H{FBbAca7&M&nH)Y-}1dqJ~|P0$8_FfSJ&@Q#F)_V#Vb z9{+J5Mdx!QdmMmq@$&N0>EPe{Nzk1Dkq!D(kl0~sD@z{@?Kc*f|G7C(@1el?=@_g$ zJ4x}Q)FD^Sg}4%$PqYs>F!}iSAn9O_#S6=~>W$rgfCq9SK76Z~s+3 zh1qGz-6!Mwf~B&t*Va2ML*u^2F5#k2DB^B3cH&6E_G=buKt9(M+| zTqh6s4GMNs)lBuWRlOo0x!e88B8?q?))0YAo93gCALI7#8)XIMw*b|#55e>w{5+nmh2M1UeQ8Dl2esU|%Y15*sYX{u1?5fNy?PNiZ zGyWAgJ@To;#h0oh2lDMK;(HkE&LifS|Fzo!Fuc)|YnJ(6N}{u4F>0qe!1p@Ej~Ka4 zmPyKIyEu3R%jcnC1TO@;*#Ju*9WmNu)z&0<3#&JOC3O9{Pa}$AzoiSVb*9Dfsj#(j&434IwgYRwS`+Mf z3s!+oEl#UbQfle@w>#M{L4fu28|KVmX=LvXl`;lXhj6tDS2bnHmWZyRBIb%uGDl%*7Pgi9XHa9Z14|Bz^V~M~_cO zj<|=lay7KHFw)brntoTxHw@aVtJ&S%#WtK$Qn9?>`YR;|V^AoObuj29qqVyS!4_Z_ zUs~#%B`jDzs(N1jq)kitM1F^W2eq3ah0AIiI1VE@EUJpquY~fum7iILG?|47e~5k8 z10fbmK}#wrak^G*F14Mbj4qkyM8q|=%Q-k~`+g)R-|TK|6>nB6GS>tU0!(-;Ui3ya2GVj()>?$0nzMWKnZKNWPyjD5e zRiY7H$j-nnflo843*r zw*`_5J?W!d3BE0Du&c9BT)cSQ3HI}@iO7G%u%>9QC_B&(`R$02qRhG8!bQ;Pn1b0Z;W^v?;t}hl0t?6W!+8Mk zx;XDP?<LZ5^QE4^^<=^9|)yRgEd! 
ztCf&SR7z`WD~ORMa(}GH=_Brglt&SFnM3jnkai43@hOA@6>T2k@1G4+D_H44Mz)>% zA+;BzL*sARZP<4tney+%A(pGPg(m~4^2Li6f`WqNgh$+gISFqL4hcz`zjV-iDJd&s zB~v(vFqOr~m{8Y}WM~5vzM%^TkRI#rPZr9+K?HCPz{+9%_=z$$ z&|LY8r+YYp%9XOeyD>RZ1_rXu`?C=v~|Lsm{L4t6AR1k1OEJLFrLq0ApPVE>1Igp2k z2c~(}8wapsLJlhv^*~%zWf?-^^O+$PhC~&ZxVIcqp&x_?tAy32)O}zOEgFSR^H7F> z$?XQlTDG{~bYpETC^)^x;=`x!0hX|D0F_5i=m7?0!C?LR@M038wa5aAe_3LHqTq4+ z>nb`2CP0vvz>+vIIoY_JB9jCWF?tob48wpI2npq-rLQ(Zjs?{VfPn0F0h}9K1^(>) zX7<56Lo^D6L@-FpaEB?uEJdHk8vLl2Z3Qc ze*2}E?9Q-Ge#^1f*{kK&NlD@WrkWcY1Khkx4M9&8alXHta=1A`!ewROXlYN8*i3to0R4uV>Ry0gcOPlkV=@LIQ;X{j)!f1V` zlhSqbv!s$mkI!8wG*VLjc`yc)UR1ORezsa^TzN6DB0&HFzUzh$&0#1%2!@0Te)%Kr zFtn3=Qt75R&J~77yI?;d5nU9ubDYV^&4tRV5B%7I6r8%##B2`@v!K zpdo)!iz7`>0UGI;;y4+*|Ecg`LKnn|2QtA)r_9e=d0Y^A0`U#W(ze9qZ^}n<=c|ls5MZjQ&oOE(yrh$H+`9i|8zq0}!X^(rHq3tQkwvH#77DHq^qNRzb1|g*Az?%dv zrzt9Faf!t03rH&kEtBicCYWLk&=+x{P;Qz|ZTQGzpkbVw1`!_Js8EC}%tmF{GfHVP_It<9w2u`baPJ8!o z5i$<*7)W%m@G|2-tAWQ#snkAu$3lOwd>8sU`d7dXReRudtb4zCLwx&*djX2N8RorB zs$TxaH23=TDdKm*M;-xs9ezawzPwOBzt)crynwXeBv0?C0+jZ(92E)79yGYCkXB_= z!Jr%5kOL$hp_!Q(akzE=Usk8oEA{rDtfzkmbB**d%a8H8&dxh9;;Ul>J|(EG8XALA zaSw@4gHi`Jo&uX$dBI|`&KCrH$-$9PM^ZOL=SQB7ft=xo6S#5pFDsKF(Mo-%V}kl#$yp2) z5hO}3IMhi=li-b#jf5<1@+0f6k%&YX`JJaPl?!_3p?ipc?Vp$16FUt%GHabDj=M46 z+Xc|Kp+Q6mlFkNDoG`L5o=gEFzb4>t){v5F%TVJsIbHIu8%7s+i3L1XLG?BwxJ1Lq z7@{`dw(pXUS?khpvbX<$gMd)l{yKIEe3M`rqN1vn+=CT(RE#pSIT2B=;WH!o%*!7; zB2WTer{-zTyOEFc=dL2R0qu}iz-)X`Y)*mk8nL8zGU6Wb*g&ZPs!IyWVD-fv${fCj zS3Pr|)|@s8!e)RuF}Wti*(fXiUq~>A6-tsv7~sY=$b>-JPbX7IV`=%Q@NlE#t)*&$ za2!Nj*f(cUFWN=-<{*qo2d2Jzh9)-0xYj?P^=DMdxliYN03ti&@u+1kemaF<$84vF zP~?E?iK34f#6#R#RwazZdQwPB_(?`!RYnZ676;i;>`GN!!4m9{NeczA|9f~L?Jq4qRn-#wrlJ@mHdxdvTQrO2p)d49CJh2ybb#!2Q@=s#@y%?ynFu+l#quCxW zjQwVXbTf!D^uC;bf5I23-I8r^r30PmFq>Wo-weT_+jNOltZ!OBl8|qs&kE*3RLUG} z6cs-xLlW$VBZxK|gIQPQhZ!Cy7*crPXwk>ltcE4l4P60XD!+++j%#@VsvY)A4gwpm zY-2DeomA#0Yp!VLv1RcT!Zh(jO(I};ckL^pfN}I{ac5Zu4F-lbVNTjtnTp|rmd)3< zwj2jqvHSmh_@IKTHf7k#O8BlXFN068gUB#`4+uMOr9e%}a^A9(v!Cx_1tU4w`UllF z21P_gDeZ@zweW4H0m3mbFsN;=@Y4mK<)18aW7XkQE7l_{M?p@WE_Pla!be`?gP-5+ zH07sHKgMBdS7F*V!M|c>oH0ikP$Lyq@-8s@V{$f;S>{gw+le{=u<0;0@)eOC6QH7B zUFhHTb=!$k&ga6BUsPxo%4q&zXTh|{hYXD{59OUBdXdeg4VFlNK-JJ4Bn!d#cvCe0 zK8(&!jALvWx9{PABJk%`>D?!V3>AJNS2D8W|H@^21%fx6$_rX|fj4j8o+cp3M4_Zp z4Q*{SHHWdTBrwt(x{_+#*4z9s@MQ==ufPYuZqg$gzQk+TnOq5@6GFkDXRV(941+`b zjfwm_!vsC&ahl@0VV+=eVuB0hCRz@4YGz}3TxDo_Iy5?(u@Njd;Hwk^rnjSwi(b|V zDxA2jY{kQWK<6NE68#s@w*D{?>2>$T30^3ev!Ci7DJRXhQH<$uRsY!S-~`w=bB!n0YuN}h##~raFP6uJxeHX z?$0czed{l>5LBC^vFQzowJ0JwIBbXOXgcO|6xo=w)lNrmxioPZ$HG^3WV94$F2Nl5 z0hU9fF$7&3a*%>kZ7VuB^YJL5L+9r1?dk#@zhLI%1e$stSS{%vQ0v6e5#pK(QJCw< zWq?b7t;+ZJoRszeZI4%X{0g>}d&j{CR)Kl*UKix5zcZo<$03xd#Up;e_yx9$n(m;6 z!q39ei9M9C1Ns{B(COxvN4a9A!L4GH%D=nX0ZuKL!_~IqjKGd@nC=m}tu(WP+8Oj2 zqOIXgl=Tq`8{(ueqS12kN#18;u=TQo%44664?U z5P}wrLurhGh2_b~Q_ys`5)II7rKy)Y!}mXiW2&%q8*(W)RWSE(Og>g&zbod4+F@59 zrZIx8mPW~GX-%!KE*cE-+!OnY^0d4)Av0#~fUR+1VZN$&q95bHydq(Q>xnc)yxO%Z zvj?Dj6AymO_{$+6O5=b5CIbF}Lj<~Z%06Wwd^-pbM?ykEjB1}`d0oGyK+MBvArNd@ z&{cv(qkUiGrE0v%fi>shrynqPN&EO$M~}|`!`PdE^|-ce-&f{k9-@>XbCeP#VkZO`_s^*oDz z*Z;iE^BDGhKlW2ucdg|gtPdb>Q*PW}S6b>XSa3SXPR?S+CBT^z)MhYSPxd91G14qb zYFdXOsd?XEz34wW+hD={nd}bah`d*ZdC!hpTS(!7j_$%hrqk;_)v^&^zI;hV9GF3c zec{4Dd&?M}n6-2zz^554Sn!%EWHaRn)mdU86^2og-tVGXwfIHYlbbRYIS*A2%nQBU zeC4g{ff=TkOLp20(d(%H;Bwn1`X^e4oQpO;Qah_O__@_A`}?|}hkqKEg&k&|9JEnuHiEX2FW`_<*LX;@72N2oU zH)8m;Ai27+vHbC_vFAo#KhWvbusL4W7f;h3wRrH-ZXSS2ueHThu5Nd7N{#k(yjj#YDtck^$M;nmCt8M7 zD@3Iit=$qk%=BusZN{+FTM+3gVc&{3!4M|)t&3~3Peu*JPgggWb?XRc_0%#EF1{*d 
z+oDlThmKcQSBJ?VW#NBQ)O*3>C$C>`WS-08jl=Hq|6)otRjlL%pK2XnU%@>#b_?!9 z+1c3;f-m{rbmJh({2INzK9VfE9}`lI-yTl#$^a9m-x`|KHOY=s-zCc7`Q;OU9^Xm{ zAGCkwAqyv(nwt76%E|d41pxb!`Vunt(o@2lnrrldK$oX7_gxmPEwvl=KA`eVrwr>e z@@prT8r(j@ykX?!7jBl`g63ak+a%n9t>=z&+}v}=Rq>e80c@#zm4gjYNVv3&Nu8xf zT42HHtzaM9GaaBLtGf@Pu}GW{5qo8%s4Er(Nl9XNzqAg*1%LmHfI06)olDpT`airl ztQ+I6Q0&--ezTvjbJBT`e%-{dZ^N?_6G>rnl8@H+O~t>)a#`l{=O!E0E~jcq&j91$ zJLp;%7i^lmaN4wKEEjNQgzVbZ>MAM;MSbPUg8qfF$m_IkDTD{Aw~gp)XM7CR&B}}* z{~I}XHbt?1n>TO9$Aq946h&!_xAWAA zB8x~F7Lw5C9W$EDgYQcdV?Irwe0Be|tJDf(4%k>f9s7D~hTDp{r{2{0%50Jlw6v!G z&%!nf<$NPY=fjeEkBjTnz~p@1^R&N5m8CU(0eN|Fx+xcAgB+MRjbW zMV+z(q_8bB(WYdw3}bskk%f#TLxlkL$f2Cfn*&W^`=iB zYIz6SCTsLyp`qmr6()Hr3^^*=1etW$)OhUcnTM}#uJSxYb*j5;EqtZ-FyplP?(L=3 zY}38$2I%bV987JIRuGjDP_1#%ckIX2txZbLJb9hcL6DY7G!W#DRc$B@?kdPj>>(-1 zZ~M=!BPwbiJu0BRvGuH!QZLqD{^aDK`RT^?=Pl|=H?X%&hdoP6`&?Z7r)yM1^^RMC|e%Bkp-B)(UhUS83 z+;Hg79OCzFaYwwIswlR5G}-fMq0gK-6INJMgn@~jtgLK=+4-1uln4(@O+ewrfu~(N zQc?_3OAuaLc#hn9dX-vGwMvI4V?U0+cH3pNla{*RqcXzFSI|@{s(NEONHA!|%dYG% zuD^i+-+IS1bpaA~(bA=oGBQDzmoH!b9K$PKIjT1Q`4g5tI!Lz|9Go@PKPoEfb|C{U zZ{PYPG_;!^>6ni&Wx7O|QuaeV8I#pH} z;p}IyN|2+Er2)Y9p>gF(ePkt!8_D&6iN3w84#l-vruyLxl?DAg02r>$S4_|s zj!bLtG)GjzDD?272YQb;@z?XFT3op*xT`l73A^|2f4+2!xenj6{`>d%3h3L!qr+vp zbsKQ>{{8za(*t~438&ATDK0KXv6tSZvf?Udh}NgNTTDSMT_`Mc7ju% zUfR9>fLiWWQ&$OhQ}#`Xkq{O$JiwNxin1CjFaPM}%Q1}KWZnQ;wQ|aj`kH)hk|jcP zc4A-LYBCK>9hwQ-{QQ>CwBwo;?`UfqB-83qjfvmaO*6HeMmvkUTz{FdZ}sK?9ZP

?ux9+Dix5;;iDxt>!h0?Z<-gR0rY>x89 zA!<5Pr|$YnIEy`_U*l=^bi3bwMuGy)=IuGw^1^DmaYn67gNy>9C#woGP^%ecF%^!w zwU{6|EO-h{Lyqgm-fN9bm6{811C*78KcBh3 z+*n85tJ)04g=h|ycb%xZwxetIh`tN|QEJ<~3FO%(29T*7<2|%REWr$iCr1w*>LM$< zGQf1PjZGv5VWDNr9Tu8^T2qV6*mv@C?DO4+N?%6~aFOQht*-lOAF1Z%0sfcUIb8P3 zxG{VFEv0M<6#uYWv@pr}V$_&RNrIYRK0EjEvnx7UPS4$%Ub?q$o$rR8GJ0+mwu0u7 zs0}e~1TVp(nE)J_IAet5hHi3WJ6BQ1d7E3_mYUFZI>HJ7F;FXBh$#gl`}-`-xl)?$ zNsk>nv^zGA{&3){@9uLBes<_6nB;6X zQk?f<&kvIZG&N9dMMkc3erMs4KUL$?agK zp0St4#&2vVszo;=ZDqM9^LAs zmNgXia&(#%*XG>jS{o=PjHu){vxC?M3Q{%+`8LUq9(6u~Gs?=;`W1hRkE}VlAJxTq zsZPQ!%igm;b&ge$skP|%sP(^N`lH~s>|vLoFTAaQMp|Fbj&_|}lC-0sz0|(8QR64ej%{%rSId$G>RrPF}U3yjPVM60) z#>pWQBZ=$z89JfCq7dW#*a6|&Zk}E}qPwA>xhZz%;JWhfwBUikZP#{8>(=}Bx&1mT zS_~O-NmOUGpQ%&5-btfa`>S^Ue{G7G^}^kQZ{NP9DSC88^Ukhz#&_2J4vhvzF+qZG za8(Q8Z^FmPlP8&)?KRO>M+oQ3szg#+VINU5Dt+%=i)S{Ypn< z)RH^XHxzfM+B{C?OqaL{X4isLM)Zt%c!wbfni;l?_UUqZ^!Kl?5Mdka zOdIU+y}<8)S3big3JSq=(i6=qwoF|@2vDotKV`g43QH@_4g?3$mZ&HciwgO8>M5|O ztbPALqiIJOp1%Lu`BJ}G%OO=+ne?5d@p1%u-s{|zc6N5@`f)~UCk?F{p_bfE;8ZXE zZrQQd7$1ueCFxpcuP$Dq%wv!Ayzo^3-@Pq;C-%&K|K!~diMM>YSk>$jggraJF?F=H zVR{;6MlGL7?V7Wt@w>*R+Q+aKgn80N#j*~+*2~?LG2uPvb=zjIxD{LT;S?ndz&qTm z=4pTW-YbqL0;(%3FPmJ+B4TDdK+q%8vnQRBt=^3;19Wy}K=$mu3g-C~@ra^msl*bF z@KWh~zxLa=fM6>Rm^&lWf7Q(K_2O!|eWAW`b9D_29=mJbzV35HRo;cKZ{IFTJ$v}@ z`P7~Z+#GH{>pX-S!_dy{-9IfOva!-gNSJ;F$0C!c@Jm5%$MrUKlyIkU53lnax!*yM z+c{fzZ&}vLke;%#o=0Q97q3c;>Mglph@V^Z*wgEpqSc^2VtDR?R#wEzii=FXyyz?G z(XqF!y**&p5-F);1Pr*jY0fsLzr)+x`~iZy6}}r&s(U)*tCSPL3vv+|IWrpmfcLGyuraQPV^eBa`|iQ zFe|6j{yi|>`tOojwyh8_J7Hzmxv4Koo@|xQ0gAwW#8&D^pheUT@tDapfA+2B&JWcDsC8F;eE& zmJxRB{kdhvkB{ng_TBTPlls`(*_~#-Fgu`Y6_ErQI}BQ`wMJya*S zr(7{trxhqZN~;W>)v3<}vw3Q&>if3Z3>-4V^E$@bnS}mofM*nR;t!6xL;&oLf@+Pl5doJq)6QGC^0dcbaXtz zf+pPWjco&HmH!}ut)agjS-J6*j%NVYih*W8U9&M1$at7YXvGw3z=JNLtRm~csV%5Z z&le$*G0u_pnbF>y){c~_|5g3@F$L3EH71+O?;BF~9X~D|+N)2Wg?>66@)tYD==$zo zE-#F*NgJ4bOqM`qblFTy1hMrMo@U*5j-a}wthS6^KWFF&R_~q7t}e{QA3ky<-jNI& z8oCC5TO&V%i?#(L{A=r53aTEHdN(bf*27>qaRm&rXrD!4;U$nK9^9dp*aLqbiJ z9zmg|n$fG=y#7gJP>5l3VH@o_S>`JKwW769yxP~=q7YNYmPr@<{|(j0B93xWo}ls> zJIa~*`Z*iF=tD<>Aq^V7nudC7^VTK-2EI6Npdv0KF89%S;U-yVO-X6hjUFLVtuC}d z99N^0vud!afCt*WcdwCKhbQ_=*2^G`{hSlKh4yD^$QW%*BeVQI&B{nwtXEajQ6Pp_ z?mu{cY5U$G+r;J2w9yhvX!4a}-grTdij_=%f+~n9sERjkr3_Zb-@m_vnx8>gG0g@4 zE*K6fkDU~$G}KnYopR61OER)*-jCfk!9_|OPTqX-29+g+3tmC$m1>2CF$3`pwtGc& zalU3+$qY?FbC8lRVFkR@T78egKqaLTYQC&}u7i|5c}gk2p`MQ_r;cEtL3F$cmmQS* z(F#Lat9OViy+=#2U(hA`>%{0Dx{lxwP$KEdmM(6oXQDs2IwdWZVFTZO_H^r;`{{#Q z3Ke>#mzM1EO~_jv4a30CpFnJ2zy?B7Ms&=itmc1+=#|WyR2RLN{keds(X(eyM>X*; zHT^g~0mK+P)|I4LQc?mN{sw_#+HoyGPF2;JA`gtNysAoHU%&fsx~40&9jaM7v_GPu zWmxU~PLsaX)C9iLbKYsFXmPue@_}fB1Nb)riuX~d9fcUH!&co?-obvgZrL)h*mhr7 z4Pb@`MIdcIJY{UF=Ljtp7p4a)DJkJB0K7ftRd)(Y@TFriYV%iUy2;$u+Pz&(P0bz+ z0DQC8n(%YqpXkSwY+bphY(S*4goOKq=`&^&UDm3rO^#u|eJ#m0rwX;3v#V84L+1~J z8@gD(yyhDbQH3=@-ktMt&z{Yl8AgK9{$eTHw0tcgybN}=5q!+#_X_S)Kj)+D$URoW zN1Z%zB4Sle147BHYm2o|eUh&8Y=14DAWdNCEGm#tc~oyrm}(g+kULMl~`cA|K2#a-;!1I%vaA!`pFvTDO;8DnF*m$CWrh0cUW_zx4cJ`7{OWuE3v7omtrMD?h2b6^!^d^XEE+b>sa>Z|la3ejb{#F}bcLLJ zJZki4lM;nyB!FL-X@AoDyFo3Q!oN5=h&eZvq4%-oB9F)hqxZEL9^512V{PJ^lE)>Jp1ohD@C3ZC0*a8H$Gh72`gtA^EDT_8t5$ z@0ob=_;Kq=w=O^KH+=XN8h&_%xiW+z{u#`YEn2d~&w*;aTYh9A-7@cnK04UquvlAm zi!^RRId0sz!QM79ZNfU%c3Pk%_^7(x6EwGqIc3qJw-hS*9ed}tZUoA8T$yU%_0eOa zocL9KU|?bfZnE{`Q1K9q*SK*P0 z(78tsWP$3~4FFAKWo6m6Zy_)Vyl?MbC9VQ+C?Kii<#`=GYzY1IL&gd|Mrb)o>Qd~B zdgyZD_hyb-T0@hp4zNp2qP%*j>?FN~dViHNVh-xl)47VU7{O{7DO6WfbZz(F$ZGnw zkHFo(V9lTVLQ*^$Et>eKAHc3N_6n4hHBu*|F}3hM<=(6UQK$I(@2s;tL-b^7Wi#6G zz8lN7o;bnCGIDeT>q$-C@Rz>o;$bDb69sp%^A~oY4~Aemu7Y 
z+!zPHx#})!)^s_wYX>%&tI-=ZQNUTE1xKfo8yPqI_>kB^wFNc3<{`dWN5XHSOcM_!Ps)n3h|WI=?P zc}Fdq%89K*!Uql>bBvx>q5`7nz zenSS^32pPjKTY#4y=l|RDl6%}%V}LGO$WF%_n(P97O{o(9mS07j2WWd0xh{Mjjos` z&rUe>Cr#=p#YnhQE_e?&AI*G_BJ}MG%={?kDTCywUi0#nhW3(_Txw_6t9y5UJM&^>te z^m@QSAyE5Yx)i+ZAG*QHJ3TaAmTGB@h5UZ?O2jiP{vE_b+tdYHzX;Zu*B1W^RC9#m z`uyR;^2Lio*O%wgZAQv!uA8!)m&sxO{5tab?%fF-&$#3y5(I73kYm4_6)!~qIkr;o zF{>BtXxqK;`IT4awcFuTc)Gdpr#8Wj+ybnG5oiC21X12$$;(i`uwkmG%<#9JW4abV z&98Lq`iMa&!O(xt#I2%`eR=)Hjpt3@7gzU}rpz(rJiS9zkX%9y|E^F3m{;=;jiQ9o;!P$pm_+*df&dvH)W23m|UvumA5K@C^cm8 z;5{t?ngGX=vOREKV~m3>pkMOnC?m+x=5p%oJ9wWsL6_+E@oDdUCcja`K5RVDLw=gL zv_lXxpFV$n4UI!u(pX=nS<>p@c=%$f@cE%u@p!bd>(Uty01e!Pm6Pe!1&3z*uF&>e zLZ7r_7@__8eu~scB^S{FT&XW34iPUFCRH0KPi8@>(5&t4H|0I*5~(bOU1`Q(_s~XU87#s&ni&ZruFL z2~CNzHw%)&!%(P>fhRNlfCl5-rmyoTsL^w~R`!**1H%^;kqld2xl+u41D?n3l#G4G zuiNY5@aqxWbMuW`3T;hvbaY_17Tur%Nz9g?_g|kbhbsqOpohAwdC5cM9s12;ri^n19+p%pxK^yuJ!eWt_@FiG&~Cw>uw)jY5Abhv+n zbeU*PfaQ3fYz%`IID&8i!Ul>YB=e(3jVekHd`1KS$UDI`DStjP8cf4jFo#I(p*V761=2uA?S5{9#y{D2%T6qAc6KbD zZBVyS!`%4qA8)#e5goYM?S%TOvyqWCP$ltUuRHjB1F5ly-Ta-#=_DDD48qo-Q6fqP z<2Us4M-u^CQ3!Ybe2|Ebqi${^NL$(1)Igj(y5c(I!8}^t*Fgi=hwLwl;Q=up??c-Y z#K)r;0oUfV&NDNM{@eP9^Ec%?&yX3DXj-&x4XZc_X-3J@D0~{`&YkOL#~(gDa@n2* z<%(6^yq;cKV7l~QPhZZfE??>W%_8a9voEZ!qq=*pBK~_&EDjD1&!0c1yOSQZ{4s-e zy3)TMU5M_b{4H)+Sw$CN#08%|_mY=kiDieBpwM=5@x;Dyn};hZrf2-DRlV zU5PWF$WcP7(k+Q-#fLlYIQk_s@A(S^}(*4Ac1g=X1vhhf^2an_idE5o;-cl=a9Ou3o(=_N2vWawk_b7x1e+J88>iAd0Vp z|Dk4CWNVv6I0IU}Y#F=oi=FTm8VGoAEkdBOGO=kGWv(&9rF06qwd1kYl}&w?u}r)d zvtFW|6DGmC^`Shx)~z|tQj|u2*?)ZZn05Y!y)LIkp;%Vg9~D){%Z}d~T+FI*eZlcC zMMd~-#=;YCSSkN$#k6NQI##f&;@i;S_3#^7b$e$>%v}9GohEnI+C9ogQ3SK8kMyK9 zLy$zE-09e7!kC@Xq6shcC$Minm!;3#ROHY3l+-AO{fX(5yT&eD2OWB3wuU$#@Vd3n z1dn#8W^L486^d6sjal}A0zi$5i+R)Vikwai@uXp=S#TL&zOWPv~V4{Fbts! zsft{PAE&OX;=&FBMN|6adCy?jlarI_{+TSOc(8?=lel*xwVJq`MS7FD~8E7ebf_10OG?BZKQY%ULPty6?QsKKb-u)7f?b zPcM)bVcQLF^d7OuopTPcLo6Jk)PJUj4 z(+zhL1;L>>eVi3&x@_gj@wBmPjW;!0Xa`{s5AEORUFhdhs^?cbFy{Eu9}S*r=KEJK zUj}&R7NoljczjY#1f*<_jXVP7#~I~jqnS3;xNIpqSZhWl^du)!b^0Ow3s#gJSHH2> zKe3J9OF88$mM;&oU~KTL$`X&;wvX5saS|s_iOA+R{;OdrhcT|F2!}}*^1*t3tf$}F zHfjIf(sw<(#+ryjOo@qw*lf5}r?abbIFh6(o5hREx$#PrlhM`EL;Wj;D6zTA z51%|~Yh}gNHm)@l%W7-X)*f@Iq(Kj)=Iy!qE09geL2OF3*Yu3rn<+!_Ntg9+e9oOly!YK3zJ+$HuBvfcR;K2#~NIL=e}yGtNT#25WUI7?`HGS z#p=x&XC;Tc%FI-Acx|?6tNRTwGc3GQ&3U5$Zr1HB+yA&Z%O86Sxkl!H1M1(XQHMgE zSd_F9dUIhL-I6#+ed5GC7P1Km3GwwR`n5xa_HqPdcy4i)3}F=_NOJ z`Rp*pE{g1O+ja>Coy=|h4mi5|v!r*!JM4i(M{gQUaUpYGYWojQ@S#kLKf1($z{;c6 zQ~A!V&EIdoZY?B#fk0&V_sW7uN0c8N3K9fgY~1>?w|JiA{jThV;bM8DI6r*ag4@*E ziV>5GtaM~*zBVgsbf7~)}h(lqSZ>s?7l?@J{Fhli*Xf2py{ zXzU~e%sM+^ETsz79qPkIN~bIf?Rh39Cex6ZqaXc#K43?{K6$N zUPq5!OlWAfIn|#>RC>hAdJq@4E1!CSHxPhUaA!zDT|e3uy_s(r($I_gwm8uI=GCw5 z+78TZU5Lq>W6|}vn%ALAbeUl=yW{-HP#i#7_c_41=WTF5e!S+>5l!eE8^z0go|UzM zmo35%Q4()=Ok`hkEgn=D|9dC>IyfRU`ew}K{uCEpCXtz=&ruu}AIbRSF>06OhSOM(5KT&{%@Tud;5A8Jg7p?3Wwd z$i;wRFkw*7`BFC^-sB(kS!RrL0okc zfRzH4tQQDKYq_yJp2md%k&ZZ4! zU;Y`7=EVsf(OY@yO`td@ry~ky*J_-49W+Qy4Rygue)S!y^W!g(r*mW0K&go{Vjf)h zG8A>7!E5XUfv?02!JH~tpMF5mG@1>s;{EHH_CUikmeG!N6gU&P>xVFOHJbp%kx-{N#z z!Vgo^D=@Urt(W)C{aZeUx~+n82}0oLm-lZGN73f>p0NI! 
z?|(Nd_wX;v{xUS7pWw3|TRl;u6uYI7Hk-R(CRHnZZRdUK9{{tr1qCm|ZRXpp@-7u0sT&I`@F22uD~>pJ?OA=7OOL>X*j$GW9fH)ENY}GK z;>Wa)C{~jj0u>F{#e&s$SUf?F!dbP*}*2@N z%b%bnM0pjr98tT`)bldYhICDWR^4~~x)X2+#N`!9uBC-CS;7~y3WwlBi85;EWUpTe zrmGqiS7*4Grxui!D$D$bD1TU|K+na$38pv}7?HyN^hX6-5{ecw)%u*#h&9J^+mp_9 zwY7&<@wMw9pfo@hpaA?0YX;ZRA%fGn^19E@P~9U_HM}kFv72nrC>unKgiEhwpH}~G z|1I7=+R(z zCEGQAgXo&EJRb-_Dl!w!8v+$abwN@h*D?qt8a6C!&KEeL*PAMO_DKB$r@0@Vf?#># z#29)_(Sy-}=H~PBE2nN}{SZ0r+ClXv`VGRw>$!1Pt?a63+;i= z{u3U7h?x-m7(5)hk6MC%J^_-cTOH0G3jzd43tc+8)%XwKO#VY2S7X$guiOrJX0Xgyp~`Z|g|5fI554r`KgRDZ)L z?%31;dpL8(f>q0KX%j$dpmfzX<*ac7n4ikVd)94-6X*!?6qvkW%yZP&w4va4jQ5;A zZCd9et57}AW*r-D&W%Efrg=~Yn##Wsv$FQ(br0BxxYLMtj#Nu%svJ6b@;2o#nPr~2 zIb((<_1C{b_tzt=|I_ixd$7dJp92zISmk8e_o<09w3}<~f}vl_%8p+hMA4NZ2Lf-y zkk88nb#)&GZMgZJ)28hwJ8x-#`FYhn%h=7EP@@N#T#>;QKHGo8y_kR% zn*aYz*LcC_D3;uruU?%xb!s+$lfKw$aB>GB?|o;{WelxKghj!NkQ{i0aC25=#hELF zJG~iw*Kk5_Fg>LTnGZ<+m*1Zc^BPHkj|0g55%>}ZC1n$ zGPieD|X#3Y+sSc`@pVu?P06_?KQx=sA{^YnM7WC^gg}QBTZH{=(g<;sEcD_=~M#w{sQb#r*Tl zx?;U2O2vjYFkoG~-6FQ7odpfS>7!UmvH$`+N^JInv`;4!GX_G# z<?E)AW4B%RaF%c7S?rzqN=J<$#dNW^K&npICX0L*s(}XifI;L zC&n-c1MMK?Gf*6cdHwgtE|WUu?wryvmTjoHnNZsx0?LrTA+ZhPL^iCs`(b%CwJPW* zhBj7#yCdG`ev=9;eqv6?u5F4OEW5iDb<1jiE?L=AbU86(AmlG~7(8^CI9`coCLNke zdM2^}Ukfu7#2Ur@ZGOHnY$;0XVZmLq?X}-?l$hbM1e>8M@U-8)ZJU>sjRRH!=Y0?^ zu3CYUnL*S*<*;L~TL_q!Dk>@tGYT*OX#y!BI0X*?y^8trbi9=^C5xm5d4eY37VQX4 zVtgAG%s%z}4rDPhLgxXydhU7ByH~FZSBSl!CZysyN5%Hx>GY?`)stR;a&dl_EL}R# zqpRtTm&asAjBvgeT`#!z=SqMvW6Vy!1Y2(1s+CAzUj0_O1FR6ooJWhAv>>RSm@SPBWHRiXyy^rW=gy6SNI4Svc$C~&s~YrE+~)N2 zM}8zTfRaEU&_u>}{2L;P`u}Q8rd2vnr_1#18a|I02T6Wd^vacF#4{-@4F8a#pm6{Bw7M$YghvteV`b5mx{+*@Mo zOIDd+y0x`n#G8N)nd8_dcSGml)VEaRjgV?^fXknr9=c!qO{?`P@HA}9Bj$Wv_hY*R zK`)R?-Qlfi?yT4mR`W_)xu<<(GxDZUpP(*aV6h67l1p(`yo>3t?`)8s*GpLc7q!*6 zBE(?_ixxgk%JjSnpcars7}!`0*Za?`FS@7uw`=Fgn`NFxlog0_9PLAM*5f_|`b`0o z1A)w|ICcCuR&;|^RlSBt7)-kzD>wuTJDy?X%K-wOS+7B|4V%5hOhbm9y~Ja1aCZ z!XA(Y+9*M@K(Lv4Kqd@4=Khfa!wf8k2>Mgw%Ox}9xWg}gMG2wouqYCr>n`Dr+MCx_ zRgiigOqU~q#Kcki1_lHrIkBQw(q5Dxo{V(fP!h%(np;s z8tI3L9QhT&-*JtD{Ya$(LhFC=!i7iKnw!O&T|uj3UW>GUv?M3EVt3BF_)-fFuqaKa z`?xeNU_!VnUujGU< z381R3P8ZxrpyVIQrg>AyYakxdOHp4UpgtV6H5$0lk|O)Z&h-Zu@9F?34IXF_y}leE z1lmXQUP59bh8yQFDdG357@|l!N(77m`j{n z)6#Tq)c9g*^y`WAI_=YW24qCaebn3iC_x8>#zTyX9zV8&@kM~mVLa;DHj9=+Y^3e& zNqTKNlt$>h;cbX5m55o6=i}U`fZY#z!u#QZ#vIVBg)eDc^bnDXGTE=6ZES7FIp#eA zrMh(KQj+$s2k>b@3}9M7i{~EfB5WCp&k=thGSU@Vm+EM9W5cG%^9NZ6Fcr_g=seJN zXqB{bT)Jx27U+q{?tGH`Qugb4W)LVpS3(frGea}xV=Wpmqj`Z>T5M}u$RI*dk$C;# zpBEyzg>fw;A<@00Zv)uIg+iwLzh3K?E2|0ApY(GHt0BRUF4wBppbON+N53JJmS!&`RA;}8*rA%3<_JW z$!c?Zv-t9}g@&81 zmoQMrKl1nVSM1;4s_gAOqQLi$lNwJPI@Hglc_x~|oG6!7KlZ>(_CPe-E17i4xJwB_ zEGIUv28d#`>lYwUEs7e{sDQO16ZGdLXp5#`SPbQxcnwAc0h`EO77UhO!?8PS9PKoSY0oMwdRGxog4Fu7-)chku>FtzzEoi8STvYA%q0Iqf`$e7E#Xu zD$>&t9r^O#L>~jL();qD=r=2ixv{60D+@LaE zUXLGN2J}GTMUi#T^VjdctKbN&KrJlZNYz_Dm3HJ7nTd3Z8XWJ69UdMmp_$UKF2p-l zggQv0?(=m~lqTzMj9`>z6gq0ykA1kEO>mAD!pz)Uyu6aN@sbeoZ!?jwIafut@un@soycdyr54=aou>4<2C4FD!6F4vaEI}7G?g{f-Usud=|m_bZl zP$q^Q1%q0k4(<9ekFM}pn(s&zR)f(^h+>k(i^-urMjHq&&_Wzxk!rZTb|ug!mDb@} z|3ANe9-q|{nh%j^UPrH~@RTg7Xs|>+ItuFoI39Uk5?IxSyVcQt!T;A$#7_coyQ*mu zh8dc&vht5T$W0aocDr(pPMk2|HrF|U5AhD%8yj$MN)o%7)vK++ZQxdzoo%T9GW%br zU>m*2ZEE9Np+ zJW+Mx(^=%?S5un;{_X8GHq0wprL$1h9}?(!h-E2A{qp6b*@m=A*)eXWO212%mSS9C zBO*jPGTxv+ax_lTD)uW$iei_pdhjP-1xEBm6J;dKYAa1XZ5-5010y5^RX@Ib3%-2& z(|B+I3y?s?8fI*owJKGGsQH3*1uZNpEj`Dr2H-J2e%z%vj%aqNL|Gz+ zTXsyVmMu{T%)WZ}I*lq=#A47!yXLP7>Q67xTmz020l+JoOwn0mJHkkfV@pRb zV$CccbWz+J(u9V{4)W6RQyDd?TcdYV;@d{z(|$qX8;pe%6(p^mSheOQQ5w;y(41$( 
z$=6ilhgSO(Y|{D;DXfI!M~wDC?6llBifkGJ(;)C|2H>6`?0L`2-}vy?%8A)vd%Qe3b@f@uhCHfq zru{8?hVAXQ4{z1K!;mXwIrTEZ^q0I{wUo&;<(HF~IikZ8jpvI>OXHijr;uIexRPEC zKRS><(~$k<^8yjNgH(Ul%^md$ts?pH2}M(yj1H-uf_otgn`IH*OxLM3BDr zdnyvZY)wlvKQ7p^bT^CE{X_NMTm$+5I~qG3)Cn`{FaA>1XcN>Q^D!)&dT+XkAa1Q? z$v)VcBKr^+!5mk({Y#nV3({5#n=Fc`r;vaEwgNmd|#vj|R)62e3YEa{r?X#A6 zayjod#hX~28OG!#TvgpE)#*t&8|e9DcAM>{w@;toW0_rM47oBh$2}ACfzUmFga0l^ z&ky$ViIw}5R9FjMyH*X2Mkn*$Z|S;~&ax$?xOnIX&dVGNzn5AjN8t<2e+6jntu^B$ zYQ#*Q<-rpq1bo^Cpap1lFBY3B7(Iv%VPyu(KF?xV`KPhu;Y$1Syx$Iy&UQ6qrqVyo^Gb3 z5y|ghQ;nw=gwXCCL4SpZ&Wmf7CHiAgYy9%BZhX7_CmZiiGQljDdd=#ET2kPGWg{$| z+d)0WsNo0N*_P!}$}5wT9v0d4`bs=ko}-T=?Xa5}t->#cZN27|D^JGk&lINr#;il& zeoz`f4hpvx+<260cB>3;krEfV#@NDAVd~SX1kx$9u{-b#LtJ9GxMIz68#h^Wfkwu;)ngY;7UMebp)8l@h^ zHB3TUnPW`+Ds(b?UrU}sbC-LhPN{TUj9<+6zSCjB%HwrDI+44F3w% zl>q&MNQJwMaFKH^z)3vAtB)T9oS^=$+Ay(Z2r@C`4%F^U%*{{fv<#-~orqnjToypwjl2`$uUS;P2%Ep88O7mVn=-Eh+s zV+6K`BFp97b&+`$YTfGdSG0Ci+%Uen5lW<;4arD!qbI8rox~^JG^mU)d)a|-WL%9* zI#qdGFklbgxvC+yrCaWHJ5W7#3Y@6U>D?08c~ujK5xfwTk!w8NjHw<(Mn%C5E*8sD z-$tFiRkGbT?EJjFNZv@8XG7`e`mxk(r7>M4wRE7?_oSoEaL*J_F z|K+cZ&B<4NA85s_BSQEt$EJL{$uWeMD1^~ac%G&OXqm<96kO1`Crl|TW4k3}8~3c+ z5*3akI`4Sw@F9SVw%E8xFUdO$q(!ge>W(7jG(7f~(Br&8>j7 zCN(wjZ&ImVjg7vVUeoGN7nJGTTkhHQG%6eu)wDM>6;w=4*r}-d>z6O{iL|FJMdI&6 zy{bR%^sIKQ*pg=YMH(R487!vR?BQ`1VfYK|LRhgXBu{VKF)wZ%-1sMArzX~P!VeIA zI`t|D=HZhdm?XPL1J}Rmi=vThO=-L*T;1K}s^35dR440yWKK%4=$Lb}Y+gWks1LYo=6 z!008#xELw=a-^W+wdC_v1YgGg$H1X7z6B+M!%}wrL~m&~hY)C;iyJh{7e(EW^KHOYfu851|PM(sbEB*CKbKbra+G<9hW} z3I89Tck%C=(CXUY2AWk|5IQqj{TS$gqM|-#Br^B6PjfX0i#FFIaifvQR^A@mpyP&l z<^+eg=Wqqrpw(l&%_@F;0lLLih_9ku85|?CufoZ-F#v5Xw0-t{Ox!9MT3>v_={ykr z`a0}7PsflC_6EO<1QOV9J-@QuF~aQIHMOYoC;zTKul!p02hk7Od%*s)Wb4-V&swum zFErdfW0`lnr|nPRGjb{+o+ zYL0?gNQdnE)~DJf2rw!v8KbBBKb2wvJlv}vxLAHYc}V9Wd%Q5#;aNP-HH32U${y46 zKxVd74K2j>Y$;j+TY4&+o4>--xR#XQXTU2rJK) z{aLg=J0kgyI0vi0{~KzlT*)iK8I>2+ybs#AnYP4k6hFppOi|db=9jCe^Q0@p+38I| zuUG4Gl@jQ8ULzpxN48}+r9C0-4N%iJ=P&0y}*=vM4wpOh&<;{NF$KbixIv1^s}~#-n0eeVt9fimOO#ub zBy>!7YO@Y083zCzB*mqHwadRxTguDxaJoJUIzfo_{&%hsMb$3^@|Sn&R%#DFQBl#i z<|pn^tj^(o|BQ<^rz@9qiaEg~@TKWbI;#AUSyEC0_Z7GCY^P@z$yIC{Rc4<}S*lG> zy&Jg(63AZ@{V0A0H?2al$wfs*hDKg=F%^*zA`E$CaYGK}+IRWd3Iw5>5JHeIU)uPs z>XB$IW?eRcyX^%PWjC7dikv?&hGoFOXGTj{&-(-v2+5XjV*DmO^?sw|-TwXYSN!ka z^iKenvi=dsHHA~!?Z4TGeirw61~vI#bY}+VHKMcBHy=D#to(QHb;e7Uy~%o|-2WYQ ze3{~5C`n~1e)KZ~qfBVk@u70>-NiAyqV9PRGkK(N92qxQ>n364+EFkJKocNGD)?*w zwoFC}zx{&P&U)YBGw(<@*a zA+>RzVy{*-Bg^~5U)hkMaHJQ3>9E_^Wu=R=WtWX(I!U^ zI88w8MSAAo?!ft9eNkebC$DTx8!P}EIRr<}zkI)uubtGOqu?cRSEpOg{vvGX#zO9+ zQ5$-v%#=UN%F=dO?Hi;^IZsutVSlksJ@i&Z>x&PZ56o|RCTda5rRFaA2NHkY?h6e} z!^zsG+i9Ks@wCq<3EWTP&j zco&$lC>f)CF6&Ze1QT58e#nddh5{EZNENx^WNVW$bY3NK?Fq$h)91MK}Jfq(*3q4UD6p zs6?1-#N@3~VKWS-)~|QN8sCINe}>FIgXcUW&Ypz^;(RKw?Ye|HoX<~@O^(}9qmL;% z&BHL`$JonHOTYTQ0n_ATUF+6E+}hTT?d0sq5^{1Zask-Zj3LyvpEp&yjzD|{;HJI< ztA$deEC&%6!scdXpe@P{Z^4Wh?1I4gu>!q}2u6Y^tFH$CkMZ`~J6v1s%FPelve|S6 z!42*Lh?18N$BIc7?G$4I3C@2(?n;W61jE26lza6P9(X2tJ99!1weLMaH;Pd!t>(9c;lQJ{*CZhxy9@{~-Ql$2zITo-oy zSGBRbj)yxhKOeqA_SOED8Uu{gB`;a8b-6bVNd&(0s{U)In^+eWLCN2z>hLWzzQcM} zgCI^wSWV(y`a2JQJr3qJFFSi;db;%0Nq$(LyntAGg9)Rom%SWjmEz*mU+Ltb33Y*B zaAj7>8C7B!%cV$-iR8r*EMRvp5O{)mghj*@4`ts@~XA0P>_m7wu`Tkd(t-(TH0-|Wh%fd(3xdsb@#2DChH9vtz>?)P}}`{%>j z?|K&>s(a-rMp$9bZ2Fbh_;|AKT-fF~--4sQ2x*}O_hiIW3s>WL<=*>vH3gXCPjKDY zcDU2S4}hq&12HmATEnz7%}L~C#b*NLrV}2c7cJ5SisKD&!3Tz zVN%Hb&mqF`;Fh*>>b|vWrl1thU%ESD^I4f(-=bH09!OmoQ8Cgh67^bUq-_y!EkD*m zgy5>wvsx^83-gC#ii871PZM00DXGTY&F)#p42AhnX8)$1Jqtb2Hn&0JIy}gIq-0;- z#WGes}|+# zEoNSBFq+uOXT8Wn9;OQ`IKr#|%{syXbrc43ym@wFYHGO8Dv?h(&AE0b2UTQdW`YuF 
zv~??XV9H5L(~INlFflO9!!aSv41DmyRfOBS2h4IpxY)VkXJ&j(y{>}?m}#<9eesk0 zkRB5E;!_$irH~GfFg|zpSWfPGFmf}rq0t7 z-jpr>5RW>0H3x@M@bAD&>Wv;l$$sYrWvPhapXkL7*%CTQ>vqf%M81nuu^dr%Uv%Z( z2_6LEep%g9^a;Vmr$Fem0}IN2x`+zwoMghnB4mTXLb?ZNPQ93nUaDRGwLF^zc5%z? z$VGX?KBUBXv(%@~|JtyMh!ar5v-8b;6(D>CH)FJwe7|(KW&_^UX^h7(OhPXN?npnh zD7jI;zVE7;yZBYO?^_$4T5_?K8Kd;wSw|vdYi3-l+;g!C&d$6c&NQJ}Nc}!?E=Qf= z_DuivB}0XYD{CFWoEBtXemC^$jnHOZ zYzwM@mOfd^lH3glhaM>@(#CLNr@w#sCL$YCKR23b=vB{gNl)R_yvQ zG@Y8+#wmz8v^inn;edu?YSeK*UDqCoTxiN8E1F9}+}t#)q6Q~p1YXr$MK$p95OMY3 z4XJRg@cUBXBcO$%vc6=SH}FLI8^7jmTB7SGh@rdJ1!Dp>w=j$0{9S9zM|RQ@d@Zu~ zG*71decH}E!5`(3JCNoVB$}HItygaIGH!~E_Q6XVXYLtD12v+G`qi}@{#<$qqxKwH zX8yOE6pWIqPrw0Ww5E#b`ZQ^`u=38|yCRkqG_acBBOFDsyhKb?w5^Qe>L5a4QLVqX zjo0YT%r$=$1_~k7K4TYe3}~R}L`_ET#fIR8LyTp3cx{ee%Ew&>4C}lX<%v2mvnS4} zhb8>#>7`08#s@bngrY>-TkH9?e{ZeH*Q(t!w<8nC__+7MKy#4!z|SMey}i9i#>My~ zfI)(=l?s>Z*v&^$g|qqx_qLzTcC{W%b`?8t$f(mr06_Qnxme6?ncl7@5|Wc=64cSx#Nfs zMEPqND`FeE#=u5APW6@9LUf`n+w6HH?UzF+crfe!gtF&5_*n06S{ESsHhSvmQL4DS zw%B`ZWd8|J53ttPe9t4Qzb6l7^k2)pMt%Wm(tUXL6YvjKjg9|-n$q524@6g27YblY zn&x|$L?Ww5#0QO>OUX6G4&)z{1J-FoX%1qM{-+Ro6FH4t6!g92z)-ylCEgw#D{_jqxY+z@5#_ zA=dUmKe!KA!OW6>tCJK*Qerb%laB_<=5r?|UObbe^QoY!IrEI>$vITR-`9C|CmdO|X;Y zr?bXY;R_4DnumTIVUSi`VQZRzrR0Na)Lf} zyNC6y{~is1a6@jb1fAheJbnXkA+q-HqL23PKl6p@&{cv`Fg^6}@IXM^*gU>k(cNw;1f`QxRWqR4spFC$FmZW)tGXT1`~KBw2?TY zcMeosG(Ybr?6dYKuAljzUl^UqP80VP%&Xcd2wbl>Fb=h_VB5&YvFY^B^`=SkETi`h zKOcm@$HaO*qwq*6k4>lq_ilDL362t)yav5>OpgaR)oz4pPL|=-X75F3;$N8E;X7YR zc7234wr(pc)WCLQl2_)ET>hIsp;$?T;I)VN(Hzs)*LS>I_t5o51QM6NoW*8!ata~? zBGpKq^yIJT+Lp*aSE#rG!hy)gAwzKUv0#fA@ zA(sw&a5FK+9J6W_3!W_03IDB7x<-dPFH!|MI;oO*cr#=hrPhTo*zS zMQ^}Y)Yz38*)c73f=xiDWmha0?MHxMIzMS?CG}O>(~QS#{9@`*(hVpIDZ4OfsDqVw z&{~G)N{AIRld-WeiM^I5BXC~a*~0XU5;U$fjesd&qeu{!GcsC+D@9wIQ^pFdITk~> zYfWZ~y2Z|<95%_3rNr5L^kq-XXWAV5A0iKS@Xy~dx7Ko|6bhWA(D0dg8>?mb({ifu z_VyjS#pc6Wn|GOH`Y?|6B&(12GzWc(y4r5$HhqMLmz#b$KxUmTyQX1s(W}o@jNd+HToMdO|5{bVQ6?S ztleT1+;AVF`lh#S(K6P9f!~>1T4qx8aQ$Bmp-`Rr^8S5IkidKdrYjB?3k%o#i()s$lOs!K!G5h2 ztK#r@)pAVi+qcdixR&T$ovHlmPGo5Z+a-UdplX|6PlQnV(tX&annrnZ?Iy45;IegU zOfxMR9`oI!xHE9|v3EDmF%JS)za6 z1SCzj6B~<92XM1*>7pUlA1GH47k5JMkBjdM+*+A-_7`Dpz#8wkvlo;mc#OULc8%f> z!QF$?@vbd+xOhZk93s7KpwW_NyQlIb#mN0<&z>P}$yH{!@UU%Q8q-w%WMA6x!W7GO zRfnI8vORhHShrgf4Vk&tocLwk@HLL*?_i{DyxQw}pBB{;ribuJ_6Kl`f@bIW_Vsmj zf(&F4qt57MajgK?l#%xIR+7{!l=~iN)*lq$YKNE1ZD%0u3d7iR%6)x%LD1fs4ju}l z2OxrtgwGC?M8>@?+}kLwWMS)Ih121~8I8FSMy~MEQdqJR)b|VbUx<`3WT5g6jBg@M@z9OZqGly83H9;h=;2j`v%7Nr?tEP!f2Cw2SKq$b`DQ~$=(RTj`xmbhT3)9E6K zD(ii&C`HZcPv_kGOh`4!$wH?JI0Fk|B{rE>AAAJp5(cK?~hWAniaBYtZx0e+=mMcV_twYWVMTHD`+q@L-TrU=w zkaeVQNm+kuv&Ajx@vW+M4rq-noK7+AqS!GCqu%S<_5PJ#9u=jt@5rCwD2Oe#-ko2} z#W9pwYn@8{I{3Q_Z!{4CJ~U}Vi?#q7`##V&8b-EjwA5sB2XZ=ANuIC4>p&;PiH&Ay8HHmkpNRnV`C$^UQ_05j+cjrf@a?SR>L)^ zXg>GryYmk{mf$#BMaRfUF4i9HgwaF^ zBtPfrvBRX92B`W@+sR32xy-nH(7hvoqo57^AG*8B2px{sBIns8NL_;o?Or_%i{AsYj@tuUFn`Xvn@b4DueIXH})?mTPVE zJ9zAG7H(fg@+)Oi+SoQH`%>|MsChG6ar?7TH?ae^g`aFnr(`1%z{kAP(Q$9!z1u9cw^W=-(SUiG;HN#l zvu26w*1}uWZ@4*^0nUUnFe!h0d;b=bE*k?Ay~EDw73%dem|k|6?)+X_SVmKf9RDph zvu?Bhqyre4@E8ja>6QOIod{#NGzCY>iee2Yd$zHVmh0MBvUz=bg!&6u`5u6=xZ`%I z%!8vv67kE~u;Ipv-aH&Kg}WlBob&28AFCMu&FWl=VFob(=sN8>MuNa3@}DLQ!RoEu ztVHGo4N=#;%4+DWCJO$%0-s|Q(L+=Ct)$p z@i9NOo$m8e-)y#dvk#L#7^&Br9zWiA;zQ;VSAyEYD|kQ7MA26%OleZQzm(TB>RLE) z_SS9YjEo|#N>SnHt#u^cy&w5pxVEf$>CGjtp`#L0vX|ADYdGzCb|%$mG8em1TMnYn z8F5Hg|Eo;n|Mf7u3Vt!@aG|0@&~_JF$~FTTmzpu_wIdGp^rjYjpDC4QemcDEQri(SF!6X}c1itWKQ2dUym zK-n>*0+_H2p~D87yTk#tD=j-^IM4qR_rNA-MT)gO`gQI`2+DekztG^d4&TFi{U;iT zyVhuz^9v$2@}M;spdw}u_;l-C-)40xhtUv)=?&W|RZc5FE*lSb3IFs4ean*NZAJ>h 
zigWtqHRAGzA71Qd*rxVh^54 z8MVEA`AiC*X^w2z#6Fr@6Y)2nGyXTRw@)ThK=2yDWfZ^Wh^4*GPs^qZZLU{7GzG2! z$-4%^_6v{pXbkkwxeA_AeTV7lvvaZ0L-CANUPecMp4zD=5a%vnc(LzmRLc&bnBUI^ zM^68opl4yh!jAQx!&5kf9a-90whdw@eja)IAc>FJdOrPf#s9mMmgwRGIP= zxF~dcp%)~Rz3Ra zFs7H=r$BGGkC_$bz?nRQhRMqY+AB1A3%C=?ziM~ALasbb6eaoc!8_MVaf-zIxO85< za`mbmMuFIoLFX#natu6J`$8MVpYMl3pz6B0Jx}R5Ut!s9qjWbgw-5YAxu&oUP_td| zlJBX)wnXFU1KT&ilcls@`{g2D$_r~&%^9`@dln1W-`j$Lzb^#O;+b-_9GBzcSZ*9D z7d-}zf~k*cOg9F=AHV=m;OAdlAqRl4<~WYJCp^BXe?432sUHPlazaO?lkuON3@2NG zr2w2N6w4V^hLYLVR8+9VFpI2%nMAfQZIGZsHl%atv{$;({xIv@r!6DLXjl5t9knKL zd0pJPC-2xz-G)O<<-ZCL#3>YI(lRo!_Dkk*^R#IfL|-rFWarD4$Ni`DpqcvN7NN|l zdHFcR4#S3qO`cC9U3hNxj9bzlSExMB6?5j3;i)@bFSm+ruyJg+T49ZAl>yI#GhDL! z-kj>50>^h|2UWI@x%%wC^x_agbTMSXY1)$7b7lNd`xrXt;q$jsnv2~AKQEYV%5ppN z?;0fc74vtd7@PFq{&er)OFDcs`-K)i8@;Y{tSYiF`6n#>eeM<;SmfU|;Tfjf$y}XY zbcTA!*`bMdNLWN9RGlXonRa!H*0I;?Z)MA#8^|A(v&{Xc(k@GF=2U7r=B@N~nO7c@ zN^*53pic}~q9v>8XY^4?OPDaKwbZP^D9yFx_y+-9wq*>?C@%sL_xb#4Te2wKL0)J) z&d0_AD&kHG{5|&x*^6RRL8L#UGeJyZZ_YyqqdEixx13xrD{HEjWw@p5K3i8iMau z9x4vfQK`M1Th8YJ9eCQ5RHGfq$zXTr7o4@H5e51E%=bY<%M9KdF8~!{$r*h-C_Za3 zTqt!m&8_`cUS&J4W}ziNWAi3%#Akh_!C?6VWYC#D$pm?CUf?KZ%uIBwtOaF@xLg2T zPBcU^GS6GwAVKJve;CL5GlrKEW$X8@WZ5^-ABs619rNuYf9&Sg^|)jAuJ~g*D z+$K?-rmhzbaMLDfsH0%U7R1c0-Jw=H_GnLOa)dNMAHm*Y`e+;l9GI#?=#*gDe9J-V zoSb!|gc7Z@^Sp`3^Uk>~r?)vQt;A@}?BF>xW-(SLyijE0K3qcV48}B^|F>YKDHzRN zh`5Ldg^@6M*9&aII1aNR5Jft~0dWBPt49*rw4q6Ml#+nU{yTtcs|5Xp#l^2ftpSmC zlB3o*rUbwwsGYl*SXi)-y-JR~p}mcgbKQ2*ihkLgF)HxOAEb1Dsna(9_o@IK6crP9 zIJdMIXj2rB>2u-_KRXhC)7i6U!@mP^b(*hSld^m_=PB&5W1^!&^uSX6`MFybnds_Y zKn5|V6G;WxBV@71HLIJ9SQ-urF_B2rGE=;j77^$jrtOPAlsYr+G_6SrcY)T-62MA! zwy-gSJN2-ktE8`5^WZBHD~t4@mjscg)}<+L(D!$M&Z^N?%MLioulS?5Va|H^>z_Cc z8G`&iWZxg!;pm<+-+5fTp(-R8lTGzg`-E+x^Gean-afT4_t$NMZ~NY_{K#zBZ+m;s zlFaj7;hEc7O_=sB&m5^Nn1CdK?jl(F=gsVm{x?9MZOC?A!P`4DX51b)t_X5C_^+=o zDRtL>cwY4Rw`$|K)ngCdaSdZifX;#Jz;nW(u~@f`7uBtb+q-RXQPN9s?yXj*Ciak^ z#fAQ}6a-=ch1(S=UqNG6fTp=S!#X>x_b#xV%loOm`&LJ6{KEAu*@ptC!W2QNkNR*3 zKo*em)+2L#5H&WRHfz;9ziEEy&d45B&mTbrndYjsrR)YVNYKgQ&bF;@n7{jwnb@L( zgYKHc!GOtQe0#^EzEtWxn+zD6NW9(PaM5+ug3|WAD0@vd+AcN!GA2HpN}ziuOdJNx zjCa~&uD{nOonJ0dQA+mZ9)_4R9et6el%uu&p@Ia#y4FVv_+byo@x8~OgZa0D@lV~F zP>rz1gpLg6(3y_*F70Wz*@ zzG7maQ=_V^JdJT+W=n8F3iBw5u3f%z<&JVA4Nk!6XC;wFU=2Ziw>1)`7*T;e^YqG< zM{{B@xYpMgaAF7Jg4~Yx(;MV7)0l)T2-eTji6gYX$vn zTj)7j&2$Pb!Eo@XSBpuIMU*7(diG%hd#BYE9GPAwbGb9On^wQMwSG(Dt(|o@ttN}Q zadT%@=te&*IsT$6D=6cu^h-xe`%l-_&+E^ve#3phXfL-}1&uP$V!!%%X1(gO$5(jc zc9&djVnrI0Nlaj^Y?Z*pYBGe9{YuF-o5J65H>LBsw_FbT{NRhYQ6p&a2DQ%8-^*4K z+$0*Eo|FnB@rV2`h^#9Gr6-dRB35eSlsu+iJPUWOEq)0p;HY%Hu8BztDr{ScB=eEw~gFYyJ)` zsh0XumiKI(Hx2xg%cxr{?>;6A&mA+3Cv9`uMiEwet`|$wm~7|evJ0J7U&$yv)Apq5 zf|50B?sj`=@%c@`gKZKNGxpUXEJXI@?_}FPNPhyvx}E-G=U(Ws&WvEE%LhZyV)%h_ zmHqejhI(q2YJL=0<{&+_t{Z6_Yp(fYCkCKNi%_?MKD(Go!FJ&g3vo9TlamJ*-(nZg zjZZrQ4^u5~8wH?-Wm>PU2411LB_%3)5S@reAXKTCFRj%IwArc0c0%bXD6}@miP%%7o>k*aloTP1} zC||1nv=rtSv=*)L%_EPb|AvV0+ECOEq?{KvSrj4a1OqoZ6xawoSa@y5>+#yb^+&tE z>PFtl3X0d{w2#JzsB_He&Saib|5ncl|E2=gE(n|1#a8>@q9!n= zVMhUe0*=w3s-+F1Ou#dKdM4%9WB^fzT7`96*P}$Y#R~94BYDKu?=Dc1~OukS1#zHCT6?D0(k7X#X}ZlD!%=B`s2Mk4p|=`B{R>8TZdg` z{(c&mn(BI)iw`ZG)T^p;+@G50vbbNzFb)uT$L&0$4-+rL@xYfZ<(>JF$4>1j6ii3F z#o!oDajHmb=sO!5xmT}--l7*Xbqplx&z-?=sCo(y6*K8>(&sNn3)8b>Isvcn?;&bDFIq>WV#W%URIu>+FTl%o%=nNnLb|8Er7 z&$z7XzNnn|5D1z%DlUm9*AK{TEIGjD0GPo;Ob5KM&Q^e=jO)&$!GYBgu=_z?JJ}PQ zx9z0XzW<$tdD&;A$?4w*9Prz)-es+VLx>0y(Za#S)$}0mlcE3>b0G%gKqSsGoZbB^ zt96mUHNm#D#q^=)eGZgB8uK+TAK@71PvS`*turgpa{G1Xid=tTeaGj^-`$SN^?Org zidy_W$J<3gzyPNQD>J2jtXp2h58bQOS-V$0(?ip?qO%R 
z>FHKTZ@U$AI-_Pc0r~gi;hy{Qge2L2d46q`=FgnO*ALelCQycSQW4-u@FACB(KRlN zpI_f^k(HHIQYwW`+=EH158N1TdAm%x50uC#SosM9bV`>!w{{)@N3ha>?jhG1bfQ4m za1FIDX-sD`5+PTO@MO^$X#SV~xqTH$^2t03zz+9J|2NDzAUN0s#2$fiS)Lan@EaC6 z4LGbAI3tc2$FM4k?*;V*HIl{JYoD6DC2!p&#!?J~nn@0-od=o7uz_n^s;9nTXdoDD z=)6OGZtc9;aLdf^^5Gx(1qI>un*Ni*+#FB!Jfz-WRFGysYqIL(>l>E(ytG)4U|r$m ztRTSRU(v%iDCMwydmGLY6ZchoO8aqd^0156oH{lnE8uylfs^Rp@9{QX_%(em5du)0 z{Ztex2u^eVByfuWCFv)Ny8j}b2MQfXjqKm29Ah2aJ}FX$Y)u7=(^<=bUn@RgdCRT` zgUcX#27Q0b3(uA1h1%sU#d3pkE2KG{6$TyG|8N$2_wF1%E1(8HQy}m2=E3uv`(Kag zi`_Y~H&&R2=!5;#@bJz;8&8m9s}-%zOgSjCqU5 zskTOYIpSmFAKYKhLkr>hNrLH8^5wjBuNm~b9epY9+v25yxQKv_ii`|``|!@+a82Nm zwu-nl=FVAR{|M<}7ibT4&#Y-$IL$Q_acSA*4*xN>*@iJY$Xj*Ai}}Fj6d^TVfA;!l z$IKkR{>ZoLX)^#3UnPgOQdCYOXK#S+Jkq?v2gu8ImtymjhqV7i!xpa&;RQSVR)OH z`|8yzJa%Z1VL~}fE{G5gIGUI?Tjy^-0%aGHSvrgIH=xD|Kj7@SH5l!A4b0kR z(hke-eLRCTk#(<1F`)%Fh3&8zyXQGLp8p1T zaL}Abm#ry_P`i&)|2#P0ET~tE@1{gBuH%_J(nZ1f=z^aPX-M%9i+%@kML|IdL(vXN z7i^-%-A3dJ8%P4XEwgzFGRl@nF-^^t!JIXiWmpfR#}H+ftwi+yj}OOvhx#x<%~O*U z#A5tx!VeI%$2uVvOqJQy5?DcQtV1_K_n<|Cx3(KJ4&Q*UI6{rXau!{DC2-yvd_Be7(bJ zRvMxGWa|0wv;*qWKggj1wmK?4Uxa%f>vn`C%Y8gFyew?D0tG zWEO?g-ltwpUB#ffphC`8o5W^SiGJv+0H332skY!HJYXyhg`i_}T% z#`(uyLW+vOtP(h636KEmIV{i2WFQm{xDyvi^Fk~I1L9*tgvd#&neHg$7FSm0{JaPi zh~plhG!wA-02hu}f^~r90BR--PLCX|d}r|WQ#lrm{@y~LjF`kc-jYp8`2HUVa(S|m zb#N1Nf+;4eufluyPQ_6W%P1#T9(4%m%K{xno1kaN@;V(hBNYG03x=#5bWEV=#(}B) z5jux1PM*AID%OtEjs6B)B6L=xB?13{WO1xovvm)~&QR+fa6tkkt=~YeNOQuuM#~1K z?(jXfH{=EuSRTx4fs#G~JRaq!J35J+GTjRcGhb+}N#$!{uAh}S(>LWF~o?SA<3^O2M=@~m{ItC#L4prgbN-BA9$S?7vplCk_ickh>PEik(d?A4DNzKGQlSp792hVlGpjQ zV#Xp)c=}xx96?)!3C%`iwiz4&Zgc;12%N|rOv$3ZJeMDF^76hxJ|r=AaJ;Vivq)37 zUDd2Yr8v1qBUJc7=V>vzL2U;@C*(Q0W9%3=1Nb=@3>M5J0H|efG1G^^;v^}L5Xlro z;=$WP2T1OgLKy_R2a>Q3R>9uc0HioeR$Vr6htRKkQF6d^JUJ;z?5ekprXxZQOKjF( zC2hS9Wp|S8JB$>+;|>_y#HiC(3?#8@smfy@%%8=R`|qQsLG*)bi6S!q6kDvD#Er5H z2GMFt0*hc@fnsPu+YS>R|B6vCZ>;Es?g?m6PJE#2}S}4_B=fYbfejs-#CJm|4<4!k|{5SZO`fH;tLNv zFTzLkLxh*07Y4#*be;C^?-#(ZZrsmgXEQmC315FKmCrYCYUyDeUT4^{<-S_b*~&8i zESJkufRC|GY6$NCl)R6STY=}E<2TNsuxkekf(@6wu(1d&pz9bsLfnf&eIs-nQiXPp zDgh>fh~Rt?*xbFN-Jd}vCuuIkUH@T}j znCaC61qmIB3qIgHp~{Mt-09@(jH>!GE}5|2n+Amm>FFzRc69Z*)K4Ejq7t?*_Xy?v z00LSIfK%rAmkG&FP49?L|2_NnZK1 zhZfT*d5G+Z1W2MBPGJrwh7nhNh8ap{9&9Fp;e6_iP_~@O%SRB69zyOP(p^I|P zK79aSBW6%LG(yW;K=H4|UcXfE4M56-gk}(hywp+Q$O};T*NHj&UC= z)d}+M5~9*qay-qX*Nh8K8ytPU)84W8ZNCk1g}~JSNx>U18A%@zRjcGF#`~ejsF^L5F|sLrQD% zj!v>+CHnosT?`Bi${)OfGBF6QM6h1Po>wiOiN`NHgx#jTu&kHnJD9o2+vw7}D9uf$ zRCzn|qo~)M;efs!e(!D)6iX*9JGacr_}ODPRT%cB3mts4m@v+Z7s*#mNucEY@55c{ z1zGt+bpV#r8k?G4;}|U>SGO)1u!-LV3b>-fwEP@|l&pq9vSn;}6FFe(ytk1<1#zw!;<0zMa`|e)1Ca$1r0(HJh!pC+aOTMk z)cbx~f0JAnkJVzOy{f`n{cF!znGf|Gm?)NLDwfiqAdrHz#0N4YeJJclzTOU;9zl_T zuS>P2&SMOs)xjfwRC`aM6}$Wln-dVx(Cm^0vIHzHE?`Xc(B0M!XPYt{d0=ToJ{Se% z1^mh>#Q)$@X`lDsyOGrua1v>^#uQ~WdaHu$c zpjV{41oXksz<|zn<5l!gI3)A1Y{PZR-iaD_RuN}AJ~lQ3PY4BGlQ)mCxjAZWWuN72 zPN}Q~!Kpcc^tX|uXHjrNzDwwU z-MKK0ai%uo$48)?IlKUPn699#?ES~M9Kafg&hqJ*a*lWm*2qXWTr3}TJ#X|TM26%D z4i8G1?pU*Fow)e=y`|JDIJgYg1Wv}@V92X>*1%*X+j&6oQ*PM~NtC^MW&`wuPC`iI zLkeOtx99&s%w<%=v_){-PHyV9b&rV1t<-B%Q-OVHbO&kRQ<_B9fNAWXGmp=c-x}^U zWY*2aqG7@mRMo#w2FaQ{>O@fE0E8$T%X#B2A!T;1OqEusSfNg@x)CbO@&SMbe45We z+Q4?54YgsY0gecqPNaa^(Nh;;$pM-aayn>KyFD!CK_>S= z-4L3En)X6;G>9B^W0KUm3{2n#Lx*h&T+B!4Fr_)R9FrHx;yf0KhZ>^E03!r%W&isE zvo=^W_#qv1{R$V%_Gc*xSwj-U$koXRP>FTx(AS)7OH7N*YG^E?jG0M3 zAoYI3P$(B_v2}FJ6{Bu1pB*{MM_8ocNdKWNDI9E~^MBkmgxaWK%ghRu6ucT1dHIWd zt%jrYQd`8QS}g-AQqex?gVqS#K zzJ2=;T`n&9!WYm`{>^#AtkYRfi^#^)UvVCuY~WWsNWCOI-Ld7H%CXmRf{;N0H%iR& z2IzvS`}u}Yk#^J&uFlRSjLtfe|H6^zSD|Hu0aK`v;Rs$o*dt+0>Fa3X@X{nTT^=8K 
z_uvaYXL`IGqeRrH{&R<+njUI+!f=LuU@^{tH*cc#g3UX5mLi% zfA$6S9E0206C899xRGyaeaCsorGBW2LNShL`fkL?$hg1EWk-!XZ;qs}pK-GM*7?_* za&LUS*qZi|+9K?inqBUxnl9&`UJhPte|MU%v0CxalRy_Omk-K9d7C$^x)~vo;`UfY z;xuE9%0&Yy@%(LuC;6S8*(7OJTb_({+hBbCMgEta%}3I|O~3i!{lV4lQ>OTaPd zP;cAY+p98^Lc0-wUA8EB{5(B#j;JyVHzFK0H8p#r2trDci;#*dFIPvyq{=`rL>4`~ zl9_pCZiF}V?T$lGi#>Vr1lu1tA31UoI$HFR4R|!;qoW{+$7z9Sk)A011`TR12z?xsF$1gQ%uD11lF83R)KqK&mRjfi{0UMguGPWibb(bm~#Ym}LIJT~t+6_nHECIe##} z6e3bdHzROqk z^Ye3x*@ma2*r=%7J8M7?u1K{#KwAivY@0VqXbcYykAIHw-x_i zPoZ-_brA_35Wd`8T+aWdBj~+TNrL@T|2VRG<-iD!xVS51S;uK#vB#~COsPDh2B>}` z%Qd#PF1MB}ZQ8u~18sIzmh}9O!Z!p$ozd6nXjFWFQihl4X_$BA zf_PIk9&P)>iSg-aGkMipH4|uki^>$8FG;Ld-b^qTIL4n;*VcZRl(hG+(C*#dH;(xG z`_pDHI0uQ)sRWLvS}EthA(+znl14==CZ&L>ZvK*_>RCfRG(3eL*orJg!>uGq9{ zqmfY;cAL~!z$Znqvcj;eQ!T47n4DG9)HE(Q_{^0nuaiTO8=RclYIBaTzAT7?06fgvHhiTSr{dU|9qz#3GZ zaOwcNPmiurV`^$@j4SRTTJLtwRWb__a6iw%#83LTKAx~dJfODt`%}fF!{=9xmWD>W zEs)D%PlCt;w6KB1FgG>5!Ub9KfF5z)(sFJGEs^ox`-IXH6BH4_$$yI5E%x~Pk__j^ z*{ph_Jo{@jTEBnt+MK+19TD{a2V?g74Odt3Fl`TQ%jB*oHQRc85s^AIS6MMRK;HIAxQrE&MZle)2x zhl0(Tl^}w)&=ah8o;_;^E=1_%OU58?#JKYJ?|oJSb$=Y3#JsKdw+Le5o+1yLwL&p~ z0ksa+A{!bSu3x{NoMx7@iqKYv)et}(y2x)d2ex$@fZ2KbiXddauDSiT$79*Zz~Jbi zLyxPQHB3xml9GlooP2zrXNSsWVmGFQl-!62V`O5w+?vYC5JxMLTL+!Qmj}9SM}d>o zFVUk3L@#h(+=jfU0lhb1;p`|BzL3Ye&!NfGpF^of_sp5i8e?6Bxj0AgE)@}ed*FA1qB85@W~o#LT~=& zuu{tz2Yz_{u1^kVgp%`iAY13aXCtsDUnisZkR8aHC)aE2`2V@NhL^_{GNNd48Me_g zGA7*G#Bo~M@jN*}$o8ToJ5J}2^*l(Dy0sy0`<4N$F~gz`nj`>-9Ygo%x1CP1J6-Kr zMK(tk37D&U7RXY`-g=do7`4hLHT4&e-0qW(dV_s^Cr+O(k0WH$>+;XZ%iq#0wvJJ$ zyWm~3WPK{XJ`uJz4Fpp{+53n0l5?W|9j6C<;9q|rGFRoU|~Uluf(HHK0V`SM=Z}@xKKu8 zrT0&Qz2-I;Ld%bv@=e3lHUGf_O^nqE(^hpn`)6XpB(`o%+-9P#>b9m92>}$cva=x= z7}Qt%mB?4-e5m|cBR+z9J`3zQrJ2s704f;&4&il-JJT(O-&GnvC~E*hqm-?5)KFmotiF! z;P#>>$}#llAd)ojlJ6LDCqB#M^z{&@XR{$pz zAuaXwu-u*hLPhW;eEzJ?!4U>cuI}rD>Z04aRD|cwjr6w>YF<40f`i*;*tL82ZV=6F z?d@Ngica7LKa7cq2?`3rE@@F6r1KPb?n0w>IAbGQSn91eZzLrirPnhBqX@$Ioa7T{ z&Yb!AX`g@qHrl{XN*m@9^0$i#xzT+?7UPMd7eq%QjN9Sk9x5R?Qgl9@Fiihdc;pBCnvL5qHZU)A}ync$NGrA)|*I)K!9Pu(%>TAci_n;jf{+-p-I3u9JwdQyyTWx8d)PpArX=J@eVZUR zx2&2PS!Y>m4y*7JiCk6YQ;)Iim}(>H>bJtL{OEqXJXWdCl{8NkRgSo082#xcl#*dM zH>~CEm4EoqkbMI7^e|7lX{0KGo`#lS*eNFFOybhJj8CQyu9J}=*O(zmr>`ZnWf9I_ zEH_zaC1FJxs?Bi!eg0lvUiggfz?x`j1!!IXS$GkTM)=~zeyoni2_rtz(-OXAU%t2j zh9iyW(?@F$4GdhufXk)2_b}~fVPz^(8IppD4HfZ?jWg}X&z}&lui)?d`uZZmSXf#% zY)Qz-cx*#YM0sNOnkD!JGqclbPAuo}seKdQyy?aBQ9ClGmlzpAg4#+PmDk`vVp9u>-NJeI+h_GlRlaWR+WN*ujGEs_ zc19H-?V?=mI|d8{H-wIKo@ZQIfK@hkdO!t4t6{qze?~6b<`sSXMUt{68~k$QF&4)rly`beLAIw%a}?w zeH(0UYHEJ@27O>3S>x6zk8=~AlCgGvg@uLBpKs?Z8|p_!89oAyIRj^8Pd?u|wCvfRwC#qqclVnw*PxTGUJuu4}{SIltJ%~!AXKe*u`LZJdIJluJT zUj_*aO{05R>Dy_rI}`^Q=yja$)rVXEeCfno3@KW*Y84Yx036O4`MC&GL-u2s!D~%T zu(B*V4~)BDHlKA&Qrnj=Tt}{W`sW-66hLPp+Q*_lQF(ZHY*UWjik2mE5$OtC+61^n z_;XHAPXjGwX@eh*9>rE)=5ZiHOI%}eQ^2Tu1VPvjp}N8o#{0vsn4O)isUhtGE_-(n zS?h)O#Zqzn3t`<}B`1Hz25TG?(0(Qk_@_UhYYAJUg*GAToY;5?rWk;#bG+ck79B+p z1Un59(cM?CM)9}W`QDUw>4ru|*78K{z#j(T&qj{%vChrUkB*GsR)tpI%g12j;NTYI z&Nl2@IIw#4U0lEP`UJVvHXXORlJm;PIQizVuI;L5G2g zmeys^g(zRFH7)-8?$Eq*3JMCM&NbV8e1{l>5(Rk1npzqeT2`g^PR?Ja39T z&rY=ZBDQb;-xa6*jaE!j$@8a{^;2veK~({$i`TlD_SLIbtEj0V%Kqvp5p3B^$aZ&u zn?#KT-d9~!_1fZGc1cM|Lc%2qg<@v5hkOwAYi`RGoQr*jMEo@Qi{ZylpB5Ju9336W z$)4K?&uxBwe)H(}N7yV+Qy)!@i+hhEv$V7qFWp*G{J&@M9x(w4q>XQZ(};@!4fw!< z|9cWqMAWaLhaAkzQStH9HO68v?cc8*#F)Qu4Z`#7%hXg+F)^9zGY7*cau}Ql&*2)5 z(^tT*fcZo(hM2H0#)+w_sga>Smwfr+;_3fOi^Ro>2T~3L>+x*%yoQAlw-ji3ychX- zWMUNK{PKZVvu(bJYG{x|aY%4*LPA1ETN`i#dNR|=Y>1ohi z23D@{LVCX0Kz|!$4!9f?^cf$+Eu9eu3SP$_J5Sb8R>LFbh~BSXu@+5;#|0__W<~Iv 
zWonv*9??#Ty>P}2m$9X#MRJ5(=W@>jYEOv*$UW$(!g~N2)!x}tgwYX52RJXL$QPBM z8ID|^`8~H7lW>fcl{GOb2}TxQ4ZZ}>1rRRt`Ld^yTs~A+yYIgPRXR9% z`0!z}?7EAh0{IU`n3m+z@NjDiB@eSqBqccrRyAAO)XdDo{x&;z?HcaK*K?a15(1U{ z_N`pbUcxg5IItUQKmsaIlL4q%7todKrsyl!{5IDmv=4xj-y&r49_B%;g=jbAWZkq0 zF+9a%dFkU-!Sh`xPu@mqGOItVVVibq8AAjGOjF=Wnk+4qO~2m1(fB6{73kj|Yiln9 zQG$h>{<8Cu!$_-2mq_m~%hFWYs%S!XI|4c_lU{NmWG(~+)bANNN3`f!2)$iIhBf3gsdKZWA|DbYSastk~?WVFJUek#-PPwHl2_0mCc(rwRdzp z*m^i2FYjzJb}&%qUHMAQ*^-=+f|GBkuU`Pil#LD9IJe$~h@ypIEGIXYTvT9k@nUrU1i} z;oX1V1H>(mniDRP9S`AGtY7P6L&cp&Ss#^>QvKiK0xD$W5WVkD?fDq*Fg-Un_w{R- z&Nk4($QE)yuK~_xZ1nZXrX{oqxoB7$6XCZZeNR9~aGee_i{X%iTU^Q6bwCJ+($Is= zq$V>^!3YZnRrK3*=4}{rq$X&wI!zp#B!PV)KV;-fM5%S)AB2U4VV|^5b@p-8#FY1H zygr!2n_pN66JF3dUp4}Rzq~IU*tajH5{=CH+!~GdLDM-nI>xn^NBYzrah-_6djKIB z62g|v=F@dYz^Q&ca4cvhA8C7!4T5}pz0wVH(0gvRTtK;F=zZqE-o3-%Ur|d8ESU=E z+l*9v{`>)}(aa?;4?XI~%Y zRs`YcimoPn?+b_n+C)mZec%aYHrfj|N#T02yrSX<%y7X-Dl1ddEc6*5n&lvG#qKzU zkU4%Nr<{^f zMQiI{n8SfqQLvZykITu+8!Q;VL;;QvI{{|CFfR{&r4L}_6uX(yM>h%a@KiUTg`~dmsD-IdM{s&cv7uG36%F`DF22jf2a@?IMDX>6)1E` z6x|5O22FM1&z~g-?&M4fO{ z=e}&ZbK|&$JmoZ`PqI&LX2wNePXl^lKs)j9B|ZxYK4RNR4mGQr)6T^0T4QPS|2+<0 zy%_DRS-ba$h&*}V4K#j{MOzTs$DW(iL=sFhYVRf_q-P@K`_0*zLPA0^QK?-a&*5^f zb(V#zn_J_wAjgx7UZFkW7=NsK@}vXqqf0g$Ujb+X#F3Q(R+W!<=nSSE#h6Jf5tR(Z zDLXrx?CaQ$#(rA_--X4+@tL!P_HhIN(SrxKK-xsu1NMLt1IF_BahflT0BzV~-CS8o zff$Uk*ZNlqg6H-Cwk=!ufV{~NxsQ+Uik+R1urOvOZVY>M3siub+9%^1vwIF74y?}R zeoRXslH*BNfBb0D2SpALBj_hCCum{ytp*l!uMWy8F#DtIeA?|3Uo5ZqpNkaH#bE>K*h+`vWJ* z&|PnpdZt=9#Wrl)cEDFHCIwV!UqZYsW9vQ2u`C{vfI>_^Zgbpe-SLwr4|8+NPCc6Z zR>czPx0c>=fK$}^=}>HOvGihMYet^_2|mK}ju*A(z$P{}rMrhDrSoq*;F7UDf8NvW zB#FsoRiX&b@nWW>!MZKQpfrW=m;)j7!nX$B${1OI+~b*=o3<+Wc*dt^ux`f`nmIs% zpl+BF)aV!cU3X~9)~y(u41`T+ue`dPoE!vxbG*9u4ULURiP(3%V=1v5g9O@8*%%nK z+F#yEcy?X`_WEb!9f&uIgtYVxXdd8MVDT^sqbB%5CnoGs%Yd2zBnhl8dJ+-wv$wYn zA`8^)Ow?N9SFtVzuiV-B@3qc0T|hTrI&gj1`61eR0*XVOh}(ZU81I^oCxOohmE*@w zossldGGu+xh@sg^Xc;Dl)@)}LBz}W=iiZVD-khOc=}yzp0W*I62FxNX?J6NZ8kG5l z`@)<^xcoXCs1kKPq9>%W@#Iwc)z7I3TP@5i^$hv<0%pq}qmrErMI;5m z*Us0}j%yCjLsOsed?SqVgeo;H(U-S+`RI`&Z(4hnaHwXkgIJUZBM92Rs1XWMWq@+S zDEjb)gkiu7$CJaT<^hvho0=97&tRvebs0aBbpY~q6qh|oji|1rB_#B%^>AE%N&!0b7r*+=JS1W^% z)t)SMVILY99bJa;iGJO>elJ|TNcTTme<}jZO04to>ciL1U96XW;WM;LeuQ(9daGaX z`z{YH7EmH^F?{tx_5fYZb zp7Dp8Tw8vUH;X!ECQc=gA}}rrz3cS&c1xv6+yuj^N1``(W*rYp-Wl&alDxVt~4(;80ue!)jn{NTy3>ci) zn3%S3uRwi*d*hZZZDeGdopW^3GQSHJ6+!I)wi8&DvJ!b9qzAIo)3GBPuZLvgCLXw^ zFZ68YzgPSomOyai?(i-nMfZAPhz|_B^>_-)-Dgw>ae<=jihX_Q-MeCrpt7sPGR5#m zkILM`C0#QT3E7bWpq=_TO!Zb}eL-9OT*}~$Ki_mQErEo{;+_Q=zuk(wK~)i}v}K?m zD&ht@9yE{r{Tu|@F#90MHBxsgp?;8*lpOMX>|e|~@(uAEEFwuW3h#T!0<@T$lanCS z20##IHXUjf?eyq`{HeG4>&7G1#}{Y+f?l0jCcS}LT9`VJ^7{3|>SkZMpWqm9=tzQw zYjO49xQlSI35&*lA)$2GKPUdIF3ifxf;Wr-yRaCan)(KqZ#y?3t)ttVhTNZyujeYAESM%17hgg#BS zN9@_72eyEN!??il9?4Ep@SQ==lGAe&?w{<06&>SogaL8iq;e9h%BVv?D&S+mDttm; z|6@WAy09JyTsB7&SAgsHU{fktd;1A=O5~GU%1I^TimtZ9iy1&SidPLIe%!#XOmljo z7_bEr$vVsJAC{py-o10jey~r<(FNZpZNFHlpLI(34+d3p2UM^yfjJl&9X%-{ z90B1o|8cl8!t{HXbxci6-eN2vFlu;sIOM!s%jSKUd9hRY$5>NR;L6Ldqlw@{Izcw1 zTmx7J>_y9ydB+t}$jH9a)XH7=1|?as>m-kcG3r^&&?oiPsFw07Rh3`cJm(Np9ik)B zP8olt$2y2f|P8yWla7RFIOzuYT z%(r73e8(mM8y%oGq+qmEiAYIBXq1$|>IHMa6Lvs_X=p133lT%Sf$fp%hxZ5)QT^Y7 z_MU`?hor#=y1xVBfnS?;JIp~;%D}-vvg-b#;u0OJSFg^jza&@j8C@h8!gWeX$sGI{ zj3CzQJU3fn8z{)Ui?KKwOr-IPiS5$=Q6I$&F_|pNfS72-rI#FX;r(*hEIlew{I~+TF zm$gn&6N$78UxzHSK=ow@iC0Zbd{<*%8Xg_(K~AgQ%Tq<|iGl)~+w#$ixO`IHaaF;L z)YUAO;@JTX_Nha(1hOQGAhJgchz|OzrR!%)b@lc9-{EH+?A};9z00cpgiTk$Y5eA% z9%IR;&!5|xnN8PvEM)?!L;K9g%3@V6EPpLPO8^KT6ctTKMZQrhlDamP`1fBaez9kQZzShXwf;F_Znp9*pws 
zXl{P;r8)b6<{4G{L86I?o(NsXPzhIXm11=9nY42ngNy|Y7g)5#oJwX3k7>$Q8xRn# z&~f-qF^uV_*7)1X1id0pPhyJ;hKdmj9V$fUj@E1OWXfz+H6XNsVK6Y%BQ2L{A@c59i6u+&t4_EiwWuKKS&^Of8q|?}vwPV~=Sy zW^e zzi)z|I4~sK1ab}v1xnS^Mn+Qb)Z*4(-Uv4P!ziS7E4u7r{DqPjowTKKH zlh;J66A2F;3aT*Fi-@YI`QGaa0?^_DgGO(BC+qBos(11)t~Gs_xUiAZ~{of zzR?sl1DN-|gl7@5{gGhDbVyRSiKBG@{$adXToAeE$$)MWE1t z%zOgMc4J`DFx2LR<;;wa1VuJ6CT8cgGFJH!w)dL|~U4t#4s2ToJ|7-3i&aF zxS}GdMJ`Z?bvx_!p2gmr?Kfn|noOeOb$tRf(yMUzfi-!RlvJ-0sF}>)%cew3%P()W z>t0O|j1T}i@W8O5mFufLDvqRY@H{bb>MkWyMuvh~& zBtAhw^Tuotf*_ILG*%%A0D1*WD%nM4_aMQaiBuckk4?SoT3y3PXwcCT)n0U8afU9% z{=VSr=LfnAHB${w6eHq?bw^ebo-1nq_;rfNx4+ho3tPi>a=aC@6PSHmUAs z&z{8#dQ()Clq*=WV2^1m8FL#p5dehndg?e%O7w%Vi1b*>_w)ONdakpp3%R$v+>GC+ z0QK(a|0sYD0bI33Cw&g1Q=FdM~3P85+S>t{?yfJ)8nDg(a;PH57*c3cHS?^ zA0$v@;ht#FarvM8IN0$od4ODE}`T4apnnF<-e zmi+d~i3!RvtiJ42(xW)n>$_t`=!s7RJv}{!neN9?2|a%zqnO*4W%plR`F45&d{WW% zS^Ez0sX%Qbqw0jwJM(KR{9hCNPr$ha1Tb;fFp?wrJ(eUnY)rp@|L$1eXm1ZTI1?yu zvK3dLO_5OL*|+cI)>n2g?4*J+O2tt?4+0tIoRC5bYM_>u^jBgl2-m^xLs6Z~Q10%eE%|Up0=N&`_+S9s5_T>=6^zOXM~N@_|42!vVlM!nFmEgZ05d% z1(iWv8j1@>uLe)A5qKu@DWMqz3OtXd8`^csVgfn~g=s~`a*_AfF{I|XoITry-lfp6 zu>ILwBk%z1#y1Db0=3w;*RClF2NDBz`i|+X+t}pz1-&HoF8zoDI09d{e>OgQcBWa^ z*iNf(P*M7XZgOzV))VG~UNILoRnNe14QfkKJ5xJ#!Bc*opPSn=>|BQFJzxaB@|1Ho zZrlJOBfW{Vpx6zvEl*ca6XOxww&>77sot3dPzo@7d~ZGF_4cqU&|z_~8r!sjNV6EK z*5h1OIQQ#16rG%=!DdO{SU4-`)c1aE$10@d_|)TLPhWmtx6y{0On;{dW)>Ft!ne@! z1O)@Co7?fD0LQfoA7~J=epkp{mfBbz@K{4QA5j4Hp;-IHe3W^4tz8f>0Dc|5U$Ndr zn2`&UKxX|{)xL_0`?kC^wv3=tT8NT=(&Y^li^IoYkSGw{OlH4uOhEt{{W2*jyI(r?jxek!ciuBR}d;C zoG7BYuTUN8AG)Eu^DG z?J4McpYv$ThY#d&DA>+ct}w5`ZTC(h_Zl4T38@e>6zl*3BsLQVkM4k%{Hj-Ot}|L5 z(1ev2l}zn<&6?W%9`I}|pC*tAc1rEro)3Ib%>Fry3O9(t4btc(Je$|grrpay)FpphHgW`6JR=UDM$?}1`Q_E=(k+-q3DB$PEYM8w1*g)L6p zxKYdpQBOI93RqyKpPt~8koc(aT=H4%;8=U+-3J!vBWS3ro57>}^zmaGU^4xlqetCf z{YIUKGimN1f~t&V%N88Gd^ku^vWhYg+-cr&JN(;+erIDobm*nV3={?R2PC%(?%e5B zT~&lL0N2GGYR?2Qtb>wNj@PLU7jFde1iOyc4w@6M{psC|b?I=HW=3)OP4m1KTJz)61ED1~HxreeQT|Um$Mq|PkE?zv0MJWQ6 zB7p@a*^V_2CFXIe_wse$z>F2ByR6E|VU&`!yKT=7>Ysb#@~Gh2+*A@@cdQZ<5E2Ae zkYUUspK5Ex4j(?LtBYR4ujqQDXmsll+5sA8i>^7rPj}qT#Ky+P+?;oNJLH3sJC7c1 zV8f*SdueH7;LK#L&VDlBrV&w~c5K6V_*ehD;&7OaV4iR6CU?QCUdqM>sC#qw{QNEg zQJkoIR$p5i;4gX?JnZ&>)q?*Jf-3e1RdPd;!DOs9(_25Ab zqD;?dFgmKmk5}Ykc8P($es_rn@Qw>&9P&8?JSepc&@;Fzpe|f8H*W>)jFJ!692iq# z$Sz>7nS=d$pzS(tz|Z%XHW2sf)jX-yQ!xypf`$RLgYJYy{2;MNjc%l+=Cw{j?iL|G)yrkjNEcO+piNY=KaZ(~`c3fB2fm;iK+iJ3s`TL84e~_$DuJ z1~tNFk7GCE!15uhrRxY~$zc5yn0;E2)7!|ED&60>A1>1Fb)_nWDHAAm_Ih!$u5@t- z%u0dD^fu2*Z&iSR1E58d%#~TCnLN2J32yZ2mP)^>5QVXR8yM;tlcYO*~Et*3uTJj zKDxblX>Jt3EfuHc6d@~B$KLP*==;LJ%L*QHs06$ZVbWN{{xh&+Y|P^YCR-$SO# zb#$lr>oShh(gE4W!}OV$*oIL%i;3WZ!7K)W8iNvyc!A0~V=Y11`|{N*+|chYD$U`V z!cQ@c9SdMI7PkLAG11W696#BG$PB;3XHja;g6R5F*pEw+7~f%xn>D*|!4AL`Z|rXj?qr+hfFJRb61Q^ENf^$5TJI1ljO;@>wzy56DvXRqJCJ4 zWcmv|9#0M{`eq1lpGs7+Cvj^hrlw3|>nN1jov z`T~EKZg#*_red@;^)9mD>wD~D*=vPIel@>h6Vtdj4EOVbl(E)SGw<%*%8Q#vmS~0a z&r+3w`oQy&K|G{;qi`9Mh=ALjN<7irBp*V+3=AWmDNA*9TpYB-KuvS&m>OW}q4P)J zKgPR))e}s(4D2yBG{o4w2OA+Ityl`dk$q!h@NlrV2cngA$LI3rJR0oA?8mo-lQShY z_As1Lh}ZW>bH*t|B2*G!EW$7)Mqj@J9Ldb}CA_alN!PceoWzF)2t-OSqo`=CQ$L_l z3a_u|%^RAAZnV>I=TgE4K$2ACB5-Glx1;yvUH>>X$OMdxSc>wBp@$0b8H_*q-~rKG zm@QZwu3mjEbdipOc5OD`w*P-tTFxbJZ23T6aH{5&nU4R$htvctM7FM!wP6Q zgBB}z4f%6!cT$9C2?$+|&n{9ZZLgWWqG>Zq8ns0`4UxqPwl#X!)m3L0+@pX*QHE#K zOEA;l#wkHb*wWMlL(U9*Z^NaPIL>qI5UFtu@URFz2uPJlc=_tRLP$S5fH|2{iVg4hp+ z)__VB=NPos3mxe7+L^*jf&?E@LZIxsgE>YHlvF2Zif>Dn7cU30lf)~=&9rZx_=dEC z<||B8LOY*mSRgMybY(`XtWCH>xTy^D;Y)~N;0^0_t9Xd0c$dim27f=;)oOS`Rp5<_ zto2A^JJ`TO&eOAVa2Ume-uQSk3S|f^X}covy9~Me@6Rt4q&a97#l9wRWNlMZkUtbf 
zZ(L^sgD+T(CVg#6nKkay7@jP;r*mGuR9!JsscC57IK|%zi;V06?+s1^c4=yAY9KB0 z`T5H8vD&t0+^*=ybUu~qM|kZ~^kQ~8JPIR4OQNe_dK$76mXtI^3tv~4l$cnEhT2Iz zy{+4}!Jp-hF*>5WtD*Z54A5mYVsR3{vLTLNV5ytQ$jE>;>*y#PMhI1(BZ$W+goXx1 z-_RM|%d|*jQxGJMT$`f6(*R<8sPe;rv^uCsDB!=7jXi^H@VcRAUWlzW2f65!9Kesl z*w@$I$?5dTlk*UA0mD_5_;WT8!r|C*L=rxD9G(-$s64|ZuFt#<3}iw4q#S|m@7uTw zRCBf>dTw#?c0{3%AL(z;5Ot3wZ!ENxmn&#!@E<&gPNTaJq#9aU7=fhPDg>zr;q-=v z6B5bwWj5#)eVkhbXa)lWv_~s^H1M|SVCtSf2?{!3TpaFpZUDHjElU`mYNYW+Es`RZ z+EopN!hK9c9fO<5B3#O?1&p}A+)1*Vr=>+xL&J>a_h!u750_wa#SHP4++mQTE-&AY@4k{d+JMiG0A=83;Zno+ z-C9$VinIaiVp3w_c`|uCKq!H04_tuM!Go`plkZmn1cRW2185A-=G5d`B8nNwR1_5$ z`)dq0FA*3^ioF;GH`Aj>^<;uEoID0IxbPapC5C^^@AMiJ+;`kqw#sUa%M+j=qjs~V z6TOJu;4m8-8pvtVyB6onP?W&JOLi|jM4cpYAL9f2|$QOXf?V^ULY^{;Ot z^Hfws9DG7Em`FDL5B+!LQA0xmoMfY;qpeHlWDuW&o<1G=_AM?Yg%i$xPEIKdi81bCZrNfv{7xiZm61l8}PjKze^^3GP8m#kp*0Np{!)5xo{)3l9c4 zfXV|1i)1rfzx@|ldZBR{<=S#`+~(2M(Yat_Lo%qMRM_JHnh?SN9@jQ|2ZwSQYCf`o z14mLtx^<<~{PpXD=pB)`B|LIjAI}2aflr`aAq>-w|M_#((a|)vnsf(3(h=4%GJ?fr z9ta1OYe4Ow+qG-h*1Z2?I(rmt?5kA;j__m=QJ5pa{Q|Ikd|{UCb+3C0mxQ&cDF!H_ zt+ZMJzXRYN*6=G^nFxL@bW0#fKvIN<1l$f9E_;w7@u9YjgIYr%4`8IAHR^Gi*j_y5eEanO z6#Wgtl^x`4B+_CnI`^DCF7U(25YDq9qFe&;AC5-yRwsW(G931Z@%OLB4=dL{wo8sm z_Ts0Snlrk(FN~^rqQqG@Z)V@J1=AaRG)j#o)D{eHA3GHPQpRw1)4*ff;0_}Br;$pnvoqWX7LWq%?AOR z$%)UBj0E>G5FplSYbYH=|1yf1*TW^CVy?oD0N=#9m6)yy69q0YdYIdX++s9IUxy+G zb;MDUB;?{SwNZFl13DbzQv`z30_le1s+!DJAF=_9G%z~q29*d1uwl>rv+u4UAHsBh zI;kGxQE+#2Zf_ZXg5D2(FmQ;om@*Oh`0>QwJ9TG4t+Wkcuu1eqqpO9sQh77D3_`j?wiM|~@lv$t`Wly@I zpuowzyMC)Z^d15Gg&8|Eijxv)NXRn6rmPwS;!Kxu6F?7?cz8waU%ohKiIp8=B3L_c zyHb!l?%yY|tfayiT7seAqY8P;zl(^v0(w!tUl;%!kb`VdM!RP7#mt?@h; z7SO|!#SLtZR{>?gI;N`ro;64)l)(5a@R6Z3u=*huPL!_38K1|l2zbpO3;1g}nhN6gDexKB6`qQ`pEr zG5~#1>wpstV2!}V8E*|cb^0`1#)u;ba(x!J@z_6p{CFnS5DbJkMs|^%`~2iwNa4~i zuP?}htq8L+&7ddSa-jzO`0?YqyZhlohiF8^&aPg)y77W5To_QkF;dRUWgf7#7PfB1 z7l`(oXydQ$+($&EB*e3A+H|kO55z-^PAd9uGBYOu%lG=CZ7;lrJthDZ$Wf#Q1PR;; zgf6@&IO}*4TRR$j_w3p8vopW3tnB8QdDO{0s&+18B%m;9Bc-~EGBgD|-{xCQ=2kPkD7G`Gdk(1mP^>%}uYjK6A8B|ScbMq!a z;$cC>`?35N8nIHP8W03ZkW>tXJWr7W57APM6_{t$wh}ge;JZ`ShPkwUtaHZE`a>>k zPfg(RT&|(>2M_9RiLbGOTe(+*riK>qRQ1vxCG*Iet5!EWTT`fzx*qKc9>Nyw7~-J$ zP64M}!c4e*xfm?I|8}WR*Q8xU)4UG{tSI>w$Ds^MlT zRG^jMAOyj$@9x5~guQkO1_o&Dn08b)UBP%L>mAa_9S(srR2tmO=vF|7ED)h@2$XC- zZwN-X=%6$3Ye14c)hK!L>=}w1aro|dZajS5L`_IAVpk6O^^-88DI-1o>-JM=@$t=Q zu$tSlWeYjo3*JuDaDi4@@{MR>gE<=WTE<|)_zo`8YS?=};dB2!jDF-~q@}{9l66c> zT$uJ_G}hQm4*e!Ikn$4JV?+3bZe0;>O+kj@I<2Bx|9f(>|5v_8PxkjqC`Y3&-|Q1s z`-dsqE0FE*>zqT<7e1$I{F1uOIx>KhR8&;Rzo;NPyCyTvsO(E&M-`Gq3H95ov|Iyi z8mvK9Bd$~arHo*O{qp0OW>g=rf4UK4KkG-DGb-{(@6i90;3JFc8}DFR;1#a;7!W8WeqS49VcoI`mia^wfQdQfac@AWxSuA`Ad*~S4goTAEjtEL06F5k82?=TZ zHJxX$f2w(Pb#>Sh(=QZQNHj5{dfNlcO-$rkuk~HlP*+oHY-);AQ%Dy_IXOHubR}Sd zW<{YPT+x`n4+cpR+Nckm=)sJ@y1L&Gls%d|&kI3_$H_rl%Fv)9^nLNj(XH`dixAmi zRf1V#P)c)hL>U@>JF_9%!Wlx^6T5co^L>$!0CHkvc$ibt<@IpnLk8maO}O+BRb3^| z$;x^rmz=;*2s&C?2-!nJ6wy|43=U=bKWSKL07R9aG-R~7+1dKzFsI{UVj}S8@^^3F zN-s_|!gu}Y|E~_KpV?ec3eFM!arimB|CMXU|LT81bhF(IL&3P-(5JqHJx7_=uMf(O zeSa@4ze$tkvUA>$t(*SVjt@{huh^KTJl<`o9zxgYXl-!PydwBt1`-hQUR`MoD8AWq&L7P90FfP$wH|(jw7YUhBURRf` zsy%WJOSreZcl?BH9(edor%fo|73?kj# z-3WAtg@nF(ISCSp^BoOWLc_`{DwOOu-^{8(>;^!C#u^+vf z?C;Oes@SqmWn<4boSl(LK>E%Kh>4^`#O5LkNhE z|Bmb2##hNAah9A1TH|glXHgeb-qjUR$3^*V(rcvYZfT(KM(4`U9v9h6{9p;%S2LAz90lkU74C z#7DeGU5dr3UUP6R`;Wi>ApFzEtcEY3+VqN;$o5G-d98vf+v8?NO2D0cJ}9;aHchJ zp8gc7_S2gOm7MzGmC@2?!WJ8=`MU4yDpq~`p2LBbXgd7&QB4?THYDN7BOEK+%~`OP zD-<=G1*BkP0NA}4*L$cdjPCm=!BhD$?SiT4ajs$WFKDlV{UEAqy2kk!Iyvh_G?BDN zGY(OM|6sxY9z(5gcV8suvryUdU<{;y0sQ3ed4h-tN76{3{>;l@{98LP589R6`1qpG 
zM2g@Ha}QeNNpRdXgL}-Q89s#KfGlbu^k4^wY^*esFdF3~B|FQ?Nb?%qM#tv`4ugze zltXcE#YTLoWZxnD9>tgvQfAl#nZdOw%~<9a)fd>ul#%u4=Yi4YXJ?@^;DRt=yd)Bj z3(W>1%LR!aY7^P)eBa-n^bviA8*=l84fVoW4}m|{1Fa$Bj-aeSfB{{Q&&}C_#Kdj5HE z{$L04yGJrA$ZDnNf~>cIJakXq?HiPeNVr;GKTgV^nsoZdQMV284o2+Xg_%Hy;d$Ei zOw@*bdq)Z%WVIL3(c=ipqV+LoG|vTbhrCMmz%zcU0?Hjgx2WQ=XdGBP@p`*UU{I=O zUI?(;Ekdxu8-Tf9pI+j{moHx$qwi>3e+{e-=s!O{-)MH|Z)pfFIuwMApQf>XXA0Wh z?^q*yGcP~?XLnH;S-m|5Cn`AygZlVECgPz3f&_Y(zmK~hiwp4c8)1YUI%V-4#_J9c z+H`16c%7U3qW?L*eti6%$Xd|xMY7y0vu4&rYj1&?4aVktsVBG#_!pN?pZt?=-F^tJ zT!@&4G1Q(rQO2NVxb@({16+T|yib4N^U-0fYyWb(^I-j{8_RvA9#-yS1@8*T|LfdU zcqWkVaE0U()~zRXDj%xfmWz=NstgiOy1zk=azu59S@Qb5ItgpGK7M>Wx@=2i&-^vp z!O_go#uvw&?))7M2>#Pu>+sy6#^KN8jmXTuH?IEj4hl{T-OmMa8*c=@Igo>Z_)-{Z zJ(GE>9(B>T@81!hzK%g~(C*J%w&n$KdP@ef_0nwQ@@(L){9{ZL90fyJ*SnuUFChMBEFUG|tI23i; zHFaNJ$K(N2!kL$wWHERRBNK%5=9GS4d+StESje%2rmUN&nX%*IH`}()2Q`Tqg)7vh z0Yulj-P2d%x%V??s%mP&uYCIxm0npwRP3E!V&Kn{Wj_xh`UBva?lsxK+%Y>dvxc4? z?Fmu2xgp6*_qeAXKYBDgHpaW3Sk69_^!GB_6h8{wlnX0Ajw@ ze|0}pRN#0*7OJB6W@fpy1 zy4s40y5Qe5GAb%42(`Y;EcYX^uy7u-JIFKCPmPU@EE_jMQ(V}SdslHybUy60m{h|k z1r38Mdx&$P&|y$>?b>_jo(&(l$84g|-`|g54n+uIXCFU*gpr8E$`}9`e=ro`vq0qj z$Z(>rp@CeNACc3@;X#6?f8obZwR1w!(!~HA+8)M*R~guszcVk_-eP*YyimJbn(1j& zcI&Of3!W|4tIB$5JdLS&iG?-jB4r+b9Pg7)((@F`>%}bpsw$NuGSQjP6?jIOPkJ4B z58my7f_hqRZYw(Rupa%(m-&#M1?Il1K6!Y*m3XBbn3(OGoc!Z&?W8McJfvQ9+u|+3 z903{&&cELgg}3k5qE8xcT__jrGKc~htUJ7!@0P74)L#@AljG0G6T#C`S9cx|8a&jC zMnS_o?mc_5!4KTLNtMAVwZCn`G4z&W)w2lSW95~1{AJ@{gocE@rB$97APHX#5^Xr!| zeWRk5YnIpfkYwibMc&-G4NE@&Q3{rRh|DhRzxuoP7_ivL82o4$RlO82-GfRk1=g$n z)r$*dQRJ)moDYftH$>h;?hN;UsECM}g+;#Er_G|zpRH%x0O;!S;GIjPN{f!MaavN6 zO7Kn|j2dc$9D=z-Ah6sAAGV7Nk&Pdz&%?vj&v1LUk#LeC+=0JRnE4M}n#UVYO zx~uCf99{4ivHJ&yhkyPc^MTu&`76#oen{OsxnB6*J*YeweOCLm$Z&()5HANOr-qgm zEG4iRtE#J$!}sj$(ot$cDuJlk&c4I@Y#D4$V3;wmGfFJBaTxdXl&iC|7|mr6RhVz) z(b-pc{StL)t?mxUOBRv3;c zrFy<+%PK3^7-m2hRXX$fHneaHi(#xo!GO&SMxk^R9G^aY3XJ#J3)GRHmxswP@=;6R z^fKny&{2?`nYl}CgPb+YYHD(FeK0?pmK(c*?2FLsfnCjC5#I^%$2&sy$nl(cvK`y& zH+hPMieO;?jrM;hoOjQj+eRgY$Jf|-;WMC=g8LfQeFOnZ3WXG?ti2K5tV~QW(5vl0*2+V~$uxOpE{MyfI7T35shZ0B1kk`mXf(4&CJJY;w;JY9f4ZQ_eo(2-SI8 zwEP1wV4n8FO+E@30SUzt%eO+4&)dP9#rzCY(gHX645RH8V~HXW5C$OK}BLj3&x$uleugTZVT2L0rHh3`}x1FfJ+zJxhu>v`^1jSR}y!OXdQfcfB-30t-94@1)fRx#V9EBj6Rn-V!t@ggktR zC5=pnJC!!=@V=3N?+d97EI$;k8#k8F>j8gM-$vUtJ6fhuLc++3c74A6`zt|6` zPD?ywHf42n?3D3KKCgipB`~D{KwvV{&QWH+UXXHyo`+t5SAru9g$>I1+TvAP0x1?2 z8#Zma?Lj8NLfD_+Y-OOQ-?@Kjjn_?7SN{NZ1aPzwV3-cXphrIkI(mD#{T0zCxD&}X zIVw7uYUj59J609V&G>6i$iTU|siC0{e|g`&0$_z5JEFlaUz)#4J|3@*4t-~5XZTBo z)<&yg^Ncnq{&w8*vlXBeP_pmg=01P%q61cPfQkXxyO7Vm<9z`{E!)297PkEBtJeS#YkIQHhAY5{DWP)chlsr)hn}vw;Nqa5fKqc zft3>PC7`KaS-bnvB&IautD?jO=)+tMzvN5Q;Ot2y0uD>Po6II`$n0Pr@LHC?ZCr(U zPxuT_>zW%I5gBfI1Z)$h-U9~7w2|urnn3`{5TSe&9G#s%RaHIuzvzYFfdEEEuk>U< z3d~_akU|*7)x-77pp{!==ZSQJQVC(#*7g_ZN?b-$GqbN9=gEVE;~f;liWi)^f{_T#&X|A5GXG8cMlm7?(~UV__ufJT#eDqok5*LK5x{bmz! 
zTvTCp|9A&xi3momuQ0Ef(wd0WrN5p^s=S2LvEwBsBBR3_1uD<0XLpKw%~gdwxEG&IVX6xCy&&ldj@yd zgj-Nd87$ESJBHL;NuMBg=ujW%pXB7ujt9OreJKW z6&w?ZSlEZFi(LXbr%u5?e|`QBf4kQ+7`+o;y$bO6e}u1k^-*~E8*plC2EGQ~03rnP zLGRH$jFKrx`lr|_Mzrc2;PsL|sh~h>zGLV61`KV)I@hVCMYjxU;16CBe<)#Y9~o%L zqjDS421*|EqT0j20epl07H$J%xr#05fkPqOya!G;Of{0h)Gab@27>)YCglUMR^-e_vxHiywoE8C=?1Nw&P&hD(jAy#Rga_=D=>~&J%uf;& zs$o4?JH%~;MsN5s#} zq>ccdx=HRr$8h@PSGR)TjDxptkNz+Nj<6bqJTvzlc*sRw7>K)ciM;4Xh;f(>o5?*H zZ_@T3_gq`!EAd>2y2~N=OK+``wdE}YbrU%v?4`Vvx!$D5>DZC)AfC+!0{IfNDOUld2i$A9GO~&$%#xk@A?wq#vMC0 zf9A}Yl+(}!XhHH8aDTR{tEtIAxgS0(j8{POLKg!z!0w$PxB+1R-c7<==iN6o5u=e@ z#~tqbmiP8@E293f%Q)VRzkY>SPN7d?VDO!K%58K`Le+-!PHcE2udFOKF|mp@aDnRuJEcBOk@nTU+=kUm9TsYL=u0f zHFNV1&641+CoKKv;cw>QvHOxW!4X=Z>Q`+|4Ka&xG}^qBtFhkL8`ISu7k}{Z;r(xi z?}yjLvx7I4a}=74TP%D4+v5^?BEt{!^O09Xk^pEEr>VOe6#0!C z-J%Zxof(Rwy9&dmt{%gX7B3m)&1f?-n=Kq@8=EXD063tI)H&ctPi_SQ9~ZtJ(UpVC z`{T$$J%!ulbJjlr-k7NW_nyH@g~#xF)uB#~j}HN6HXCw8pIgMkCN6o%Vfq?yfCp%t zgQ#JJSM=e-<7@YbX|3{+G0}V_RPlliBwmN4a_k_PE zcY2N+vj=NWN;UVZx+jo=$ByO6<&4w*8n|m6s|ua#FYej9*LTa7yvL6_;euU_F0C>q zhWH9!C_V6lAk}Y=+`=T9--o;&mtbEMUP!xCO6nXE_n+$QT*B;PqL;Ct8?UIaJ@;}) z+M~kJqMoHji&op@tVf3?`Y3 zkpK-XExn5?nC3xew~YUl-C!{GkhC?3&3>WSYa z!CsWr#Yh}5b8sK20XP{oDh2apVL0%`3vUT=+lX1#8x9{oeknHg+P1dl!oosovxyTX zFn_0us19ny*Z1f<5uJuF3{RxrzOC~zNG53Nxkry4g@=V@J(4>u31Liy{5Zet)29=o z#C4@FjAh0&U^|)RmlnWi-^Vtqitpbow{`=$tnyNA8k=p`X)bTONh%=wOP|M_+*m`u z;>Ai)-?Bur_oXB!6E=eVmRu59H(4gH%(bb7gN0&&cLpfuwxFO^bGun76`{|3%?G>> zgd%|p&D#YQ*wcCG%BVKd83e0z+TvVwk3D;8 z9A?Su+L7lqzL7yANy#uK5rjXMgiIEgDtyxaea%=%g4>P|tW2NsbM^K0X~%);Gt1AZ zteW+N<_p_QF-}}1v6~TaEv-`&VLQ7IX;`tQPZ?r=hBa6DT9cTCWZH+|Gb4DR0kanv zYe7AW8DSr?#KWUE`(j%fJ&)IL3D*h20T>gwPkz+c)rD)rwr^*)^0|95{d*h_H3(##U-MX!3>Xn&L8{K%~$-sfRYK2$NougNKU+s2*+I*!!-EV&A zxY}lI_}$Ej7O6DSL<#Ay<>fXOhZ&{FlGZx8ix9~hCEPMk&BclP1ehOfZhpk<0>oWV z@0T=<*4_@G@Tn>++Y`8sVxX!J`X6ey?NP0=e7El1YrlP?BK|dD^5l`DM(Isbc@*N_ z?Mt0^1UA4ldm>_Ucb;0~<|ZK~9wEnv$TQE`L}Xx$9Yp6PX}D?O!jlx7WaSel7OY)c zUs>6&B;r47&18DkLIfNc`{g{Pwu3`p; zW0W6WNXR*tK)c%LH2yDUSufkAOU`N|TbXH;AlxsEKZOZzL|jSD#~8-{&a*>D0y>Cv zAnc+BkyUC6bV~45XnhTVNsu2pbobPAkZ6~2V+R^mW&ng`N1B*`X=qe#;lq-9!1Mt1 z=42NAVXK^$kby?H$CT|IAO!9{th081XBKLn(JLxi#(i~&AVenaYZL4%!33LgDsYFCvzbK00c^^; zI#TPHdKgTSh+WS()~&2q(Nk6Rz{!(fh7a%GPg&Ek>kdv;gD-W{HRJ+R z72@h8a$YzCF0xxr#LCK*ul`qBx^K=(atA(- zU%q^ij%k5l#`hkT5Zk*!(|HM}Mo`IeX3sB~H&4U)kzd&s$XdSqULltzkvb?Eg*(~_ z*`GdODzyI#@lfUFmH#j;R}|7msL!g+j13K+V?_up1d=CWu#~&D_m`leg1<*a z3MyT|o^n1J1SKjZuKU*wz65S!TDfW7d!5k2g8v{sqoakQT3??Pn_Y=s99lBM#S0f; zGHjxp=ggsiv0bwr3|vI>5hnwNUZ|mj!ta#xpkDw4CQDpzd8lu2SC&`^OZxiClD&nS zeNn=Zm(Uk$Kk(kPiS?oTi7YPhI+?`zYm`b%Leula$46T|=zzqRABHG(*oz|!;W2{{ zb;8C#>b)4VJ17mPK4|*!7C8^w${mg(i{LQ;3;Vkp5p1;=L**1be_sCSQ&>Q`S5?Kf za?yA%)?aNz*^s1aIK=z}R(Dj3inkCKAv%Q&zHDq_0+E+xeE3pRJ%t((5PS<~M;V{N zy(U5uogdJ_(a{S|1@b89cF?7b)}KoouON1O`qn=N6!Ohq2k7bb($K)Uf*eYwfS>^r z#JwGag6+)@O_&C5g7%N5=T-{~WPM(Z)Tl*lBv3~WsHdUh1mqKHDhm~9K$qCe%uGnk zYwu8N0NJQCh77sr>XW_3bobu9iq%qLNJEP%4iJT3u`y!*{*cudumUx zS+_0;CNy+9CGYOI>&m`1h08oVx-5txOz_*uq-#A!E}2Jm0}!LY+q-)=GT*Qnn<*iH z{_L@X!wh_6h}Yfv}Pb7Kq?v0v0)td;C;$qX}fTg;0^2iFp_ z`VClof`fzg3j5iuDtA7jQ4L+c2;ofOIU9#tXh=w74#P{9FPmWktNrQQw@Fk8#%5;c zV`FKdOqs2{7q>aNyA4fErp`gd3;1ZE(%$v`hip3nN+H_%IAObbN`0sHauJmim?Jvx zY)4quU80Vv1-J*z(sI9lit`gDN{IGu5qzhr75ve>{QTeb_0RwNZvmzVM~}XtXu!0G zFgnhpZ3OHrM9i*qXnMGGZDsJX30*n7A7579Ji4%oz%?o$)*=XG` z&%n?t+O*;23I~vXlzjLwHD4L_jqdL?`ogsRA!CN4M&XVs;U~A;b3F4n6E=P=O|EIFvnnqLes`Z zbkfi{A<9dZ^4e*I0WFZx00DQeix9`76yo*uCFkr%k5;T&rNJ)cqN0(jXM}FM01Ja? 
zl1e~h>Ld;j^jzwl`}PoHFS(4Rr5*H<`BC1B{X5z%(hQqLL2pDLDT{3WoefhQ0Ugg| zB7kqhz_F%IlhzDBCj@-2HV}HI-@@_OZvyf8rC5qHnJIT&-EA*pcnm-Nz4>QklZp2| z=HYR%k9upndCDnJgSeR&3#EPytxKp1*olQgyn>6^{pHxjML#~w07J$mvxjNRyu5_@ zXQF{&RjB^Zp+NF|1XBvaaffuDc>`NC_B5_f5YPIwl>kaxL*PWdDNECPBwlGxdVUUq z#`<(JwJ0&00Z~Eol9IB2PqEAx8x$r2+b*WlkZt`_#@UTYpEIY>|B%<}#uHX(DG0#n zm23Rfs|DHFc8JJd%$gmOhl|iwjswMwa8!C=Sg!Lz`+rJI1Z^4`8lA;bfG}u6t`rpw zA`@TE$T)dv`4Tk9-@bpp(&@oJdj03g*s(KSLiNMjVz7+J;BtifM-Gb30rJz+LRquH z9NlXjulkz4fmWDu^EN>`WN%N&LOf-lfXuuvG&Cnky-DF#N1=55A~BB(Fm%+CV|VGI;Jat*s-mXGbpPO?L%Z1Q4GXhpdbn~4Afr;Ml)}>k z-H5j@HDT07*I2BGtgNgC43OG%`s`WM8G5?9VgXbg=n97q8YCM$%2Y|Gf9uN4He2A< z#<-9VxM@HM9es4Qp}l*&@HTbGohvlLD=W)x4EK_9tEzJa5hT*#4Dvg4v_UcPuZXHo z+Yn230fs&wfYsX@%FH%h?yDO+MtSYI zuSzS4w4_p`5Zh-SxVgu!_hgu)d+KQ4d1Dl zjH6-p`;w&aG(IKe5C1g4B%e@7Kb{hBnI>!A$v7U0jEqFXkfElEm%% zG3^J~^W-@m^9}ke7b?-IVyGBixEf*uX0UfcY^=45%bm2em`hD-yd^_pr_G(K*8aeL zzUjMr_eA!L>pSWBkIYfV#x})LaT-=u9l$Hnne^4}B@t7&76S(lwk;DF#Lt2)Hds(c zfo&HSm8-e_+s_ye8btSM@w#=<9&MK?706Hg8TcQ;9BZAcw9VDq0B5*yA*2}yHW>Vx zPNKdaM17i!jLfyi`S&R`Ccr9ksvz$a(w!JF0lOFcjcj|m9Q;8TyK-gkJY7TR8Bo&6%VRl^w0vesJ!3`q6a0z$ZdOjA%(N5PUI}?1Iy1C9V6lGCCPsQPdNOr zUd4LKls$0nzUQEVWK`?t7&J`4KbIr~nDUlg@9iC!t8TD9Y6kr;ek{U}h;#+TtC~F%f@w?K;l}fE1T)%UNGdzZp;L?%N-i_EI9pNNRojG{OnTHK_ zndA3-?dS(%QqoN%v7ZWps2N*XpPv8XUh?!8?=-G;J=zl^-7eQl>W3C{`KMo0IW;H zix;AG)jdMlwrY~KFWS~5>N?U8gak}ulB6CkK2@Z=^XGL2$R3G_0ra4itpEAbU|zCJ zdm#X2}^yvflo&^Nz>*tr2mVWr4neM_KUxPb6xUIQ76u-KkLHO`Cq)VmwLPG`*2V{gh)dl`QvnL2_7WLAR}!9Zwh>{5bs zXA}Z#1-FSxVa}zw<&~8Z*L7<|N9!s!G}g}d`G!moZbWqLu$*k~9=vN9fVXenAn~^8 z0#lQ3Hffsfedt|?8M=QA1X0cFoC*3>Nqt#5W=$%50a=S`Pw{o1v98}?)JTCLlLAqU z_`T2>NcOyZ;X?E02VxA7u}jv0CZ%$Z9?bgK9@5>r4(2)*TlJT1^?0mcRQ4QtB;r;uM z?axc>?ar#pmXJe-daSxKVxYc$Tju;@ZQUI@)aM%l?hH5Wn$hNL>*Y0Uqm~r~J78a9 zk5t@yDhJ4Musu~s(|&2{ zp%F0dW2w?07CKXxE4?P8kt1hLow|lL3*}Kv&airSFnIQwcXzrlI{0F>*> za-&^oHHuYCjxala-5rltCImX3T$IY9WMscHT%}~4XlPBy#__eWKST{R`<*NlM<5a6 z|D|^zrxhwrMv3;w?(DCHkIHstul)9v>`JZ6w90CCck;Kb?>6>><=)EJ@~Z#aKqDI@ zBjgk}N$F!pk5->_PQo`MV(K|aTV@M|U`|_zFG7%B^YD&3H>W5d3rFDwtJn<3_O3w9 z-Ivv9m8jkR^zmc7ru@BQ_T6D3bbJIHVGtGcNRC{l~QddUHlMM8PP+7lJ|cIV5T}ccHB?tntm8DNRsMx z8Ha(vAt7WbQ9cN>s8nV38J>#wgukPrf{s#|E?Y3h&+a=?e2M)n*E)!|Kh&q(VbCoJ z9aCE-ioqY3u3Y(Cd0WfxDZ50wPER^ubT6imZZfz>OG{~SF~VX7+`!M4ULVGwBcLyB z*ZX@-O>Yei=9i~A+>D)nB{4CT!Q$WD<8q(#2>~S5mL3O%z4JQvD{u7_+7&4{}BpUS*vpzA+*I8Mj&&D;*)fZc6ZmP%0 z5o9#Sq7NQyoAufWF8`?rEwg<*`jz=9dj&4I;eBifDcU8&FAV|->V`hyt8s&q>Cuy%+G zu=p1=W>qjv3$;mxZB?bR%1h=Hc?Qcske}NoeL~;L>QwWT1Imqq;BJeCC1@E9AKvun zs7JsV_Ixc4y5$%Z9esM6cbjctxboJ(rqH7y$s}N|fxCDIuA~yanpK+?b&PGf+JDfX z37R>lX#DgabuunK-Y#Qcb2G(8&T6%;UGMvxrw^(2LvrKc?%N^G9&UYXn`yyp6M7G8 zB`p<|a?%|fzE|%%GZ0T=EgdQ&EM7iyM&h=AgCNhhX;0etZFcS&Y@g`|;0DKLimA6+ z;_2hq=*#fHCi(Vk=>k^nU#u|r;d7p!Lfv9xpeaPA;Q`T30*`=+StG>^mz-VKP%gRe;K7>^IBc=t52Cb6%Q{{S6wA*Kdt!e4 z6cL@6K<9^+3t`f2#%!stN@H#^2QSnF8Q{{m&Hp;-ySGpiY(aL1B~h4gphs>dyD|}T zn%^~WU@DX!gMc5k<4%30v0c}Ltl%Oz6~+Xjl7>xEa}9)<_ZG_VM-++#g5xJn5S+7@ z_vpa-YlKJft5@4sr|RZROPnA^CNC66`!wyB1Z`Pb4Y0=G91^7j2L)d>_vd;Wv0e{3!Ex`KbN?`wGis@8)h#`OC2q<)eDXHU41fs zg}t9&Yt5XkjekX?3w)j|cT5bNv%~jxT+g!;4-IpD<}N#|ug9b*ZzMQWMdI%h$H5= zVo#NS`9l5XIAOxyr@>c&*j;<}v_h)v9JK8wkVi(0z^=0a>_YT0C|R=HT1lu>RVJ~m zl&nD`ZvJWy52OmqGZ$k&DyM5)lxZ5{+}PFQBKkg##f4x=Q+C~u{7VmKYsT4*o@c^O{vE?Y^ax} z@go#(g`IeZn)R6T>nwFm&Fu#LEiE+-ghjhW7QN^@WAKU+V%xhU)wn_k@?>8DC3z1@5~0` z#FjCA`-=M_PoG9xzaE0p{@G*5A=feowr;&nerDawpc&6scQ-4%gqr2sY$m zLc%RnYJ$V#;>DDXt17?Yl?Qt}QNQW#1}Qupes_~MHaL_n83Yz-u)QW-T!3PNT4&KI zzERQODMB-C_rBC35$DHz)D=pV{%y_1TdJzU;ooE0DrS_8$nx2|{5B@FAN#n`#E<>p 
zD&_M?{o@67%Z2wAGT3GK^ytsLgJtJ?g%x0vFu7)8N48T<2{%1LoUU-c$dgmUzYVG% zdH(kIrzT&Prp9FDq0R6fo$mYVD2zF2J-kN>I8W0n1ZlUVJbdMAWa zMo;CW#dCSz;XH`5r^+{ms^>8T@4T~EL!toiN1y42wYFRwBJN(!^({Y?Q*Ad)jDN0u z^uaK-=Z8avvV-u*&}B=aq-aaAStRG-!*mv)%}=4oVM8^x?~mTU?=wA(+vPRsv{{cc z=}{_=a&n@&U;06*N7WEjldYZvpXm?)g5>vXd)F)zQBJ4iz0sf2D^1tBO1PZFiM7j* z;Q08y$JfEQ@BXo(hyFZ?0K0|apWrEqUYi!0)@1jQ;_gnbFaMDC-l{C&o%e;x&!4xW zRVWb~fB(@V=>?C`q)^PFK>1R4#ic08hW>foh7m?aI>&z1)dfm3)unL!ETD&93Xup1 z2z^7eEGV}#y38PisX|#l9YH|&?EZaSz)5F^-SBg{gvjT$3=5EeI_0+8uF=GRZ=&R{ zzBrJqo4aq@yT{$!r{3#TL}5-u08yBq(4-{j-J3Yl;WAdJOW7b&N8vdo82$%x;fL0FK4RpN(NjG|aHx=_OQd$|BR!35u*4<*LA2 za_TMv*tpnKIQ{yK-1@tLQ}gp#S&WWp2$vOA(a|}>x2_TuSDpT6U@XLoZRuH)UAJyE zrYn)z%NLRJ5>I(slH-0awr(0GFa({RYOPME*oABWEvAL)LVsvc!VisjON4s?!rhtM1pxIp_q_=2xnx~T!Rt!W+{a9L%+ zlP4ic4{&kAICeF@UyHN1!xIxKZCxKVC3f;W@^Kn>Hq?E+k&+VLD6|fb#OMX8Xe^lq z#zbTkerCG0waBw-Y%$T=jPnc#=l~jE^lJ2F>CnR7=|qs6e#((d-$$jtlyNbpqn)=2 ziH0ewt?31sNKPAdX622oi$X1T92I9&%C-#~*AYX85Ig06wh@PW|F<*a6)2!gRd-U% zJ-Wf#M*4#X2ZzePxFRt6ci32S-KV|+RXl6FZ4&n%JvtAIksZaGcSc51bbc%4<8mznG6a7?d{$l?uAx(M(MscB_-2JcTL|fec$rZ59>kf1j%TnS4fSmq@WO{$Pj5* z`rR|+@^k0D@IH~m*6D!qD9C()lpzTnJ+ z7Zl(Y_T8d%rwL8_j3jTH(GcTxUU~mk#yoJ4UaGBeCyl=X)e95=iU&HUq$%Yt(=Kl0 zc0=QIU&Trs)joX`YKe(!Pr~?KMC!7)KK?;XSn`nNuXLBRPPUz*zleQ)ao?v|Ur$OR zFdVfIrT!e}*Ml`oj(a!v?ArBog7Q7BqIcp|pV|#;{15RqE(Sct-=f{HcY9>)mv_y2 zB*J#Xh1(95m6a7cA_Q`~quQl#TEVt7OTa>7q-&^Ua7w+6tC(e+cLIArhYT8&r6-8= zNmWHn&&&i^uwcQ7g9kUFf#3@?Z2;%f%U>mG0z7EiK&rWaPB6ZNm!5f~o>`oAE?TnQ_L_ok0 zp=D3+#!Ut9+QqKS2K*9rj$domo)q#2-)~1!NwX(iT|qISuY56$1&MwQl;{cC9*{n| zlT?~jWt?<2g~=EJZGs))up(ZZJ9jQ`lZ?M8n-&~~=e^KUs_5~sXBmaSVx!G3-=h%d zH=}50S=}(V1gE4G1)7P>qPB8tyu*Ucq<@HwjqKEO&s8`xm?m>FnFoI(Sc553{2BW! zk-0AT3C}3KI{k_HOutmUehn*cWM*taLD@MTNJXZC-5-kERBs(3!I`_9A4oCp{%13` zBy8K-U3G*Z@QAywej2{W4S$sj)Cc?tZ8&%7yEJbBbxE_ z6NL=X&}~}Eh$DNd-&w-Y-z*9Fp|19Aus>E%dkm9K0W=ayW=N_DC4|3Klj<_(!84! zJM5e!f@VO^ddx*1jW@f6v`Met^BKllxpGjAdZUuG_&&4Wr-gpBkJKZPVXtW@mpjoM zlh!`{E-(EfUQc7m;?hRc|f|1kx2 z|5H5E3YV-sz5q61WK3tO_{X6Lp^YfB7DAoamA5K1WBb*sUYL|SHyf+16s8PyY;H4= za4_}+mUH_~psq7aGIGe*J{)S5IKPa_6^La0gw83_EAll$$K8Hi8kXMM%B|?BrR6yL zny?htG>k>OFKd{-^cmJi@edt>)Dg?w++f!Vp^e~_=vxT6rX)2rmFfNRF}lk|PB^-| z4|I{y9OGmC3cb)c2Zvm0s!nOZ|2~l%Czz*BS?_@1rlxbJPZztJOu5Qj1nPD&bmNaw zY@xgn-}9X7#B=ugZr!?~pj)8J(Rnqm_={m=;bge;wnLhFwr}q`F#HYGq`PeG;vvO7 z8UtN-BQ4Qmb*$-U4lF8+q|FQ z$5-aoKh8xeE{efK%OB^nLZMrG= z2NYvy+$-@tU9K4`l8AA?tLn_}?W2P>$1hd>5$ShJRi;eN7pRH4b%KgTvAg11hOj$F zddGup(G7hV&j4ssKU79GlU%&}w9TVsK}(>qp@9Wf!I8ty?5APuqqArmJ*&ctgwy31 z@}sN~rm^I6%yHpT>TOEf^OpGGZ8A+n4$Bwl(<&--wY5d&e4(L`+XQc%H%`WX560B2 z#%{IT|IVhm0b>M*c&N6-Rs)m_FM3pePJU+e?U}WlRGQks11P2nroN=h68Gxfy+w_4 z6lQLcDpBY=I>b6`j7-BWkfBXz!I-~4=cxDU6_O0so|ZC+$&FGZ*|Qe%p>wKXnUTuy3GxPE-^z&X03?2gAV%>{z= z-p;c5!mkZ?UQxlyqzECqt%4@ZZI~`fIn-S@? 
zNz4PaFEy~;Jj!_Du{g#XI=27qSlcXGxw**k_dO(yh8yOi$ZWPhb<{)(ZL;k_lmjz7 zL4=74i}i{=PV*q(!2u4^ z+~L@8rc-q1YbP)hlfLA^&e9bL-Uy&)0Vt24B+^*-oQ>Qu}-I8h3s#=(CHT0>{jxF$Q#o_(`o@-R(f;{?8<}AOI{5O+PWTE!(u6GH6DrQQ4;*XR}7jESNZcytH#x z18Ci!u&J?U`LboR7A~ARyKm!fvJ~00smTZ42EV)#tF_c+V1!LHnBHm+c=_&Km!29v zPiQCRnltB4+kNxewK|;wrY=Iqj!#*&!MyXNbRT}`QQe6v0`f0qWv3Z4Xh8Pb z*ilMGV(j4}b`F_)Z88hGSO)HoOG^4lUrbr#o_bf48J1k4tbRaC?Wk4rBODjaVWJ|)f~~?EKjuqksJRFR zEldJP54+9WE3gVc4M_sw0e~&hFuV)Pn>}8jG5`QzMU*U9{p@2C#qi(?Xu9tc)^ zp!mF?vbI*Ck1+0&l(dcs6+wX6qiW7=AlCH*KFFGdepo|;cc|fzb!*p}t*wxtaL=V} zp`$qf+*L*b1SC^gQ)z2CN-%r{;9~iQ?qij3wt$_#d|7|sK<1CnWt;(Y?ScdE_lC~3 zFq&n>0Za69Cq5}RE}pP0w^Zl#Hi}P|B~*uWKR5BEQGEGu?~{VlY_w!#W%i7N?t1uR zRI#uNqwMElV3w+?8^*I7FCHBU1Px8NCT;*m(RcF9RtK$-Hn0}bYLbl3ScXKhTFL}f zGkn!#34=UtFApKitkSHdqgM4cQp@UlZ7sfTdl~m+c3FR3G6Q=68alV7kQLe4eNj4E zSoE1Ud*)1TSw-%coZ%xzh<&m1^whtZF?0I#uE+KHDfDE#+tFZTsXxfh7T4*s^s76X zBVvqu4{q07CppK#%qA=)XcwspQ|eXd)qllxSot@PG!|V%!)tepcu}2X^=DyYdtC~R zvIB2RN^FktrP2G{yG6#|y>hv7K~&_U^Yq4OlAr>C%0wnSgvdUOb*4SXuB8cRsugKZ zm}O|kHbT-cP3Y8{V;l>5WtuGmwv@HjtunD%a#M|H?ZXo=Fh-yJ%R;)BQ^zjWL@Ngk z*Q-w(Eg+Id`DtmYfYmo2!&(%u)k zXy0N~(ZJfHl>%OCm+#rRQ!qvh3zKxqsY|d6Mm9>dv5cAEii)l#>yNzx>#mY(6J&~g z{N=`O;Fr@E-VAUjYlk&OU0SrHP9?CLun5ew;Z#0x|MlzFE0!%2T(@z2;VpYhwTgCx z*;X%%0O%ro*(fwRa&p|bRH?xtwh=5BkcjT{FOl5iAmk7SymXkp?R`2kGZWT~U%>4J z#Go9;Pxl6)fe4-$oBx#96zV2v;?R0 z`q0ox=^$(oz@c6qt&H-}AQ3xirDwX3lcwiU5R`ij6r{>Uw!QujxDjc|s#m6Tm8Gv7 zS8(;>#gO(6QjYSp-WI+nN>r~XLJKSVup>>%KpiT{`)}Z`2)GedBx9cZP2RPEBEkmM zqMIKfxGF!G-6ON1P={zG1d^{QW`rSLHoa0(Qku0+IJlA9a{(c+nb}{)Cb7{&7Rk+` zXcbJLx#?7ixeCh7mZVX_2+6w>YFHl@>vI(cah!`Xypz?N*K#jtw&cE0hg0#EWqDf2 z6WwPwm+L4$Z7tclbFLtQ&bfHWSfX2-1n;k?u!Dw5_Sr+1+8wY32oAOdbvp;mm2P3o44FwC|DZQ6QSikz~w=&UDbc3ZL*H|n}) z_vgFJoGJ0cXY%Byu$SeNuid;E)@><}&ilf>mD&0E3IS+fsjbfmnp^K^8q@a%aD9q? z&f)(5%QxqV_}s@-KVgMudP*Rw7A7!V1{^qe@O<}fbePUB5=QTUHiZNF_m{NY1-#9g z_0dS)Gb#LEy~mD#bm7v=D+dqg*Dva9M9BNhfKsHCzUy*bGX|?;48e>3)zFY5 zIbyDu1V#B28r~cjKsZ67J8f0G3q}c(zXmUPB!V5RW_e`B%K7v!#|#vx=YHD}>$nY? 
z5gaO3F)Lyo<|4z>7U>`~C#nG!3H5v}`cX$b_*rGh1kg(vwM47mFH5`jN#WJ##kV0J5F97kj%nCa9sKE>vWgj&{1KY z~H-zI|U77dy<>?m;ovO~$wssUbrc67NU2%!Q>U zU}fqT1CWhxZhM^Kxp!OdRC9(11oDaGLCFb5XLjA*ei_sH)UQqp7c6MI`2DGPAC)sw z97I>9#1XH%?HCk`G!y7alfY;E=mg30t`rO$6ik$mCyi zQ9(i7N=s+m)|`y8)1RxW<)%#$3^?%FGGb86CjR-q3cB;$kPw-^`*&{sLtb5q@CVop z6h8M7i&C5S0rxN`baaf=kjX%kz@YTmI&m&nktedsrAm>0eh<>jI_ zGm(GXndmy=gQ}sdPbMY}f^-tBfE3lk!w=nyX#pbzc?mLYW~Ph54$PM4?NleO5JaHO z2oR1_raXANqqTR`89G0EhqkScQXVvLVDXzbNu>{V<&#U9ISU_ZKlB+iOlc|2ljAte zkO~fti_g+)8y#}JzJ4vu3|df(@`Ai-;7C8>E(``ZD;N z8B-!dWvCznhuV8w$jo$|Jb8EjLlxUYLe-3cS6?o6tRoN7$0xLe--%4uH*#494v$^Y zjVTwuLh9z1FO5iFsAg$Z_x%0Eo5VLRy3gv6iQ;HN;SZx| ze&Qr9^(#_T3CP0Pd;OZSDSRF?eePSZF{CL}d@C(Yy)ndU4x|4qTNp=S%|cFBG7~8g zex2hDcH^Ulua33A}hZeZ01pxR{q%`12tP%P5?^IrCjwLch)^e}Kq1XUfj{Rh6$ zRNaYJoP>!7+KIp_$tArYbWFV>`;4BwV8Na^`R1;4U!p855E@@<5}DesYKV49b4Shl z&2v@r(uddG{c_UV(WmkFiXXC-8Gqt3PmG@bGQ^=~@rP0^%cZk6zO27|A?Csw zcN4p)&|8MiVjC~gD}#@UI$zt&C<9(E@H=o$mLrc|=gu+7xLy20bGO6*#9(zP-OZ#A zXZ`Dam6v|k=!cv?2@a2KDI9?PTM}2?;h#p%XU3H$1+qdBXdi4s;2nCwuybWcTs=w;BNsVd~Q9Vg_SW6p$WDB zJpV(~P~ z7V0b01jf@cUZq+j$zHd${QTd)@fX;>Ntp7<{QEo^S2kb{t+Tzmc7^mg&WcPZLE~0Q zjhSEvQj{b=bUdjWc?rX#hj)O1J(aC!8 zqHFq`!g&wJc_}&5XzsmY#7%Jtu`Jeril~gHv-;HT={4972H!_?&%4AEFp$oF+QVlO zwh7^{`{>a*J*m8~@Nm0KVJo(qRCN+ifFhSl7o#*`7_8%2Mo*AU4lRmezd(_Gx=qLSWvKU8*T zk4X%`Rrjrui5T}+i+LeEJ;7?B?X&!lqIl4kX>Y}%0pOb29kUf|6EorOw*0Q{9s}d= zV2Nv&6z`HUyZ;Od*|_#k6z~w@G?xF=*MnA>)>(R^1V{)vpy2C2sc*jryl)9H8w^QK z32ix+6MkNo#>MZgxB`c0xU;3#=MqQ?i&^>|UVs!CE${YsC5pfu&4yyCfgSb zooj{RN<;~Jpiux|lFRCurk9RZm_yj!Ucy=%@GZvs&bUpX6N6BPxL&rVC}JcCUvRj= zO?~FfA||+q26!1S3WytY6H93F7(9$625K%{4DTT_+^cCJciGi_KhuW87Nj?@MzLvz zK{R=S(%-+SZt)1aodSm4D7j!TDhk5Z$8IZb_beK)U`xL3^5qHCyvMbB%j8-MP6Ok1 z2aewR)Ov)>vyZsck~s6a^ph^D!M@h|+KEwKSfUMzdGF6eDcYrjdyQqn@!S-z>K)YA3PjgA^%Vw+f9GJNv|AQ`lZTm3v z$EE}mcJ!utE@g}_CQc^q>arzEUc~pIrD;2Pdp8lav26cxeH_Ap_~1u-3XzM6$tw&& z0PtA$b95}$R*dCzI;+M_#WV7RwRYf>p)^SzePmLNTMVBlY+BD>4*hZR!4eyzE=Pqo zHJs72vG4(k;Wv<`AjFy5&&X(h!56pytydx07x?z&5DqlZlOQFz7W5S!?B(Hpl(j|9hzYv0+*j=9y-G6TeS}M<@1A|EchGJ8e{|TfB!#`1L^rqcU zt!sVpjRf7Z-Zpl$RCnDCv)C9dxVyQhTOa#E7^@T;@$BJ4L5wolzHJhE-iFV)GY1d` zf^I*3{Fnkd`sx0qPHSZ(f>Cw<{M*Wcl^sgoH*eTL^DtrI=rMjx-c7@oJw9Nz{qnA{(~PL|CxlftRimFCHI~dHjl`4$zTfd%*N;ZW;Rvk&gB07#O-i+?w`^C#{qm&+n*P%Usv(H zKpE1@f>q?x;_1BmGr`>|)x$9BU))~2Sm*zX+i1!?OpZNR!WfeEXhs{mTRI*c3Ee5X zjuD#JN)DFKn1$3SAlpml($kN&iYI2irNSf5=FT)Ew|eWhvFQ-XOXrue?R$we*$kwy zL%F4?35?CRj8r#z*y+?!L$j={CM>OJ{U}Ny#3ImA$X79jVR8rlN4qTQ>@?TFSHf&- zdG9Is&hrVJFIa5SoCKz32wYQGY z-5dJ+@}hvnSM!Rl7U@5_yp_?lm25nBTeX6?DdcI^EyDp4EZiDjyL)?UzKLEgog;Bq_Cha{V|VFOj|efO8GzP87hm^C}Z=B4=o+Lyvc(ecnE5$*EJE4-{ zfJWd|scdVznE1v0B#uzhBUWl3u+z;R38S@kZb3moem<7u+#TQnSTXLyP!BJ!f+g2s z!{h9+N#M%nF251bR4$#1wPtYiKMy89%TvA*Dtn*Tj2bZch_?WJW=$ptW@K#RN?Eo@ z$9-sEyCJ`c0oimKE$ZqD6c;Rl=gg1xse`SY(DTvi5s&$Q2zlRgu*`T~1RR3XC>x7e zD5HAV`^P0DNEtkUff7o(jn^3Lrjc3F$MV1RvHtzr*L)i``|91Lto3^Tv#O0FZmW&g zq_$CpRBtW!xwfm<(`|$})V^_Bwq{t3am?WDrZbY$zol-B2Mb2*cU$$^x(VkJxZ7_H z4JU!ocIS%ea&nvmQ9?q(0=*in*S_vM|MhF_`LQJ& z^?t3j^*;`+f4=g>ZP!J|w*Fn*I0CKIBk1(|_bU)?^2*&dIqH6oZ*KNQAwv0U<`25PE*>$2Z|6%Y&K@hvtK4F{1wBFywTJKNU89NFU znX<;Z-Tq^R`|jn-{rSu7JXYLPAHFD% zVMEr#_}zjWx`aoHtsyo4+Z}rYJkMgOD4suCnk=JYq>=iCbdspxMrwR%6tUtBWM1LaB=eoh-^5$tA1AH5;1G6r{S7~ zY$l^)2;30bZ80(GyB^3XDjo_SHNLOeG-v0(sPHI5=r>S?&@cJ4P8epat<7J4gZEZ` zRBMErO!E8jdo;j7VmqjNfv&8W^PX`}7k|I-2XLeEf9zj}i^eU)45DwltLY7nK@Ue@ z^1)5>fG@)|+O#jLUEeklbyQ`f!i!gdQN4rTmLrLwjitO3w)P@68Y@<$KnT!G0{Ji0 z+*q`}>=0`c8_`44!HV69yU1LI=1>R<(}WyL(WBBR4`R&LCs2eJwNw99gglyh17Uj{p2UT z-8@g(k9}cd6Sm!j#AYrQjEN14xU}MikJ-b{Zy5wpnIn_7_xPaGT`uY9=}AxT*GtcK 
GIT binary patch data (base85-encoded payload) omitted; not human-readable.
zDs7I?pg$8Dv>)h9a6LY`zL~%CX_cWIJ4n+*I}CbaGV8=*ym*AR=PPL6lXVzhl0$T_cGW6?`IJCY-5EtMc8$zln{0UHbt#E*rv$mi5{qTLQCn4e?`nLcBsrprP z>MMtzE~1&W|hnj+XOYc=SX@!vF+Ws=PUO3ocX0=3z)<6r)VmkumLU z3^=rx&1#aF@?l_$<;s=Naac zyPn;zhD~OkUh9)5Jm-w}HN&dmjl%w>i_MMNL*e7ve`F`Vg|rRv9^Xce2Q3b9v6r$u z9X|v7-|HgQLkRp=1pf&XsFpC@?4i6^uc^7#@K?pa98O`|T%SAa&n{ZCezs%|H?x1A zA8lls{9+%Y%`T_JptICb2`heI|5r`G)ew*N6_*+m3=co%6&VCs+3bCw+<-PYZw+Qz zmAH-lu$-;B*LF<_-6P}(TyR!1f#=?P?C(;NnwD9(c^+mNd%Vo#pRjGCB2oJ$Fzc2k zQLS`U34{XS@#DuZuJ8rmClA0c-@D;hxEaAQ2o?Mj=Gq?5SFpL{|GglkbiZHzP#n&@ zZXTQBC#g)h6IgBWex)J-@q=>jbi*!?$6(O6X$Z@7;9S-4SkbB5@fm6jaN+FA9pwFp zD*q_t)2y|W2eJ+Abd%{xgv)hV$*$WC(0zj*2wI``)IuHui3FE}Wc_7{Y5OkQFU=?p z1&$HRc31@ej_TS_t{7&|^0Plwc1I+VUF*OWB{=?!ZpHgi*n;b$z&4ziPfPuWrqIW z@daZ<@aK7-tOTF%E;-@d!gGZlKc~ord@}5$2n}rTMWVcQcuDz5zB^k6XTGi6raPy0 z&Fztvk(q!E`0hLNAe*z**44$R>Pbo%&2hh?eM!fB`18Hy-T%j(N=;6#^m()zR8ztb zb(4tfO@8o}u4HrDM#4J;_2!1nLKP6|72~jS8vF^%cSPY#zj`F~_7d+`TfW}%_@fWS zi19@y;D)GCSb>gOeM)o56!e$`*M7xyh50Wn1><2r1lx4Gmfw-vjK+rkzFt_$AD!u5 zrBi2h15m72P)b(uww>t^9Cu!j!lMK--KMAj= zrV4vm#BfYw+PfE-mi8o7!Zen%fq9nOuls^hZ`y;}7~Z87uPA!A|2o?hu*oE3H31wQ z<{Wd+pI(ikDih-U{bk$H_yifgJO^tlUK>`cwfK)Sxxl_Wzr*_oF@M|GHHz-eRv~GJ z(M5#9W9i3hwtiaUl@kob8J3~8fKv88t5DxJs3@3U_+_O3e~xlyPR>Y?F;D0jcw<07 zrsqok|2WJEuUT#CX_{oGRX)Rd=zZ7o6;0NJF1PQDJktc9l)>K3!ti&4CQ?%w8+GLl zV25Gj}o;2s*qQviKiTU9^VWGU~;WTibh9H;OVo+ zN_x-bFYL_SrY-{D7o??2wleI((?NZGy zop-YRJb4Fp5QN0Ym#WzM!7%3J$vX!7COrieJ0O3Tm%Zs!Y?0e=a9r1+OmnONgS~+4 z&8-6*q_GRR)86qH8;-Y4^!kuUJ^vhF5yWTln$lW`_H$>|iW;dGs;Jd}2u8<@NVC7z zvMc5mtwFm;5y6GaK&K(sg+>D%%IA%1`nC|lG9G#F6Mm~e5U1^_{1m*a zin*s!H&yHXvpb!hUT&VDjxsDG*U;hXEbJpQ?#ZM*QCxK)3A>FugjtaOv1VHkdCHL% z2O;b7`=ur7s}BL2U_JX0c(pHO#&GUNs#tDlukfD7DqQfg6ur3aLN-nt`9rJ8%^kN! zk`GJMR&4?0kffiOq)>iTQ>5OK?9=qNBkFfpN8M#Y?m^II(F=lh=$`cqR!6{RSkYgt z*duxpJxR$HoDp{s`VLVX5zv~8&-jL9o|oR=u+(>^#;O}c$Fmg|Q(j+6&n=UZk`me8 z*d?+vb{`X?Cv(jVy&B=s6?&9_MA>EJ>E>mDJ>MKx2g$Hm?#S2zqxA4ymcwpM6^R~- z?nYo+39w(=b-S$RczMcA43tZ9!uLQ1^R~3xfA`a3ob=n9Qv3WxJLyQAUUenOC;nbH zoJW=oTs4vZlK}e4ll8e~@(K#N4GC!z1fd*1!U}a=)I;=S zm+y7c}!N7iRjTIm95Nm;7}4;(Y@4RxpY)WDZN^N7Qlv;XEsXouZjm7Jf< z?SIjIAFSG&&%{qNG_B|NNEd_q)Ylc6pL{YrceZTZT5ZE7^4ZyVlaF`Orq|hwf^R>Z zbSgfsju7P)jn{vDQNa~|j;|rfzh8cS*+S)^cdw$=03X%))wvaI1P@PIN=l1K;tX1M zXbs#c&!{zwAa<)5@I*%LajcV_OP538Ngyw7snN(1f88gn zbQti>fo>6KxHLIy-Oz&ph&i&DHo<`AN^y6xg=z10qazDbTQ_+W8qP?<7xxK5zXeOr zM`Eu6NmJ3=EpUFkSg{F^)5@5jNE!nX0+NXf2K8UPF(kxKPE5LPy_H$kkh|^H@il4| z>&t|y^D8d3W(Mc4w=Y zYP1}LC2X(a`$WBy;cinSzyuY<#3t}miRunDwvkEeubw|TdR6bZF8OJ_U+?yfZ-=c3DQf=1mk8>{PO|t7#|Lk$T z?>F8W(W~nS7naX`*%-7Tt$7A8?Q745EC{zK6_-vUGH?`R`1ZCo?dwpFZ2chJ5$YpV zeS#wLMuBpI5-}=K7>34Z?{+LV<{Y-M%(CfiTi6ayi(10R)+$09bW54 zh423E_WH5-g^Y2{s5ki4{|s85v7# zudqA!i|4UA-8rt#D+F7rFeFI9u1xo2w7QsBA$m}84$>-MjTVpTKs%&L3`V&-+{)o1C1t4LCXc zBvLQ!+P0?k5v-4%w#D(F+DN~DM}M6^wP=YA)+3BWgD_@%W}_Mi`4HTzs`3Xg6x49O z`7GuT2aOaXd_s;a3_ZHq*mQJs^s=et^}OCUH?D{}#1IeGQr`|?Zyz5BY{Fn#iqq~? 
zYX2TkoxtCV|( z)e~I;QL05x(S0)(YGoge66%2%9MqI$0OP zRlXm8tNA17<)QN)>^Yl#XDUygdf#1f+qdUsYxl0b*HzwjV`TL&4O2Y!hk*Al(`@Db zv{l}Kb6YE0aTkD_ciSotcn-E5-Fo{;Wy(9#jkl+UJL^uoUbZbZSBLw3dOw4RkCGrYq2n8i|aJEjCioBc=o}#t$V}rO|wzO$h%KUf3=mSEenq%*{a#- zV>CGA5WLx1QOoYQqTSRa1S7!qD3n-h(e3gF;y2pz`2vmnf+4NnCg?C&h(H$lY-pOl zS73@pTnKi%x*-LxQ3Ler;2DPv-vUMuF#|Mv=uRJZb*0Mn-A5zDsjjgbc5aZ$u%@K# z0q+>{AGx=FTgt%K!>C@Fo0T;?Gn1K@r{HV>pQO(KxKj5V`#fhbM|f$qw|m0{$m=M& zc#y!0?wFjPI$sz8)@!Q$E)W0)w&y7X*ba>0p@9<&5D$6hlc`OZxu+b5rmE5cag4uB zYREY11@qe!eWZhhn8^Ju_?N&R4xvCY(cDiMRd%&GZ_=fX@VXE*vE2SEYWdnx#*12f zx8JrijPhUX@PaZGW&p*M+A-v~VYhnMzx3NJ0{V2g>Q{Op@)VB~HDM6#=B|MY=#g}s zPfs*nu0*igh1DSCk+Dm}_h{8g7Uf~)eV3>Mjjk~8z%cI=>oonwt$;-H8T?B%#}#A@ zPI5E%K8HZut1&ij)hY)^6Y3)mwgy_ciP7(I=8l+njx+JcBR*7y ze(-dO7tF$^RR49rGCP+|NELLWsLbp%FdXju`Te72fK;hf4qY3VP4GVMN<~TM9yQUu zh`V#a!vk!Q%46WPJ>Wi(qjn(gcqay|S0{f2ib>uF2i5k`nbW6f>F81xVA25KQa}{} zOV$m+{@n9+wHGon=wjYM);izO(UB=_YbaA<3fh)*DF;yUn3x#Tre5jj5_9V)ZHyTY z-u{51AI7_4-)0&$LaH@k3gjD8=EjzBW6K;_DzX61cZ=X;{d&Fq50MJy{P<22WzEKk z3PEuyl8*RV0em>v&!Li1D7DS^@4p7Om_nIh;60?RO_EnphvNdGAZ6rL*10EExHV>G zZWSo(B*(W@^jS^4$m!pF<3_K~@kpPYkqZKhFOn_j@k_@37VKi%6|AJB^rAM<)O3OA zsI~M`8Yee5xAp=U|16(Wq~yfp3VjXNUTg5mRV`8Ain=_!v<9DUTO<={14;Q42)lmL z-R2Ttq`yC$wvWn;q@BYzYQ}q(gHM>1?;9f{ zqmhM0F(o$g0x)j0l>XA5;$x>i;W*-E5?TNZnf)s_6J}=UyADrwusMs}?OZ1;%wd2H zQ66Os6JHU#RQNZ}SED+`S1|%rPWAENsiWTphP1hYLF41|Qv}@_N11cPh6l1NNFKu<04k*cKw-z`p z25Fs)48|A-h`^K1o!g+hJS|^yKb|Iw6e0N6l^Sb~#5S^sQkxH#3tWSmfuGn0^L$dl zmjQaHg;fOcWj;rySeoXtSbG&{M_tJGlsX(}@y40upX;=n#ZPZ55B{Yh-}gwVh4!f* zUCgby0DmU@mgQAQ6+JTR*S8{K z6fu1B$P_&m7REIi{_Fb@N5|)Qhc%BzM610F-2;um(tax|E3{2TX&-y_CH{COs}vHLG^#4>`F1 zaEMeb;=%jGSh&68P}J~gyfw@3UR}z3!LwLsGbI_9m?&>u*oFmR+;L!p4ico261RoU zxzr;s@*DJ7&+UI0gex&myMdGyb?VQLSNMyyHft0L?Nj8_nUCMkpYe!?gwH)qN!Nli z%mUW0`-Z3HEBh}S(0QbNJiN(;x*3qT`KLhdzdl^b2X4~TQ2$9=ew#JDkMyh3zS#P! z@=|BC2JTNDbtRIQt0E+cT4sAMuLd<9ZO@4RJh@ex>R| z5S*il^e?3v`(A;2Ojrml*|7WBV|;L~yO(_p*8nJV>hzPT!&^Qks=X|+p>*Luia)f> z&5*M8#9u{UL0D+@YAA}IgCjopvMKBahE z4Wx6hY9AQLjo{OA!w7Z&wnm8cb1tmtG4wTX)SH(3zB?K7*@L@1CJ{bqsi~bI2yKCD z0M39!F;_@atSy;|6nm&Qu*%;IG9f%0x~73tC8}B9@R6y0_(Hc;Gcc7k{lf>+;FYf6 zakNx*e!KgISXP5L-yoBCerDee0l!**|J_V@A2~LV1W)56UMViVx4S`m^YTIcw{KP{ zI0)Wn+s_m7?&Z~wue0hCOj0hj-0G&+~H7Wb14D|MG!hb7P){df-vUDW60Rpi-wATJ%cQ35_G zbu)m22VF5^Z{L>xq%C8Y5-^(cz0xoTXI)P!{*vm0V7jEAPGH*salcW+CYZ}MbcZsL zfDB%M?XJ9v%{d)kf?9z$m(@pHL}Uz`v>M(`tZ3pNb$1t0W@w9JF%dk$^K4E+ zUf6N)2_f0ChkFl9HtwZ~S^JGKU+x6jMV5GL>0zyI3L;nU5H@9w;v zoa6!d#-P@ zKUHYRDQVb4OZq;-xTHZ81Y>ROh+M7g0eEm>q2Fio5ytPxz|wIEIE+2YeOMnKU zdB~#W1xNv1n}Tz3I^s)0!@~o@ z_lZ4D13l|w2ILHEOXD8|Fn0CV!4J=yp%F=Cx0FQ1QjCV3LSU7xyB5rlQ zM3yV4V0dZd1#N`xUQO%Z`q&n$kA>In&O`Yx&rcNy{^&8VEreri%h z^s@@*%^vqoqZiu$qQ=-cJvmts`5yRXdM>KH!{h@EaVhQ}+E6GjAkCuV%T#a+hk>Mx zx|Y^Sn0{M@;hCIDD5Nb3g4z=@Q6F%%dR^e=nu2%m88kXq^5a!fP537u_ z`~~*`P#}sah6Yo`J2=-K45cQixS7TerDj*V3(3u45hZ;p&P3U}DoC1T8x2prWNOy6 zRNHs>5-6x>(67!q$!qG&NABMcAKw~%TUh*|`7xX$6rFpwY3cRgW{(1oPLi!LpWW*S zE5|)}|09Wrq$Ze7wn^JLrpglGLE^{dKY=aW05&Gx_w@~yY&V3bGrD{0d}*HejyHL{ zsEZ*@cXC!dl1nVEQHF{B@#nBZ@|%`Ze1$C+gXH%lcQUwMgYiFplG6&>tUdVYp=S4! z_GATjbQqt*CBOwCjg3I!T>?n^rf*DG{C3#n-W-7@wb$ktN<0icZR!yyfL`M7>V=6n z1QJk}02X@oHKFjKatSyIJbga6eN87cKm=4`%4UqQ5-$J#r92qTct_o9W&MQKzc{py zI-=8ChS_rO(Zs{Qaw@*qRM#*qflgV%O6gL2XE5r%MQ z)EbOVes^P8dtM+h-GpRC|D-hG!2-uy27Wp`TJ&gdn6nJWUYTP+P>|u$R}0>mS1wLE z*qUu}_5Ra$GG&QEYm2*oP(N)Tj3e?_=geY)I(%AS<{J>3hezN9TlXx+JzPUkwkP=loH9k&+PWP6A;tiKm=>0UE z=z$)?;fbp1k{1ipfTA6J0)yQFfTg$zxo~fN5xEuXK6IdJteeHfp;gcX__}`mWoSTw zuP2;6OZ$*U?w0T9x9#=2ZERn7d0ial9ky4catD5Z{w{`X_T@x-g15TLdo)IIhVpR? 
zkJsKj&YbeD)ryJKD=b(ax9cp=tv6iy4E*ZKR1vw?B-+g=In`jFqgU|w9a&n6;t2~O z(ZhY|?T;zEBdeUaFU2m%=J_SMt6X44RY4Nv% z`@n3TI#zaw=2MC#G&I&^CgVXL?DB2T z{iyMZ!`w_%G4I>eQS^TsDn{x&{y?-wU8nb-2)?wm<3fBsXsEQkp&kbo?&vK0(q>REvmjHknX4DIDI~4nN}c!u}}!r*no*tSXo?A z;FO`yZf6}hGFxdg?Qi{-)^GPB5mZ2=>GU&ZN#viK4nTKt0sg&1{3;f29zpMO^zAeA z{Zs?tc!#ImRxQ52b^q|*>baeiV=ns?`BNnH;?wmU)c(B$s}K+1zH#fL`)ypUp5=J& z{q)rm;xCf&y#R9nUrB8jwnpaL^xhA&S*J5z(TnkOm&#o1#ofG{-LWN>(SZiQz?dHt`TcuL`kyk%B{Oh8!zwYUg$edpcxwilV@EzQ7SbfFIZtZNy(!*qCUIbDdb)-DjX=uy-2EMa zjVL`TGjkrGHEPL?G?1JK;j>tnoLnDMtbgBx?lWx_Ps0VNY9;%^!`qv4pOiap6oXaM z>4XH9ia!q)F=K1LNee7lMthbTn2*ncJIJE~9`WRxH@DylDYhLmae2Ayn=F%*Pd_I+ zPNa_=+_>>Np$qSP(%%m#xn6IB->TeBM(a0aeq;R~mor%R#vKw4iwn~zeVu&|Cm1;g zcoALVvrfZ+<=2l_e}F?!4ML$$fElI!tG1u~iTTgSlr7@waR)pS0BV%wpLU?+fgwa> zyknf^+ym_D@Avv;_}i->YxdggB&scn&J&xok5dnLoEW$5_WKGnlpxE%(Md-@FbM<{ zL}~QeNTQXHif=h9a^~JR6Up^_DYv?Yynj$){LdV*cM>&2mv?Yf=U%$>3T0`_d((AV*S1x0()=)WHEARdz4z3l2U6J_GeCO514Jj6CCx|>jq$|5;^ zo|Td^x<LOyIMvH4j z$s<()7lW>zh^_z*GlF=UPk%7ar@CWr-+HF2`PSvkqXu_4^dv~fzfGUVb6Y^6KXb%g zG(Pyc=n=_hp0`779or#94@V!Os){NKorBi;t&^$`dpBp=DzvVNYxM{b( zDjOX%9;X45sA&}ur+70Yzc%u+vMf^=7$qBVVZFQ3MTffrQm1*k-REMnHK9zRJez*? z#!)&o@6AR9%h|n-T$0O0GvKoUfp2WBL{AE zX7(DVc-4S`*vzeT*zP=A>7qX%weWWpl=&`m`tDv-hr-w7k(!TOFll2%jQ~2|vyhcF*E7JTaTn)NgKQ(==zc}69J3g`GwKKS7`cJM> z*5lh*`%XD+uasikHKG_RG~$31L@#J!Y6kMg3b}QH_3Eb8)u^;3Dx~*HT{97jwG$Fr zyJ3l%rkEE_ONEPZ+$6izzMp+i_&cpJen`1BDF0ZzBfd`SSVc*(&581(*R_StCd5lz zUUN8-{%q!rpdO&bGc;yXfZsKd-=v_;IULzyr|*kSVURWg{+Mu~Mw8Z^qaK9W6S%9W zgEcf>QUjnZSbrXh))Z6&tuU$!^sJ0&bhzZ=^dnPZr%s*f*TsquSs?(%yI=&lcCEal z)sCU;rbmG$np7gF&loiAFX3oXWhROX;<>CWP&(LG!X)-8rhuCSu;T-afw5tT+8^E) z`R_ryk&J_e2)?jPYNe=BGiSOxAw7?q3waJTn&i3!R}>XU#%cn@v3^OfB=aR|>9Oc8 zQfjB9a}xc8Cuxm3^%9<*&d<{~N_eH~43fByh2kau=Lz)av5z&@ zrdv)qpLO5%w#s^aEuiFdf2i^H#o#pn)iW_ujGmOa8QsujN-^CtBD1BKLQgv3z)0p@ z4TS1!)cyqXJ^hs)dfN=tQM~U4Je9$41k(vpxrNIdFD*U&y}7*1)f`TpSO({yGz&Uw z=d6p7BvleMIlFA*;l#49>bG8+`tj`JsAkjlPBtP-sjDRy95y^M#tV&5mjEqOuQ1{Y zs^#bM#NRi@YqWN4J)B}Mki%SjLm+KfIPAO#@r0cnqpuTnuOaAI|JOMx9Z;$;v2hIC z7o63}!kT4MIf_n*%m1_q$qzP9VQY6&4mljS=2Ug>{(S~j(CxQlu=7sg4*8lo=Ct3O z^8MMMW(r$}L3^roZRu!)wfeR%u302^6RlsLoM>9shdJlP_f>cSn1>rsK}khLD9Wg?V_iBmIhCDL#ZaU^7BMyw~1}4d>fc-L&QH z;$mKI?p5A+tJ;q29bLsG*JMuc11wp3|K94{eoXOvGIpfRk zAiW3qIeX$uj2FD0wJ)p*97m}w>k~@9zl<-8Q^brkrNXm6Hk;m zku`%S3ibX6GSB==KR5xIMPw63oX8Zqwl`P?Mo4LxKNYxTX#X=u$pmFm-QynZEiiUU z@c5^Sm^^0l@>9PYi!%=V&{+CGPtO7P7$MBkD8yhijrOEtim#LN4u$GDvMm!yX$c+P z4(ku6xQHRg@L<$1=hwPj^#>yX;kw$1#qOW2)p)&uRB~G7mW!c-@myJAB0G<|zg45l zl?H~+Q5p)$$nLHsC??WqNf{kOT%&hS-9=a@o<^zNcYb2LN;hy#pGo{))s=1X`D)cM zvA^D|eYTBMmk)<5-cqD$PA3=@Xc!2uO1B6RRqFqoGQ4{SgMy;yg!Tz*h7UY>Vp|&z zES6)q&3!9qCEu7mjErJ0UD^$A{?het#}K5dxVn=e4tWr)nfgFl?PIjpumPpTgQ^b_ zAcE{KfHHE6-Qz`#|NMEX^s6a>*0y$z8CnTof8ui_2T~7eDiR2bX%nXOzlNA5*`*VL zl2oK?9{7|djBj{iHA*+8yD<~%JQG^r1AoBGyY_mr*@6kXZ` ziVHk>`fb|mnturk2<$NB+5t!_CplT$)Z7$Z0eCX{rfj6|6`Zuv%Kz?d+J=XJ07%D4 z#FXlZwJb2=X7Y1q+j{&Xd*e5%g1u5{k$r$5IF1 z$tZbdQ0G^yjOTV=}6OD4}|6&X@l^jurT+8-au2Iv28Vw($?jWM${%$oXEZ zB6bijZYE@kwQ~b&Ql%Qh6a#}?Df_YkZ14w~!ajC(8V$*qSX#atD18FlcXoW-x@lXEmJ}2)D9Re}21bYx_e=rfvW5%GlP`IH3hTK%UBXy5AN}l{msJ z$6->kFL7T&c40%=Cud}_h2lr{+UbKRHbe@aYZI#X8%PaBYxl4brS=dv34e25US56E z#6;VMH#)Y>{n45HtGsW_m6@#fYT3g-f5={7W%$OxB3G1i4o?K1pdh14!D_iUydCCK zGQ{&~6YJyN7dLpVngu=H+}|zU+jkQ{Qh!rxN=zPZVetF`9ioL9Xntmf=$ zA4}69^KxN{Sx8H=cCvk@fz{EP)vIN8gz8A%fzd*K!nhn^ zl?pR&Nl8-d>eq9R@S{ZCyj-~TR;#4d{rCWzxt*q8>@ZmKnVwl)ZAfGI_iz&T4Jx+r zLljuZvedT>=wi#llAW5VMdL$3o7Hbqu@Fis1gxG(2s>?OdY;P#uV+_xR$8O^z^c5rJzDo~$cOow#D?&x zo7?l)la6cuc@_40ExN@-NKKBwIwe!=Trq}bMbloJC)9@P&GusWIC#Qg&f#`9qVMKs 
zW@_DA$Y8tNJ8mSfo^fIxC10THb4%f&a|RWcGIR85$t`)-OwS8sXe1}rmA};FyIbD5 zSx#5kW|a&vsL1>hjU_Oin8udCPLRmPUxEh50oGKvP&&0d!gUwAu$)=#*~w-A1b60Aa2R1FByM0URm`dXmkR*LdU^s2#%!gdq0F1 z*O_PlT&(K6oGMh@(+?8!$_Dy8VXN+g5BupDZM=vBco69pOU7FiP@6i@X<`bBr>Ru9 zira?8IES#7+4(m{PELZK3eaS@t~`2YSXWi=eXigVuR?77y;>-|Ix3fP61_to3WPrAkK<{|BtWt_h6|y*T z;YV`3BDh1h5ls1}IXNrK=(%1F3(9xKm40=?1-S{5;}-k{saUzc$H}bgNpD>*41yF9 zT_$hP-2rk2R6no?GZzp!2QKpO2s$)_n+eAU*7(fTq-*Wv`=36HxcB+b(AyS=7L3RcDbm;>VBkQ;tDK@@a^~tJr0d*; z^y4@({M^RmW>+~|9W~#3-(i7M%HA)ngXM~5X&&!2F1~$A$_mSShc*bRQfaHP{^n4! z_!2K@=144J)+}PAP|U#;-eVsEXBgMS8VRaGB zQ#xh8zF1E!p=^sJX$^s{65G;V@2!WSqyZ$9SBKh%U@yci%i|b;YtuIm*VX@QeJme$ zO*0SquZp-JTs#3V*&I_RIvW{X=B#h%h-xqZqm&asOlmtrFnEi%?!oO79+jrG8O!C% z`=yvTI2hOrm@*!9mBj}J0J+DJfdp&;rU6Fl=g%u6cgd#O>j8l9W1*o|4<608dr73K zOnOG+lyR`JWh9XcQ8H2%3SI3VeBUkQY{$w?!rsDC$?5HSJ+l~KD2)zBm9Xd_Jn0qq z(I%)InrQRifd!e#skY6fbsahXB~t~z6Cfuh(? zW#F*Jy$+dLyxj-Xbq~UCWc&8*f&y7zuqQ}u7W5I2n^PI$;c}Y1G>rJ}@QFdN-IbyD z!HIUsT6M1#%ETuJFf1Nh-ZvP1&fC{FEx=XnG4|Q+N}`X0+yM_vy}qM=P;$*;MSFwR zpnvk=r63_+H~!eI*dwxag5rbEV1j22RlEhvrVWy?w*?Od6Aem3;PY#1L181PX%8M0 zP#%+|s+$97%k4-bbp$G>QnznN$@$TPYi}T9m3nm@8OT`S`(QsSzJ*0w>9`hxUSSo5 z$*N?_llm?@?yud>_Kdm6W!xmU3ZMs_Uj9p(nRBL|3g;^{fr9!-A#_%uv?nBWfr&zX zYANrMC1owV`b*c1U+G-GcGNFa)}_rpu42d{HJ@?p^WNUJAnRR>;x0fWu{ zQ@A%A#`a3AZ;&DGXa#$0gKz6z8<7<}@D@Rg1#JfmIvoKKqdu&vfvDp8(HVfsAd9p{ zaJu5wY&ef=LeIc(>O;E#fIoEDschzpP3zZ7k|Gw+r5h}HdIksAT)FZZa(L}_hf*7F zlt>ZUhn6QVb7d)dPz%N`DNNAf4dr`Hi~P4_T9RU)yUS(DnH^KbuOGhzLE^*v_kv&= zpzy%>EVi$k@9>pr)a|SVkP-w|tzu#cb^4ibT2Ap>R2K+kEkMCgq}fuzv<0)y;aYF8 zxzG@k!dtF|(R@f+z-rDvZSq;a_Le#Fjqc#Q|GjZK6nFzGpr_b`wi;rtMX8p1IF#_f zM0{^YJD&{`2}l$ZV!wu?@08I3Y~dsmyUa)PYXGRX-@kv~iBJ%@x#k?)H#G?~GM_GA zD45Dq7j>KA8Lzy-njJPaHW0b^m->IhdkLd{ztT`1W)pAGtYG-4e>IS(B@YH4gt zrJi*kLvcCW1G2sVw&PpXI!`vBZ1|$jeL!6#0C}mJ*c>(s@zZmxWBn-Dy$)9_`fg>z z?)rnGa^o>qPoWqylCKNq0fAmktoY$zGcKY;mC=~C;5lZO{+dW z;w&;ZTRW&M0yY%_a@pi>asgL5Ba{V-1>zyV!R)uWLdv7dBKwt!z8Sgo5t0JG(iq(ll`^<<#~+tZZ0o zt*sGNwt-_fpYLkN3ntW;)E)U?eInc)x`ocK8(O_Qq&ch+TP>h&5_W4UW!IhKVJ(|M zwNN;fa&|$yPI5`{spX@xP#gUMCFQ;h-yg{EA&St(?)mLcqDIl}s&3U8FY{j?@-Rob51;ae{f=eWsR_|eMJ zTatQX*={alfzUGW6K~cmH5>1=)5SgiUW_-9_Rc8s5@%nnSB(}jvsd!z7W~GiSJ^`t zRMdX__z`OvYRX}*y^LMjZ(#ZhZi0+M1(K^n9xQos4xR&l@d>lp$p6o|tZ$fWLoVtsx#pX)Tge!BZ57Gh}F9PV6YKNwx;zp8k@ z)3PsU#IixrIQ~DrewF$dd~5&FYV%LA_M`dm#fZpGw<^u+tSu?Ur)}fb z@!sob*o;oEQ~-H}1Al)1(xDUH7Q(v!1x)+UW^51@jpXI~UY6?ipR;~;%c^sU$8jRa z6Zw=_bz@W0V{fn4`AJV~`ThC&DZH0wGqd>>Sz78Tl#YnEHwxvBFD-EvfE%#n!er*D zcvhlHT456l|L5!xo~n+!ChHl=vvI9FB)lmGwXoS5pO~ly_occKuqA>G|CE1crSSX) zu~6{{;r`OSIe51Bwd`+U8J=@~&k9|ID_$Aq?J-CU`uAPOA&0ZMwrL=(hAR~n70?0Y zz!#s0No~Nn1g@pTS_gbV@Cq@3>YL`$S$pY}a#h$+wXMrs2r6;FG82=rae*y#N|TF= z7I)G=gYScW!O*}U?lD2={GYW}xuI{i&4u{mGZiv+zvb5Yb)y~_l}(E6kmf5n9Ib0) zW+rD_+>P}_x-NOdxK^?#h1&9w{Rg?ig1Bo!IX2gYeTH@jBhb=P~(QhICkIi8<7x@IRCXHL{S^KSI1gg&Dv!Oel}$$)B?DtSDYBbx zs9o5HuB52qvY6MEX}Fog0sr>*x$=Wo2`4U@l_ME7T9_U?ve>^_)6-t9Cg0A|Yv(|E zICV1?_~5&>!_ghCb_W4y_m%_7FmA$g>s8Qk=n&D6Fyu1cE<+CItnIhw;YDJu_BI4} z{dE4ttyWbRxFlPvQj)+F5^5XKjem?;K<7%7y|#b|`vj!Dd8&b{foWn4Pxy5*yDk<} zPkQv9|5{FY?X)0SZ8}p*mk@5g80R#(o5Dq_^QjHny=nnXtcGSH@drp79UL4~^6u(X zxn&VLhVFRz(G9dVM@cRxHMIGUUW{OQC~nhbLb%6pA2b7aI5Bz9Hr zMr7)}m@;TZ4~Mu#u59kyX<)2&QSV(#wh^tx)Mj7%0;Tuzd(AQq8K0%LKWA56@*5mc zx1ox*x8lP*{YH^jX39eO`+D^q^X&Hgkp7-jQlsZ4yqLRZJ2v?`pYB)fKR8UyQU?Z9 zugx~wu|gZ0rvE*0M-5_ULL3^c-E=wcql5T|PX3tmb3L-x!siC0v0!Cp5i)!9;K7HF zACbe|fI663 z!0aJtmgoa@O}QAVplKL=Df=`MSP)1L*sst$^OazdJ5IiP_wK&_hfE}y4#gw&%h^l+ z9yI;Bxa`JYdyCcLR1rU8BfN}BSew;||9^bFcRbg9`#%1%XI6GZguHE1T2@G?kUh#Q zNs_3H%HAWgRYs9jAw^1AA*E6jQ6VI$BqSBz}? 
z9`Z}TQvQ5BEg|KIhCNpqx?m$Tnt2YP#n{P87^H~iIMITnch!hMpBQ4N@@IA6ofPwd zzQ9YYTpFf+>p*Bd15x$s?rvWcN+0v>P)weJdn}uf8j&^xTz|}|c^J**l0XS&Cy#dS z`&FX+a>tYNby$J1ew+o(!ga@0V^^EBV81t@6G5G(xhmLv*_$_S_MZAtaK{#GjdAlC z?c-a+D%m|wr|j_k^vXPi@WKp-L+T?ueb~0ai~tv`d&pkV)^5)RD$UBx>bbi{7JWId zQNYn>)cQ{ZG3cp7kTi`w!XoHuXb)}bWji(I!lLG25?L>Jsi1HK#Gu)q2i}M^&X=t= zXUPF`#EErx($=7L%rI=kX>u>E~^<`clH^gaI4kl|sfm}2m?Hj@LlRP&(3LE!*1GmWA5Wp0%_pHEaF;FK#a)R=E zC+ec7iFxwCMggFNmp_JtWY*)xJ{6VsfO}-A(kN3;FcC5#v^O5#| z@&gx4lbcsAV`OwgLByR3mmfMQ&-8_roM^ebui9q^$`eN<(UQev5FEU`^iQbebUh+D zxd?E|o`dsY<4#{g&hoF}L)jyGst;U@k9d$CFiGi{4g7MQ}d7S+AF&QqG-uLIz{;k4V21CIToK)?};t zXvg^N9H&%%{WG}_F{h&A?ZffXDJJS-M$1%Eysm58+FBMjt#MHjhr1eDIY`6)2DKIzOzfefBhn-`qHe8 z;E7}v0K2~#7N0nY<%`&JU^;w{U6MbT6vp7JB#I$k;m_CAY>`M<{P7N{%#6#HVTEyU z-#$qX%=x`bG{5je*TJqsS28^_lPb}FU7wc*r_L`7Z_#$6>MCl>EX+63RX^%$UR3o> z_nlK>@^So0I34>^^jD2vGT2p>Q_I0bBoB~zD|%79|Jdr$uKZ1{JF%#|k(-a=@wM;862=HQn6!8m~G`UxW2;m=Q=bl=OTdx)lbdgU*$s;V#U6~h_s_YD+&;0=9!^_ zDtw7q?Odtr;qU<-i7gH+D>7rimAaRE&9k=dt1JYj?CjS}E~0*}|FaR98;VuH0hor4 zjw_hZ!qVvE~T(f@?&`3o1`z@{_9 z1H@m4yLBLL>$jax!%4}yJ9pYqB41Nmzd;i>aTLttNw_4ZL+%5bWx1r^m%8sgmfbgw zhf-5;@5fl%RRj};^w^5FJm+CPZbU2oisbI1H%$Q94$ed3U*#oblF9`5EQ^eYGqOy+ z3U?-{r>!ZC<`Ve%x77E(i6aFLP4yg95A{YYol?tpSD6p&xJrkF>F~9n&nJG2p8o`v z;8om5Xu{7JX=!$T|-zGyu9^9z_3>Eqe|aOAr1v2Ses90<9?KU{JUBLyiuWWiN^= z9xb-p6@fpz8RXC2z{93KyIRhdwo0etS))@B{$vY_ z*FWp07+EZwG1TDrnvT3|l5%SEzd?&sIc(1_99K~lLar*L%>DC!cZGvLu*c6tL*nb{ zus$+X`n)FvW97UrM0Ge0DuthV$!L@c+^nkWaTITAzUB3`JMS4p$L!So=|(nf-Kax}|pu^EM?7OlCu z?>M)8bnr6MQrc#gcyBk9M0s9^-i5#`i|l<85{d`THeKg$}N$D-0% zlE6s7C$``2NE0Kn+POVI%;Z#Q#MalX&ZrBN({nVtMecR55|L$-K^Lh6cg`@DO z9SCNs5xkuT8G*QrOl;Z|hpO)UERIKh_(4m^m7~kk&uYETQ(`HhZ>yPMjdNTA+M2n3C3?>p?ciNd@CxYxNW~@ zDn)j>W=+j`voFzCcFKhWjNJOYI%)wKZH?Jyp==P#UYP1AD23>=`68iV`i8$3AoC`B zGu}D(w6qWD+Pc@HB>kw@(<)^3b>CR=2lqI(gCOq9owhI(V0^Ld@@}f3{Bvuir5`jk zIU`Ns!fLW>?Fwa=sLPixw`Wh&uG7E5#i6j)jJh7_2$Jd%J6YzNh6WCeVa|949^D5w z*kL3sJ^{-Jv}Krs454;Owc;@{`;?Y&VE!k=d|YjN9>n96i+{(*coBuU8H*G>bg zf;j#}vJxsOkJ0#~0yx4qOq^!ddyeXO3lk ziv47ESRPt-ozb-cIS_VV1&9LWBxc&zRR_-6Ex$R~LjMD`h4nk7zk2ay`!~((_Gf_M zx9-O25fdPTa3JWpFtEeZR*2`v+_xthxMsQdc5xwHUeo9?a`D9XpT$w;_3mG>$poAZ z0r@fk(MD+#OTGJ1{Bmq93Q&Vqcj=_bGQBcd%tdBREs}W1(#w595!QHv&+3sIfi(Obhs} zp_xvbz|Sg$`sfi$EmqcdQxjok6@IaQV0NJH`BRQ~RkrPX4N^kk7Yfx^LT zVEo|KeX#c|2SxUw!oAIyC~ytNRkWuhz28O0e#l0_BZz-zEyk#^2XMB^taJ{wDjS!a(7gm+@V}vA=LAh!wEyvrF468y*Q4H?JnzG*-4}xqGD7 zEE*p?oqm=~N;paYkfRE_S6wsxAJxUvEmGeFj}H!Lh|I1RyR-o z>_>}w?=Vd=$U$KgDSwm~@c7miD+k^iTnv&#`8P^ZQ!?v5Z!2>+`EJ4y#Qp%v^8hdf z9Ea#`L2kTu4g1?u*H2cQAMXeUAl-Iu-3EV#zYi1I3>vXJ&v6kQ7)P z1Ssz>EZ7M{4JlH1Q@koT)zZR(SI(C?j@SAroBLMI6RvB7vI)@`b+6ZV%AT9A7D6Q! 
z+PPB+bR$5+WK_FUquDXGHeD8F>}e~v$BDLrdE2dqPg>YM0zK6>{KR_PqX5+9XnpLf^-9mC2`jsYli2iZkmiyrfSIkg?cv zx+n}YZWMsHJ3!z0f9uACm^YXiuf&*DSg_6&yFJC~1+-RR;>rHmBBQjpJmJgW2Fjb} zdkm&7;N7$-NVL^%25%nD%}d}h%`sDe$Hj-w9l7Vv%g4tMp}TKip(&%_`T5W6-33FT zz*G@{oiHJ>ap!vUv2feDpg%OkQWr@aSyqza-th&EndlURN?222JE&Lr&10{}W&fk3 zWX1bc$y+I}i}dvLqr<}|{QORWJg1Y9mNq&%D(P-)+N0u?i**j^s`m>ai(X+O_{7;h z-fg<-_3bv8sN{6dg@9WkL#jW-=BZUmu3b~WQWg5&xU zqyb$^Pj_M$&~lXd=cdm|5ehneQ16HEe2~No3L-9H{YffP3U*TI@p4>l1z6WV7Rog= zno7IV@+t=ME8t_En44VXvHsu-kEgj>kie$OYAu6R7l2=rEu#58eeTm-;!);m?aMsb z2a-B9?xox=>Ka70l8yj@&~iII8}qBTE;JB<^pwwaDrT#|rJ&glKOMhn z)Kpk8ojLB>Lrfz@;*tm|R1r5dm_u9Iw!50Vt@-l0p{M)WiY{{X5yn&4 zZ))PD+1U)Js7FB@m_()8jwLPJJ8aQ5tquK$1+BwG0MAT6jtk!nKKeU$q)H$tA(6Ty z**QY`L2k?)-EYOe!F%-t_QR&?Iy$i2LCquyPvxJ;c0bH+t4{BqIKcK!3C@Fv11m#r zEuW^V!L|}*O|hGGHn!ZqLq{QRqZf-;u2=JP`ASgTCXGU9utE|GmPv>`^B0Dfr$lMj( z_V8ip-h1lU$Koz6dPfZ;3&qMHwJmJBhygv3)h!(|Oy{oJ4Fa!4g%iL*N(AcX&!@oL zBa z5lpZ4WUTvVK0M!MtpZSV+-QkzKC+5}n-l*7ZE{_lktj60&j8AHP(ckw5qwyZX3T+7 zp?}9O_0co*TDvWF=e*Hc{=hrqnU+|u-%W*^yX$TN6X1N!+I5oLV*HOSg{MQ!H9-H= z>&KDHhTTargz6e;+_J42zuBC9$4hPX)YqKFUzbr){!k4sJ0R8Ka6n!`)Lpc3*q5fK zWo3el8rr)R!`T0FHtA_!T!*_~Zo$oqSY;yNkqkMsMPR$)TTqP^=qEzR{x$`qssed^siephk;FE^oy`w8+cDCV!~9Vq!K(8t(t z5x~g)p^zKa@61wmjj0^PkCFEnmWsn0!-*{FJ_$7ONM3lA($bP@o2f)`@eS*>v@F^` z1YHosT!#Hp16KkG+G$;)UbiblQ7a90S-xN1*#DiGYXypau3oo}B(K8iLGm^rl`nY3 zdV2KzlOzqi6nY=~uG~v*XNCAhM0O3M*#Ui*!VpuR-A@tXS~8UI@&bN+E*xhp|5d{J z(3Y`D88%q~LdU)s30jk^=i&5Nx4g?G5of6VM<6`Eh_8s^3$77*(E9fIrDpV&8&0qv zJGKr)MQA``qM~yDO3)tD!5>FcKXSNSF4b299^=UiH#&K1r!vw$uIf6reZ{#!QI*v< zr;wMqB5N8xHQ=`+4Sj6fPkWg}i`GI}Z6`q$q{&|1bG@LJVco2gNZEt3~p>|ceDNl z59YETltOvgv2)^V@H}YSE}JAJ!H-kMeipeJ_ZTpsdaK?vyieOIxpWB)uV*qbm&K?N zvZvtP>xVcT1osZt&=Q~TF!zzV)EO@8jPq!)1G#p(`d|CUqr-51Q`y+?Bk!Lbv-gjc ztmlIiFF!lDb{CTR;OOWvjIBND{?kRcJNH!$x6&#=j)jo0Tv$Nh!_bhJyRrY>9Vblv zLDcT&5FK6_wf@$jACEYb65;3FgiDy5Qq+=EPo1i`JCoo>WOPm~3C|el?)Yq0*$r|X zDH^UR7&(+0*ADo|>5TzAs5+`Z$GzJl^a*kq7!+bagenK^J%MsqvvBSG;e@ zb{98&CnvqY|HA*u&@=)*C|*6P|Bl^d8B2Y(GOYB33{Q26(ae}}SS&0)c}E@$zT^ab|>oEbc}-KCBg zr*Z<}Z#_Ypc|lbSQW{C)=6%knw`{Zn-UQp}mY(({$qy0jzVD8iyIyLa_a=vv-S=ck zHU~4|*$foj)z!5V6h97rBV8Z8^|9kBhnNWDo)2&DX}Gx}zkpK`L|6afj_*ec2Zw=w zEHj?|E}#5)@10zIg@cBPwxWgR9t;ZOyiuxW&zvC|F&-o}>qNgSa0+o=PF&r44^D>% zZQNJ>r4A|81V#;#y+c}r1Q2q|t9D6dm%pQO0SBx`%UM1Qc*XA2DRt?}xBqR^$Ye15 z|3&Q{I-j@@n$4SpL9oLa8Q?4|iCSuMvF9aw+T8?!o4 z4o=K6qzkK@u8#}aUR3`4f_?OBa~%&;u0Em2%6b4?pSZa_con$zpqPUWIc)?=j!;^))Hj@|E|XVcX{5;NWDMXckKlLBw_)Ei)%Eq>IJ*m-j#(0@y(v zpA-(;@gJ6o9gQgDpx-^%)x^!VDgM>fgS@HgrQ93Vque`OWEWen@bW`Dk3pJ9bNK7( zx2J5?Eis*tXo{bugsAq6MQl31R$1dJPp^P-=#Nq2pSQ)fJC*L>*sqIW;IDds4~rXU zqD`NY+x(N5nK@$70Z_R;n6g=ftK!J`IJ15DDZDezLg8B}ee0t?&=ha4&GUX;V{a}{ zTzU5#A*$TnrN9Ex^_h@hp4(eJIGZQDLW>!lH2w(Q#VW5~Um_ysy#I^#>+r)t?8mA- zF?a6VLD9#0aX?CL*$7zU0?%^1FcTnKISStF;)F8F$y(TSaRgFKZc*Q3%fG*L|x zS!d5)5!j4ZFWDck)~L&v zm^B6W*9D~iI%m2nXcf?zl%)^ulU;ht(2~|9pDYbMKl$bn5(i%@u#Pdwep}*SoKJuw z=~8?MCV~mK8MnDd=1mx%3yx-DyM3#sCS{FDV|Vuru$J)#@@2k0 z-QP8pb-%B#?|vhA0*=+E7Z+S!;wm9%Q+&S*b zLGYsAVmfV^6_bDoNN1^q_~DuVuSkQbzJvy{^0K z_G;{i^TT1fg<2b|tBn?q(ofoaumjO7#eDM{cp!03SQhSTE1BQ3=y7BZK;@Y4e5SgY zg9d^x)89fafP-qm)}&0-%kxAXnWs++1WHd5Ye6KfHKIW;>E(ex z9pfqIOda%%M#i>g=J4xUU3?pUPtvk0(vz15hEYPO&4D;qQn;|WYRN8#boIFY=bL!J zA`b1)xKkeP)PePxBCA31N~U^U$+m5=jp1Bpmvlmu-u(A3_mE}qe(vK$3yx%M!g8yz zu@U>eKjc14a!~M-9#4vnjih1Op;tO#Ftn>X@pT9v9olE1l`HWJo&uwitjh9bCCD4* z2|P(zS@WPCvxA%s%mq22AHH5l$1ow-1o#Y!`0Y1t{DVsNzVFE1rk;m2IqqQ7tx!`? 
z*yRUIFr4P!_6^d}Pngt!2a=*%U`KF*3ldO-^@h#3PyM@cDE0B->sNxjL51-O8;uva zbTl(qc(`nh-LOpaH=sgxbn*CGR&qooCc+m}tZVO0kIc%2{W&L(fhX#B9Hj>BhigDS z?Z$d$pf$Y(hI|H$>(-r1@@V3y4~nG8JM7T%Fq&%O*09O)nk|tVK`ot!QZj-~m^q?u z>g9T!Qo@xw=-2JMWC<$KwQJ4ZEMsOK$I+MG(F-pJ zI!iR1jyE6%*fMcv`r_NQavL#Ji_TxXx1xV%eb|RHi7qj`sbD1rOAD@j2msR|KP{Ep zGBWwlReDFMLrIUFOeou~>WP*w$ea3MY(g`=2%Y10)vIqfeIvU4lXK~4J4x?1ET*Hh zAFxfB2J>G&jF-N}Z4m50pGIFpWDTZwD3BuE|Fxafrnr@h2A}|mwxfSSg2=7P{tEEg$O%1(!=PY{;eW1KvT-ZWV zBDSX@8@$8P#CEnK*K7P3UQPxZEDw=6bqzZnUD|#Yy@oc%u9y2I3<#p52U}bRxWA`@r0ic(s=;YV6zR{RXERAON4?z+HAa$;9zeR`VL z6hgufdC|k3$7;w1NOo$=>1AQmCbNc*KwTERlqzejRmeDMkB> zIE@O#KFWc>EJXFV56BRjq4&eXXT;Qm3?8flsW%&GqjHPM(8vf62S;uSMrA;C%%5cn zj4Q)(N-VAz+{|12$6G`l4xgb)yY}36d{SU%7v<09JHR~s`Sl7tK`yQ#sC?1hvvFRb z9^TizoN%}eADZO3t_cGj)3sNX&*9(`u~9b14gMW4U`%daRHaY_ZE~6Q;gTe*>c?nI zH+Q~W<|*ZH%r`1F?sgGvIQ6*0{PYKa%A}t4;I%r*C!6hrDbcEon-0YK@Wz^M@Kz03 zJ$pQO>d?PrWx41XwrUl-D^l)MT2(OZv`ZlzG#kesu-W@=yD;!o%&guz8_2TvGgn8~1F$rWmqVAIr_zQ-R zS9V{1jOw?OPjqK1;$X=~l(UYJjcupCer~}&a%F)GbMX&*Q#;Y_{>FWeGV1F!ADHF= zMAkVm)Xc=kmySP~{2D70zkq;;k!@|Q-XLauo1AQW^vI0)w$BH#`7^csHXvZYcmm8@ zoG)D|Z+nw9>k2x3fCa^-*bT&cuGF`y6{A~fA@2Mb%|7W&S5;Gcmc^i<*x^xpUt;$vdCe!pD3 z2euS#6Q`N+vtR1TzrLdX6tvrO*z=;zD35JNO^2hEf^}us1oj#)y1UJ{P-*Ih>fhOJ ztM{5+&u`EwK6mKL)Yq>`_jX^&Kc3Qd-toqT7G4e=;Xm>^T4^!J2JLEmKc)dqEjs~+ zr9rAfe(P`IQEVg+LF4!I@{@e87y`Gx8cdjjU3I;~IxXrrUG9d4$7bwXqfHsG zEHK+e7y_}7h1?ptOM8RE6~LzJk93Rut@$H8_P z7XaU4A2UWx>loC#3f}x!BQAZlH&+Ux*k;ELTV$u{8b3`Y&Z zw~YsC04q(LMSo`MoC~83DXG>cPp$#a1X^qd8ViJ2!?eUmVh&n;2*Tnf8jH{ZUGXl9 z0*|bys0e0LsS115H*U1?SC6B(%K-p=%(gzUQu@KOCahsqJ^Op!z6B8~hoTE#g0VD0 z)LK0y@{R8&ZkbjVSru_Hu>r}vFWF9^aJRa4ZvnENR!o)W>N^biaWm`Q!i)mC93hQU zci_$qUn($bh1CP0ZphJIO>n#IT)y=Jdpizm}fP*Bh~_{;D1ANgAeJ-gujW_w6iV)^AR(mW#5WYTt_H9?nY& znCVcjDvd|FxDHF?Pb9jjCA)U5{zTozBt`-tn=(~E_AndXSf0{C;!&LYq zEMe!KoL<-p{a?_Zq;O~^?4P87(&qp`AEX=8n>dvKwkWy}ZDd2}7D(&yfCDhj0HE)- zi)=k%p&Rx~ydM$u5`Fx}o0_?<$B%ypFC4IKyQ7)OK{5$nZ@qv2LFDq>NX18(fGtWb z4uKY9CcT6;(Gty+r$_hnpBPEC#1ZMkSO}1e33zg#E}*2IO-TW1KcTD3Oz|9chXR|9 zl{sphHRnc`Z4hcvsSFF;nU7dU0iF&xHoYjNqOyD#J!z^0EWLvFr+MI96`ae{y-iS< zwo?<`XQ@Bn8)4>5NyZ`!9PN`B23+_e(5utlTKk7>>SDXrJ@vxTw;FrLT&%=D-Vh)6(gv!a|M|3-QhQbX%xPD!cR-X;46WIoGyu*I zRM;&eOAd0#U67HtQxFPv!$VK6tknPF_JOt29~gN*FGn-QCzj3`B!ArN%{@V7co!%f zo>UfmbD9^(B{&j1UL&8C`x1=FpNuJNBl*5-_CG$C2f?%>rnIH19wqJ|yeC2e_vw%A zlxAXK>8o&NfVXfAngr76Tm8Ax)Yst$XYnJ##4Si(^IXawkho{&5Jg7^BNllx5fctt zc0tm!AwGB(Vy#-dV$xxz51sMM^mG^b?b8f#Spp|YdK03f!5T9{Gw>e=j0=}u+28{? 
zX7|J+qtu;VyzBCTRNp!}V>y6y->fMa(!&fpLD}5nORX*E{$ZpSFcqr03V*@H8p{J1 zsb50oK}%AA(G~w*_+)G~T=oD8Gknl(h8>C6t{ix3U?E8m(1GA@PQ6>;=Jo5>U9;D% zT}#R+F_o1=0lp6biebVO2Z6D;En9*1u*YzvT$?sBX;(?qkBnIxHT~<- zmHg{gHG9-T$~biva#=2g8g&Q=n(uW@+zpJ_ODL{OFd$Cg_#hhY^AoAU%@M!ArGd3l zenFJ@z{We|PI2?5+PslC&?(y4w?8cZEZ5vNHagw;?dnr+nvW5A8!xi^>#0Mb*+W7H zF~2lsA8)=IJN|PqLm+>jV-2soM8waX5vlp2SwVVYA#0f3V^WfbVg#LjR9q`e`Wcb4 zS~Xo~*%3N!CGPulw5j!Ux|$q?m8>aGz656M`nKYU?4986tY7?&UN}9O;rIIY-N;+t zFKpW1QY4_}HGpok++fWkkZx_N+=9*H-+lUY5=+lALHgQF#AdkYU_C*~V9~Fjn}MH@ z?{VUcfs8?5;GOVrUHl`n(i_x+xmT=UaYEYzO`Yu7K7bXUCMEz|=)&vO5|tgW80!-- zFj-l{SmGALkB5j-mTe&TWxamROitEQ9OLRwJ7EP|USbKqFFepl^FvOD4%u5-MVVi` zn46o6o55D9`VJ8m_O|>nVk1O-&QoQc<3tU7(O{2fI&DFIV{A9qXu++-W7h9)a(jDj1QE=g8iZz|rb{`+&48_JrqVZay(a|96 z0P3H#?sbe_a6fYh7{MyZUmNklct)F4t>+ArXoBt*1?!n9V{zte?9!QA@5P9-yUTF0|vPPe$k+{2v*^=PF7_-f5#5;mEpathDr z92JdX&U>||oQsW6b%`*J-q$F173Enm6cY-YEHgbTKPj*m+Rkd0)+H`t9(-CQY$RyA z0Q&kTt@9#Nhay)DoL>I!V0;l(;QWWn$bd~0Ee>YV9vy@oc?}H_1`dig8$rL**ZK+m z`PfgKEaPg{_yq~17tQ`L=V|(&qK1Ru#_-J3`*e2}G$RX`9QMxhi&bB=1l!5jtNv3quz6H)jI;HP_gz-|idwDAe-I3=&*=MUG5?XJzgV6(?zK5>nAntC;EZn52yl1B{{kFr z+zZDwBhGYVUIQT*G?F$(N>kL0mvhEP-_yu*&Bd1^=oHPs_My<8R9JwS9-oLug@{(qIDcNLld-$=IBfEMuC+?Tmk?la0(bo{YQZQVYh76ElN={ z#8Y4MPwR-n8~Hs6n~*uw}&b3z3SFwDKU0PywfRB;N$9gI9HbGpG(MH~f@DwUy}s15w3kTN5mg(q^6<4p5zpMKqT@;=`y;xvmC2TRRlWa z>^pD8Clna+tbS$0)+zS^C7M~onfklYnxri+>SPzhMe?sWzhe}UcI15hVjCL~czVG5 zv{R?26hn|;JL$X(eOPvOEDd9baWVj0wN0CxQ;j*j2}Q4xhg3wneT5U91ye{Xwli?{ ztuQD`csg(c9f8#>xF-s$SBKfFhKGk|W@lp!-{4&bcMfc~otzs@OEA3P|8n7=kdc)o zDIrmz1qv^eS6(szQapj_Km_fQ%xK6RZhNa?FA4~dUD&?xlO8G*_eAmOSa&^<;!^6X z0(jg94}JuWU-@=nd7T~lW46}vxyXlH$yFY`rj)HdR6pnEA=-*!nfd;``o;~{_I4n1 zG~9JCAlSC86+p+4o9xA>mDC%bT!Rc)TM~B_b9a zQx)BD6Zy@lpPyi0A*8v6$>#)`E@ZF)PZ)C?K*{VLBXC$eWsT;S!-N>4_J%g9`ZF6) zLw)P)&KfVbvcc615rh+z%4}?G2q)Mkp^vC9NqPo+6|!-s?n%Y0FNUIRRJnpcg*KVA zfH?1!(Q*r(p?z!Z-&B>X?fyuJy1KoFH3XV45E#e6CGvlN{&-Egi};G-r%?tR5s75h z=>dNNqkee_bK`I0RR-+)T^PjO8B0FV#cm`Rzwauz=^3sRu0mV9rGKaEiGTpo9|K?J z4D*6E8;C#$I`c^_q2)P_;>6o$~Y^#y5$2}9oI2QO{bqgmBQ~_ct zZ>>$g|N20IO=}CMx@jYQwCBffU)}^V5ca2>9II83ip~bh96&3fI@p&)>!=17^7^_t z;0sr zkVjsHg(VinKY<4}5>T7UrLOw+i%tkvcanP9MH}z!LSckgPg6?jCYLsjbvsQ=Oz`gV zWY;Bh-QyP-eE$+ACyyRILhAb=CO&sK`S7(Pxvw6#0&3ZKY$D1$pB~mEtLLXe+i?!< z8oG6>ZZ&bexF98JR7TZ}1P5)NFBu@x^70Y7-vRs_Asp?X)Rz8zzXVl=yyk=u!F+|Q zA5A4mQ=qux<~u?J1ttF(pVtjhcX3ru;Au4XBrTZU6}%EA!XjSa9LUQ(0r7^(+1Y*h zy0PHf$TL+YjaN!&anOgm@AJ~Nu5h{!y-lRKImc^u-KSo99R6B2>KsfMh`~QeMsjyj z>uLaQR==_DmXwr4I5_UYKm6c<0}oclifugW=)2&fyYEj#C{-?BHJCPyAtE;lX<|~> z0Z$_LsBpfs^@O;HGEz*Gx@pM31#QP~F1mMVG;e7riH{xQ^(KmLZ9X~Qh4WxYm})_y zj1q8-CLNK>4T|WD#2qF_*~7vH35{j4u3rxqGLt(;8yi6jrXfG@M5xamF5?;(bDtbi z2UQP~Ft*YTPc7WbNWz zE>n$Q*P;WPYlHo0_e2;~epctU@sc2x1Nc4lb<$-5GQaLEgm`UJF{?lW9o9!ra9x6$ z4QpdO-qtLh+s0lwnZ!y!3 z?IZqhy!o;zFi3V}D$|wf#e{}7L3LK)eu8vXY+3(paaX(#A?hyJ{h1J5TXkX6akfK3 zC&0bCb*rLPdquIlEoz)`(LQ?16o>=izO|yXM6_1KfFq2n=q4;<2+`Wp6r7P1-2J_E zbxp6nr>FcFHk{78*oD-L@0~h1YsKA}$y)mDvW)W46~cz;l&InXoi%cD%VN|{{Zf1O z9f7#<=h&Ukt`&)aZB+^J;`91Uv6Ir=>vpY6`9c_f-t~yF_I$Vy;l36u8%PXZgl!KO zjicwcBOOZ1xZF8$9x-<9T3#-C>R)ToFSOjtoZ{x=L#CO>=Wn5+{knNug3u9P-}{t2 z`FwhXg-V9c-^%A9_GKJOx^Us~=6fkl?P+^FN+4%pU|>LLz~L4c3LnF+2AQaHb*-#& zZJcOcOKykqkQTr@tPp??XPbhvQ>lT5X>UhI6-~C<6#WOGA(vZl=%9&Z6On9UF7A3UD&N22&`G zv@|?^@K(Zj)?37LTUuI%{{F?CgTnK!rj}!v(B$9p&u89nw57Q@?63uzu8DHTrXJ_K zc(ZglrVzz7W-6gFF|fk&@Q}4I*YeRw!7DR9q+r!W6~x)6!2^so-a5K#yCe^Tr4eHl*E>4Yh|^R?Jf2c`q~M} z7xiB@p8NWPjWF`V(_Rxxe&CIhiCrMrfhO?||4QzQ*jWvq*d0HCAkOTp@cHlzgNE>n zU^UH6veuO@NG)?OO~aA$1V;`AFxKt1v+GCkLM<2|v~_is?ZL0pUnzwv1%Nt%lAKg; 
zKb>Ub-w;@XE(@Vl_W9#S+p9FhJss(uDiWCx44YXegG;atjixmeaaP_x&1Nm5u7c&P za%@7vXg<_}c9gApUR2nK-GQ?n{-%d}$_FtUT64CeS$IE$N3(3@=8UA@e9wi8vWtrk zznv7Ls7O(m45&yifT zd96#Ym6ve1VMna55WZVT^-{C(ws({*Ec z9aD|t@Q9o0I7nF*qo34}<>+%}xEyAkVWlr( zFb~BV)EUjXm8$7L32gY;EtVw%q$MXXeUcH}=$y zmDl1|8_#G+EyQ34M5gh-U)p+=jxcd)c31e{A6mb|CG0s*@t;|sxou}ynC&x7fH!z3 z*RFj*-Oi}I8d^&lkKi3Ti;wN$we%}~@%3GK%1>@)4fVHFn&~-KJ)c-no$8^G#owJ} z(S$76u2`m7px7H47ow$A0)1&Izs{8;utDw7NoAX_fwSM9gJFXva%rlHT&;- zTNm)MyL;w7NSwf^9d?Wi4K?bWweg3T;{Sn}9{LiR5;2VhXm3d;wpEMh|NQ;@8X9f? z=S>&LzXU}pL3mTh!g%R5SimC6Jxv9ao%kj(H=rRvKUAY;{28zHN?BRf+5=AR?&I&@ zw+dujTDU)_x!_g&ji=^xNDuA|j=Fl??d%{E8#w^lYb5TZDnABF4Z$|aLBc| zBaB&SU*O)TE44I1W5e;HtdZ`=2@ch&H@A-;#wkzWY}MuEKp*a7u%7!nI(X3$jV&9CL4VW4b3c6i|0h;tQA5K&(tOTd z3YDP2_Yj0yNkmZ;!D2?xIi)_O%6RmN>_KQ(wV=Cla6g5smqMCF;VBI)X)Z_$aDg07 zK^qFVc?WeLvDKMx%&F!0Zt`Qz!FxJ$-#mCs!_YY-Ws}hPi;$6)PGVHh+G5 z@5x=^nEODgF2%t;3VFP-0WIN)&c%m_FHNutWu86M^|=_r9^(a61EbSjKelTN@y;(9?cp?#uS?f1ma3o+8fDqxv{^i~bl0We1Tq z!90HVtplt%-usVFtywEB#+ZQMI-^ttvyx~V|M0H0;*>n!E~QWz1Bhqf_*!1V3jnKI z)d_M3s8W_k2W~xkN+x1uyz3zrAd9HPUL1UU-vfPI2Q3Tj`ULaG?N`5b`C(khtWwq9 zZNW|RLDNq>6_{2&8xhyMQ0(}%wH_>sRI7tl>J1Qnd5U#)$s%&M+Nbluj zwbu+EXz`iTfz3v*(u`4z{52eXrsg3ZMx)YML3A_LkU_e|*`MoR81PEOiwH#QOabO0(Z zHm2-4fMl}{)wrxAeC(8)Z+r&6=bumzBGkTdpL7NRuOsy57D=ZIAX=el}9k)u(0=Jry+g1+rGMYeG=y z6NaQYrR7To5hn7po06zZu67GcW4%eo*lt~!(DZamz|Rb6q$%(d^`CK_pXPtTMaIL! z5riV2;NIsQy>yBaDmW%o1`rf37DwGz~tMIo+;Ds`E z_-sf9$9@PAfIy+48J->+8v`z+&GsvCQTs6_Ik4g@3=~G6yH6hU;murkosu~b-~uxJ$Jdp zY{dEkj2FRbYt` z%lpkLN)D6cI6wpr=7@E<)foc~RuSf*E-*<)1k{(b0Lz~Pa{MF5-UclLQZIi6=iaV_ z)v8*$M@5eNuU|&xVX4N+`N_ep zek$o>HWw1dCgsM7dV#F4tsBHm&Ui(k8AFY!yLWF9FAd!G20aw?U&l#SYRc>3rPM06bl4aH~b5k3#+*8sT_S!u46|+(iTZ*!2@lC$6i!?z1 zb@SUMq)XSGHMt_H_quZUFVv6C4zK-LG7a%{F*v{tVLH&*NJXo)hKTDW?SLx`I8sM2 zhbpo(Q%qK(Rrv%FG!h=KmC&=@somjE2Xr}N0Bc;1v4{Yv7NX`!W%<2#<3bBI;Pm`z zK)h_^%zhM?t5=_dv zFk?Fllwj>~7UR6ok3S^F6 z=fqaq41EP$!^<~nm+0uT@ew|1E*}R69o^ltQc{|uRCiZV8C4a=wd>uUI-p?uIwufJQ;$7KFHNy^j8|cU*(G)DxzxdgO{peV zg-sRgn^mp3<;}Xhny=k)ON>5dY3?r5oV(GuUB3W(%mg;V)V8`i++9VWo7t&L&X5ix z?}-F3puoNEV7wc*og-owdQcoKD<{ML!}H515HKHSViR?VPDnU*`S?XH9#U{qPTxYw zW7ZDk(^?Zl?g@UcYMe86d@sIX!!4{yP`_8=w@!$Ps;A@;B;#Uswgv4xJTXAGuvkVT z%|J*=NzKmvKgcH&_84^V_0F0!@zy8d;Mp2*uh5$!Bu}ZgFQOfQkn!Q;$D^^Rx`4)C z!@4FwkeAm2ofC}73uWH}cdoScL{?s>s_Odo-V6k~cqLdEnYUKO>2wC-&-aEgF1!#7 z5KZj{^YKT{Gd(tfqYAhm_UBtiv^IDT>tl{SbNF$f!;I65v1#>>FUgYVYwE~EzP5%~^Y ziEaBqN^7iQJ-oQ$l;oI96zM*`E^6`HgRZlF>PXx9=~vBMW>Y@a!;=9!**R5MgPp)l zpmk=2gJX@QLw^M>yrrd#GJ?z-*Pvl)z(Ypj77`X-_9E3}8(c`9LjO$$Zdh@-)e9DM zQb7US7OFcXu;7Y%(y?VOYNxU8`+T=FiT^|@hfar%jQDNVw<*C0(%0-Gi5Y;)u%x~V zbDZ5bA7PsKfmh9cKqdXrpOpQDs2}t2ND>*!y8tfJqi-9;83?wekTm%)Up^_*Bz*i? 
z1KWedCu7C2mpD{~l%7~>Rv|Ut&u@JtXvSb5K=vjtI;nz0Da@F8`7;{oGeDl&O?Dl7 z{AMStu_Zm+UlUBjm|scCtf51&l5puoi|61Vx|Du3ZES6^P~MA~VWkNfh>-}yS0W`F zipsxL;H3hTa7-=oBWb6wEonp`;zN*d3!o!(wIm_KogZ(R7Cl0%qWyX$r47(I2lzu$ zf!wOHI3ZO~Sm<4BHFtn?0J{si+?~nAT@b3mDh9q+Gx$%Ve&_J?dVrC>`Enc@+r|U9 zYP&n_8xq{DVbHl8bDm&?{DLcn%(p&Vu&8pgA2@uag2B`}^&>HfxWuYWdqnH&)RdF> zQ8ofYerP+M#nBCa&}1doy<&P=AUuS%x2?~)@(J^tX4s;#yj-MF^qD}|be#JtRL}oW-uJ96GivoQ=Rh76L5slK;E-^%hs{9?*R~t)A(x{N%kf&+h4G zsWFdMeyi5cTL7We{q5Z?cD@s>{jP!u@|z3YU&@KsRomEO+OY@{DFrM7q)IL5sX9b< zrh`pwAdjJ4sV^ynd@tB`jYWJqsK~N*H5{e%OmPP>8#I=*6o=6ef?t0eW%tv0pRdh8 zr2MxlXkfj%R`JLcy=xj6nb$gD4{%mE3xhq0_p7(axsE1VKTJ0Syy?QsB|PNp?7VgB zR>v5+nXtefH@n9?z68hPwG+I!0VO83qdOq}^OXVhI=WT&%i+oNwh9G{dNk5`OU z+@$XgHr1nGtNVZ|$e0lgi6YRAqsNb1e)_D(p(>DZgWPojO0$NQ1@_rd=Gwe-B4&f7 z&%?gLN9YI8f8bC5iK~ZV|HPFND7q+eWL)7i41nhZb@<=LXz!t+u#&FupO0i}NV_Hs0N#m5U;hVEPXcv)BMiIdPvhN4n8=l7L zMkuB2?gnNKZXh%xx7zl!UylP=4OHIC3u{wGU>v-B%@%s;qg&0MnbenIq*9~nbAC=^nalQN>aca8qoT;PO>-ZZJ#d2nX8mDL#h#P90t zC>(4I(=xy9Sukr+iVfpa4!pHjcTaAzVEUwj7l7yi?g9fp3N8|qW>Jsty?ED_WqC5% zBByRqpwiJ_53Ly+Bc9^o&|@kheokALe$dWrjK~e}q|IXiQpYiVF+{-bz@56fL=laI zrR=0DK8k1o4}*mPsw5PeeY(-db2lsO-uWi0xAIm{1sJD zHrYEye4gP97m3_78f$t?pZkip{U!1I2=UP${g?cW)Rdg3ZEQowHKms(X#D5b&>}mD z)vsas?qB0P1ekr=Ku=E(baJeyNjxqFMq`kN~tp!v5X6&E83=RgHYx@CK z2YaSO;G_$yX;@NTY|Y+0whIijE>5KqmhN z;|&T$rP#Pw=kLS3r|4Y9A8gLKY-ZGHXh~5HkexsJrT^CQtM!eI+L;k7%PqdvroOVh zBHu=(xfL|)NB8g3m4?k`LN)@l3snrYjG=l*nR^T}IJAn6-6wD@A_v2%#WJppmGush z=JDbq1hKr4Tqtik`RT^X=^>)Np+mP*w$%F;XBg{$Lqguo**87M&7|iQ z>iLuJyjlZI+S#J>&9BD#=%CAD^f$fRS`#gB@#TN3nj{0Ey$X#Lht#T7d1zhW zVk{MVkAttdcJQ;ICR{bSP3`Z9HV~oINYP4ZMT3K5Y`at>W_|!+#UyceHKTdY%|qF! zY677t4a!J8a7P+Ea_iit;|VGsrizUvUyT@sA6JJ`WA=sZ;om^e)q;wWV0P(>+1E;9 zx&u>f+?U^FDA44oCyQmF2Ac>1miG3~o(fcA3=%|mCg z5cwaho=7-<{yZ53Dbhc?)T$R(_a8;@d_?`j=5oXTq3gZlxo-RS@fUA<@63!Mlt^eO zn@Y(Z(J;%5P}<%`NQ#OI*(2U`WhI(cW@Z#oX()*@N{RR$FLmAb{rLXAzx$8R<9^(q zt}9;Wc|ON+9M9uv*idF+81N)XefzgkqUNT0ej&6!`1)+@?0r(GXw%VqD?j+wg|ymd z*+xfIIPvECg9n>uAJ6;;b1&BIm_qyFwvIPLbevM0(3vv>P8MrVW{0z4A=tFDvSKJu ziK_oa*_se_JoYO=O;WI?BWp;XU*u59k4j3sP2fT@qqPn#HAd>+YU)E8o6Zy5q+&^V zj8aGdPX1Ua-Q)Mhu*K@lUAOEXe`A)ctSow7kYO5JQfh5zu>p$4Sf7Fe$&K%(K?e~+ zk!|hN_aa%5yJN}71;4B_38oZ=*z`y7v1CQmw}kw#ci=MYz{7$nCgBv`m4Z@cU{Kw=;L;(NTn$9oI-^?AjYCq{1y&xL_6)_AfGBzzM}ua{HbRQxj_c4NA7kMyKI?X;l4pJ{w&XsJ#An7s~M8aATdtkZY#1* zg%z#psJsbUEes15(GH~>qU;3TPkT%F!Tl9uIp8olpFA1G>aj|!!b(1Z?U3k}vBi6h zy88F2DgKAvsf@qsYHBhu$vB zlPG$)%NHA?d~R-Y6NH}jI3jQFy4r2XnsRYh`s)WZ2?$BJoylzvlMv{Tpue3&#UFkI zSQcre4FBLfMF$ayWJw8$mv9gtzDq7In%8^T%c-5uXYT)yLU`u7GeX9ju}3AlWE+YM zM{gy0c^wZ@)=OKlEHCzNfSfXABwcMQ4!I1k*~)S=>@@XcFH0upM?U|VBCB8jy`w@< zTm00{I?<{@7u1GW;cWf-kkP2bej;L!_K&FffeMjZlMpS1g^>JW|Axy1k_wg+ zQ?U(Utbs^!<^St~PA1$kJ7izn!KavBhKx&&@?rAT+1k+Hhkj>NQ~U!pI-$|I!*^m5 zw|gqJZwdUlbc263EBDrxO~GQFk9$Ke5#Y(dKv|(bBwhV+!y$|L|GlDdCjlGS{bn|= z<)u-J8Vv58JHPw!3xDCA%&J|Bm0N-zkte^{0{;&TCe|z zZwXphH`DP-kigR0JUsHRUcEZ;pq?tl5v{#2clnjH$?!SU{k3}Ez*DWK#u5AVNFG>G zci70n;u6|#XkXLB0}qCUM*8Kq<}DiKZ{+0j$#4C(8G!7WL3K~ z3x_V@Hv@Luqnd8esWH-Z@TLPW9}(v=xBKTm=otwWC&~=rxBM?PR`#SpGPtlt+y$e!7(uS4SIK}n_6%YPqg_Hnjb`3c!nx&Tq4*O#iAFBK7r><^wa1wSIp>gS_h3O z>|=Cw+c{`&Q+!$wxlfZ(IgV8YfjSuQ}UK#0?PrwzWoPLHW6~~0UYD}9j zUAc~GfNh`}co*0x>dEuAsK+qaVw%t6@VtSu(Z@-h*X z*KTb{YfJtGja{m!bV$vhY2B`;q_n4L$6B8%g(>DWCKqc<3=0A86)(UDJ^u=$*rn$I z9plkL@QLfA3hLfT-0BvEP<1gro4L~R0_+3#BAa1U3qWT`NC+tmK(>aT!W)74GaJN| zmpRS4T`Z|HPE6Whpj1{P$D&jg;gKrat?RM2q6_w-=dlc=Kr!>(>0vl>qI;^heeWxm zq^~{|AL2j{Jrxn;TPVuM^eSX>F{0!E0EuH;m>*cJ2x8EU#ci`>V4s+v#IwU`TUkQN z%vQhWKBo6+Kz}SV>;CX2gNCjgHa;IYJw+wU4{l|re;>#8?rV%=y~SaIXO7jh24Hj? 
znp!kGp>k2L0t^lqRKbg&kNetq2uDW9XtDeE@WX??>1(@ESYOVAFG1;fnp~9|4513o z5Vqj;qS6QI^oHyz>hLW{U%BH6yctU%)d%?(YOP0q%^Y1nR{=xDS`?CJ9t+yA%64`F zasH*Jo)w#?djz=roNp0y{e5k1TwJIcarBSJQE z5$HqC(kuR;UwWQz$&#SV81j3mgm#2LS6F=2tWx=&1wXn@@uGU^+H)7*3=bd0xfYIy z*(G8%H+?~pUdjbkacpzi^Ps)=QyU8K+mprgFComY8O}FiCNe}UjRp23`Ufi3_VXk#hj&{&X3!YaHKb8}5SYicVrcMp)o2#f?rR;WjdJ??{1ujflO!1H^z^%PIq%Ax` z7BS%!@*DrSzoHy*shbQJhgjMN96-gH<42F)|JJn~$l>6U>ZsNzDs&lW{13h;WBp|2 zj9bv_(m8VWS;)PRmO#00sqiE2r=X5_NU6fU+D60j;$m!bdAF0d@VB0pK36Aq8+-_nJg{qT z%JB5|UClmLx6{A1MW&?efvJqm*(A=npuF*hOl6*#9n5lgH0+LN*jdMVeh_rAmJPQG>xLcH+J=z4DEIUDbADKx+2hAhMJ=+U3(P~vv(Umcn!|L-;G z{`r$M>M#AeE9~N347bC%jkf>4G4l5PmjPv;i|H@Y8x?JGN8v$04* z{&nK$XZl^~sYOXZeDzkGJay{RMt@s21`WJe~oGe*WuhY4aMu#(?OZ5~LLA z2OiYc))pJf*8_oIJtuQ;n?6r8{gQr+R8Cmuaai2bB;nHMQ+VE|zW3OYX|kmeQ=T9H zt2q7+@;;vO`iS3`felfYuU~&cUUujQ*SJm7+f5w08;6jPQ3km5y(Wq22IwN8K#aUJ zDB5vs$)rd#<#z`ZC(t&Yf<+U~?to#&_ZtI7*6h(=PX5)uV#yCW=st37H*9Hry0;c1Ik^$l0D0ZXiM6{@eVe7R_nIDZ;VD!kPf=Y;WYeq08i!y@^hHuAU7SaAy~ecDY0ClviHM4S8sOsBkrde$)Dd&@<0b_71qOzPP&E^zp*prFH+W`$?JH#te5R{MBQHUz?o8WmM1dQn-i3Mz(mf8p|qp;GL6NOKh~*cCapO8yIs_K-m8;&lMT z8b!HSiDt8b3%lJEd?Q(@|4_?7QpE|D z&^$Geh=wp7(4PP4QplIYxZgx2sD}2i5BTRQ?qZrmbaXTsnF~eLG1vvD5^@hT6EW?g zJGhAOPRsKU=v+gTh`-}Hso%HaBaC`#|J4V_6pq<8^R0CA{E*os&}1LHlB`oh&VWHLeG)_ilJtexD4p0BF~{u$j!(KecMrs$Q4;k!?M? z4Q;OT`73aD@6y`}?$(D4x0*~5VO3RCn6n}uc_WEom3cr|97ADQ1KX+Ku@~xy2!=<& z=ODCx(zCdXL7UaftlpzvJ&vV(kZ>NN7L$8z5EI{gHeHjgp@+d>{6KI>v_s8~UN7X* z#f#>E8qiK=UK?G)glUktgTB7)eO!Q`q{q;1Rdm3+jqHYFs`g4@n2>)m_Uu_FQ^Y;| z^cBnF<}RCRy^+COz2?S^{x?|4SIux7X1)aB7ckTbhh~d|Su{q&AT10X4A(k`%OhyK zB8A1q!>kpIhrbMnKz|D6j_LC%lST1qk{ga)H~QQ;CudxF32`0w%Mf$}F5xctwI9DG z_KVu}aA6sNN=O6EJOIxJa6zHya&iiZnX8UgzYBb0c(LUJE<*hJzsH}Gdi`?o-`6e} z`g*l%MuM+^(}27ulWho)(Sq@piXy-irL0J?(E18#^%W~VV(nvuwFQ`P60Ul;ZS?$q z;(pP?4?D515o5wnM&``-?q8Zojuov5WsTs;FN+HX%m>Aa-OK0CuLcltX=TG-2!caY z4O*}Zx2kp*AP56&bTZAMt*~a6(7cRtDHEq?i05!GJ)L02{Dd3)*Vq>J;kT`&V=&i! zg=O>*-xu;Z2afhmpv-~>k*x%K&d@EnBuK?XLHa|E>>;E^KQW3zI)<3o>B(Dx0|yR> ziHQk+Ni6xg)-RWIF>(a|Rw(!m*yN}!BX++0#Sz!z`9Pj9+8`4&_QLItDu88_F;i!r zgbjcAOO>2G%X*dJ5@hqcEBxp#;3u2K%SS_zns$ZT_R5Y3ap2W&Ur9Zt^8tP37>jg7 zPA?Z&b^j?#Gc_eir^oar{OAjFA5m?vvatc)T0suHqa|{&eh2f>36ul>t3vLO2QHJo z>z~dJ%);6g0F{9)qB{(Va07}8=K)?8d3IyCp&R980n^#yjYRmFGpDxk7&U{YrghTk zbjK(tHan;uKIy-+dAen*0~zg4h6~wu%Bj#hl%33CdG}$vZ0n$62mU+&4gLbf$sXH% zox?kL6p5*Dg#9>C3#V=Dlt7f^2sf_InHOz6?VcZ!k!H(=0%HR7#I=%G4y@CRJk|fe zFaBy$_Q|`_v^InQR2t(zq|o`O2m#%7_wsVEw~wZ<{FW}|g@=|97?Szg3lQ5JK79BT zwyo^L{sUAx_bMui(6*3M?>hL=!qNe#gbIs^c{w?CqS=_Mp>S19;fddKa~wzZY+DQz zP^ovw5uU@Zu=;@ECA zBu>0vp#+e+Mud0E|A>=M4$mMcK}30hJ%N_GLn81K-WL(oJTcn07G@P#a|RyYAc3p| z)qN7=IKt80dLB2tDGI&;JytdDMl`t8s_W1tI~kBdUp@gO+Ppd=77lVyWpPFv_AO9W zV6pdxoV+}ic;I0E#-QI}BOPQfeem@|rB(4cMMXSpm)H{Yh$@;>I<%pfpDy)vlwrxV zhlF$N?cgBEnilej)-^Yez{9nc_7I{mF)UlNxm}A68g_<})>(3@s$C5Y%%HxJ7{i6g zp3b$Y1LMa{QpX* z_X&{w;85iEXmVN=ULLj8C@l{+S?*sd6MIs(X8+_x)8v#CC41SKO=qIoqJHY=W^Put z+&r^!xHl0PzLjuAGB$#Ccrmi-eFcV_4c#<=LL_T*UO-?UK7BfCtw%MtA2~ZhKmQdj zDbd!UDl$-%;L!r&58gV)ZOsnq?KeYRL&LkHCBw4(Sq7CUu^1JD(@9B1g)lRZfe6sT z#Epj)ZCE#V;&H)fq0m!g+N&Jcz1kR2gvsAG1!0(J=EM_Nt#TPn8b)_?p*a1G-5iA@ z$#r!G01XN~nXio%+TVr{zZH)IUJy|zl=09Pr*-T9wCQ`9scgC$)-~>CZ5;p5BL)WC z4!&^qPyK^gop@YX(m$^tT6!&o_u;2~(t!$FX{mR!ZyLq_vlOS2AdSXtbt`7P1_89t zjQsq@u@nMepWqwZ;cw0-_P`bBa8M8#u%huxPLK4k%WXv=8*>I<1TBM>rluG(&vCs@ zw_rmBMMYzQxXYKr-Zf=j-4Vk#bVLi86l)+ttV4fB!k-5W>~AQ6pqF6Ao(8F}}v_LRp}?rbF1l{O-B8#Ww+ycMU74T0;SJ(S0b`5$F! z_|0j2WAtnIl`fP@vFN=9a(g7?JL41AL1iwfvU)hS&O?C%^yKEF8Ads76_09~IX^G& z1nFJSBZbol?T|rClS)=}Qjo~1L(7Uvpt2uJPV|d(7+GO;rb$K2;zt2A|JABDl?NR5H}Q%ZrIW? 
z1RxWKW>g2wSWllqexm%{(G?tQH~@0v4CIG}df#m2gk-Zb>p5jH?z$H zKsxn5fY5&??seP#!fXWrc6N4EjMO^JDP!IpujjAgvRn!hauMTCf7FtnqX|J+`QIro z2SWf8t>w+(QY;UUM3a~f(n$PWF+&~7`U|p*KcOzd4A432vB6KDj=|6nJw(yYL{&F7 zAo6I0aSAN085edXbKGaB-m(Gx;1eVwNFwa4NywX}M@5a9iV5k6$jH}~0nE%+{OAoU zk50(g2&OACTFc&Kli#x$e^9X<)?vVwZ5PvTXdXSvEU?q{cnjgEtl`z@Kb|Od(Y=@^_}_&awC(T>ifAj&B-#$ZS|o!NWBV z;+H~C(zu;A6>cCK=T8)22yQ2gn5BLRKSigHJ$|KpsS9q_%9uep=VDEVQuaB_Y7v1dy0O8e*t5IJ!fm8GA1xXh5T_Ss zL7Z(2kYjPtOBqrUIm#Lu>kr-*Ei7&NfRaJ$J_v+EG7w!149V)49H2#Ug6WcV2pmz0 zemJo$TegtBkKO&F#nlfBJN;n0u(xzSC)!^&6q0LJt>PeaU&YV_1ARW!aBV~@&^k2F= zJJ+bF#A%*Cf1VTn`7?vf#3kMs9mimEX7tei7gv~RSmOvcxqxP>r07o6T7B-S?}vuI zqf|pDhwYc&F(toY&oH5aA%-vUQY<+cO_y>Nl$1`v00CTZD1~LrZ1-+&50A6fjE(K> zUwey}+1%Dg`|tIF`829FQUnZ={7=OABgnWevYQd5ThP=8lR{bxs(WO2zdM+m^c|Em zxy`)qUzrxo04W}g9~W90kTrR$;3$I>j`WBR8@L~20zk|f#feSmvcBEs)O!RQ+VVxo zDa*3AkTn30U4t8^Gpn!8jMxBLrW|Wy;i@zBb7tn^qf9=mf`s&+QeeLyC?tw^WPd3* z9ZBwxkdWy6IQsuk4oyf970U}L10g@=}#;~;OqsW8QIx&sB%bl zvMR6dw%3wX9>tzOkz%j7V>$>l?yQ6bM1%gkkN}q@-v{l%hU9X{54X|BT<^!#QiBe( zk?F(aL zV>osx8s-H$HbNx>e)l&9z&W}TcvF`Or{sOjf=-$`8I!Y-JrTb?J1-w&F@64M> zNiEa?=&9j&VF>eAu-+j(o|$?H`DU&!&E*|7U)pPAZqCcaQ{~%&m%b%NnA}8x7T}|c zHTQomqz4=yyarthBZ#$Q_*1s`f|$)+aA_CSI~^3vhLEGwP_1(T@w1qNzUJApXDYLG zbPO3{NMN|Zii(Pu6^O;Jo12|Ye!cof3%`Bg9*?ifTa29uB5U`9gi~Gk5xlTONGM|8 zW?f+kMCnt>OaiU? zJ-B}L>b8M_`}m$zxWs|Og;j(2$DtwC6go)oA3qpOqr3%sSk%x#?zvHxm#4ZQ!!a{Q zLVKkVMcyGd*dDNqPfx$XJ+omwPonn@yLLHQi>y*oLQ*WIG2(gCmogTLYNDS6PF6$H z^@kAS-7HgK5*N{e|<6|Kej3$INr$vOfvA}Hz9AXAXUgue}ImLpX1 zu)#ok0CS41?#kE9=<&zCQ3kRdxw6f@M0JIw;1!Z3p^&MFxCsK5t$-mGCkGS(C?!QJbiLyYssrrS7B4X9!56-! z=D}xk`w5^wIn&e_>)LdDWEBsQ|4Yo!5xtSXrhz5To&!&OyYkk33$T}@9ok1ECBGv_ zjGQs?HVj`*_q|LXrFs5}OGp%kg=WGN-W6Bb!$K}oI@PxIUezO~MJqNA$XQUIh}qmG zcE3mLvr|8U5WyI*>F@2P1<`XT-Td~WV)1(?jG-0;p^`$1jbLsAW_5l2)NUn8!6uTA z=~D%V3_C>hIv8rg1AV)-z4kWaJh>u**%qpgs*nLk80HA5h80xd!*NdM2pd>qhLZZ` z?5m7zR?EII{=NO&h`U-^THiFxLOYhI%wqfW6#jKkJPy}GlEW)1imqV`egQ|haO==K zvc$FTyCoCV%*(?=^40)QMU#3ZWxj>#X-cB|_|#K?3#S(YWDou66liW73q96$E&4sF zVy+P+aP4A+EMUjs@m6IN*<3NK@}H4*|5V^kDS=BvyK3RfoI}m&5lK!&#PK(>=eA*G|9CT zkjz4KiJ2xi_5xHI`A>n&WgHmz4WP-XlP6KpKQ`RHb0>^GzWmq!>J!{$(@sq%!bbqZ zT!RZ21uGBK>Fens(mpoC27)B*V6EAnRh<^$Rm+2cM<891tS7uIZ`2M20uTKXi2MVi zjgT;C5%CE7Uc6{Sh5@6uW0xB^F}j67l3AureqLrESK@vqhsKO~G08f7x>~&we8-ao z>FzeekvwC5?zY&LbmBf>JD32TIm{F%vwAhT+7lq&MSlu)2xutlk6mlS-8c71E;(Lb zUHt~tW#s9IFK=aOJ5jn*Dyw0kbvG7+% z(Xb%0V-72v%*+Bfu~|f76y%!sW3{Xg#iSrSXmL;ql;-+Tqs~d4I?Cl*;VNdbL$z5kq$Z{Y~h{b z9v}odr8P!2--lcidR{YGZtmEtdyjvD=3%#9xRq-@+jeX2OZx3kpz6eOxVQso5jPa1 zTiMJ>I-|g<;W9Nf4aPp=wlwYb`u3KVFuosS{SB$cu;Zijc=gZ{^2BuT7;$x=IkUUP zp{4pgyj-^970~`0*(c?pPiW}Jo?}2V)!zm4XV`o`g&3vt+-Q9PY(qI zpgv584(mWFjWa4Xp3^^cs2G$dLVzg%w1ZHr*vYai!#NsC;IC=;2)4RZ2B+Pg(%F%pe&oAhY zrO`c-3q}@|$Qg$Q_x^xb1^KiaLl`V)0EX$cLvs8N4hCf$+1^C21tUF~C#|>z_y)RE zNv)Gnlqgk4u5K7Pq4G&xk6ZRfP-RpHG5~tZVq+^h)nDD>0xxEMK2k-<;M;V1t``X! 
zv)E)NAU)choG?SN9jZK%nCGOB1wv*QFp5sJz?G;k3i+5!@nirG7SlMu1z@Pxu7@6Y z$7cWcEH>vQuv8I~Q#f82h}C~IQF^!7zx+F7h6kH!Sr31TH);soSdxXAVaN!7p&k4R zLjfZ#Y%~h5S&+J#0Ed#~WU=HGKjKJG-R>3@_nxe@;}WbCc56k!?y^x@UVa!Xk=S}a zsg==M-mOd7CCHu4m6q_k!y@MYacgi+CmbuBW!m{W=R}GZ=o7tJGZo1g9s&i4@a40y1W*efFR$&A8Y^fL zFWBV-ApT?dJp~i~s*OUcl}Uh>@oZGoIOzIfD|lfZ;2;SUMHvI}FscQ-(5SHIT;2>LQK5%$7Hh3E2+(z?SrFAH=2-2Ii9AC+ z222gYU{`UXf;kC74Xo$HPTtU?XkFI2eVv8ZLjek5a9{}QI*{9O80^>IRS%pKh*12B zS=UYCkO-JyQQ<=Y19FQ(DTrx^*1$~Oj;n-r8cUr1zR!(Vnq5ba_%I_z}^2QX8RK|yb6k6pGlXR*Xc)!WAh)5^bG z5OV8(td4N7_(Xx;xB?}(C@0cry*-$U^3r%R?xxY#Cs zoC@zhic@-X;}&+10SOm=Y3#HC>)`&7m%Wy#H`?f`&^jX;zH@#^24@QO!Gi~-NytnPiA`F8&%l|$8kPi_?lkQ#)81RfRzTpI$Jooq8k=wJ>_&NBuBLHaS=xeV+z z@@p}1t_e{9Uxj>bRi|7~)-1@>kveY>?EHLup%gX7Zc=dZGx9k<;z<3s_iHk^E?l_4 zb}YMl2kcKv(eLBs2?z=TyyN;`+q-La4^PfOHCk*etO4S9v^$^QCZW2eAdsnw<(R1BE0q-4-!zHzZ0s>XV zH*VZ`@Zc2IY>Lsz^^sK>D{l2fh^lS>VCg!Ui#=ZEC|E1#dVwLPi%^5o0)6VAP3fX#u|#QGA*&yi4j zuZye094of?{A8k{K-}d(F6;4oq`3J;+CqYvdT|${P3ciI{df@C$rd*LOxg~cPL~@W zke&Uizw+fEBDLGpwyj$S{g0?U#~i@S+BQL1IXNwCht2f|y9CK8FdqJ0+X^B8#syU} z?NYeH&q;(Qdfj6;W!&x2Bh?05UCg|>x_nOW7VN8>%LEC!#% z2u#oP&M!AE9}#VFYVUuHCul`y!l4MA z+UdE820SXYwQF74*@Gia!E{;kiWB#qN2gB#^MJl`VueE~pTp8CsE}=Qi&TcZ2(!CY zofUZR_nz30qYx!@g3eVF z&oxgb$Ct>8vKgt0nU6V%QL3KF3ZD%{d(6u3YxYEK*$Lsy7b zlcZNk>f>NRrY5+SLZLYAJzvBpN;GeF?R3yCQC>H*!Rw1TQMOc+DBFRB9|$=nx>evK zd%2iQ)WgPxqbI^9kW^tho|?-yj;0?Ddrz(6@chfZW)-Qb6IKtwoGfJ{l}~|!8LE;6 z&W9vy5dJ~886)>XJ+t#hTsh5$h|703Hh6{>7_<_2`K@`L^P-+%bW#Q{h^h<9 ze#euaIV!hc)DSm)qo*hVEqX3g->{rJQc-aFfNy~cQVMj;GiR{_U>)|R z0708qbP!kN?+ZqRsQv&^=u82@8~wvg`5l^)w=3)bG?DWc(3rlS2jl>XZ3sgQ5Wm9< zcX(vv$d`dWm@@%8ncIVztG=4jW4~r+k&FVQ9hR0~<`)-#;%{o&4iAR%TdKkHC#P3x zDqc*BxtUw|KGSHgf5r9_vMW}=nXecs-^!oX>3r!h(_*y54ckIO8;%8D;ujTJeF%dk;{efob$>B~;bmuLtAS z3KWSdk5n0W*vnm-Up;VV8`r?OyUfa~oq@R|*xCy}()loV*52Ly4N!U+k*)-e0jtke zUis>-JBk9O@3SKL#=%aZf!@7)H=@2TKgQ26WH&cq6@_daG}3)G*LlB_)5UAowqimz zoQdksy(_!Ya;lRc%%sU@+pEQKV(I}*9h9hM5J}tGQ>t+9KnWOFR>SEWj!NrHZtp?t zM`PlyXazhHMQ>JqBkO%Ssdejq(C9hO5A8~fwSEJpnVedJ0vQ^duB7+Y$0!7q;cOY5 zj;-xFW#!mhDLv$((T;a0g31W9!&d49xnmC}hcW4E+QARAvX=GFsHv(V>kEPEhRGMTwYs6< z2Fh+!kOwl}IyVMK|Fi#CFFiyorDIJRhk@$Bx1#!`p*e0!kINeGV|E_sqDWiz(qE(& zRo3cz*<=C6KYqfK6bQqiW8jb+CLj&;dz48>DC(P;#bGOe$}KT2DK=dA{a;T}wg^Zl(}+FDbHWCwW90clw9 zwMcw|8NJh+Bo3y|{ln{^r*Q}OXIGO9C(gPBVpEhIz15mOR%!|zL@EwSK1sBZMQq_N za!>W7^jeRuY?N^Ucbc5X*P1`t1l{%9uQeLClftsPR^3l(Xnyjf5Dg4K;)*VkTE%w6 zVg>8XH&rXGA<}RJ1OXPR(nA=&bYLU`?%=5=)L}NTudlMGC{Bn-4>XDqmagCaOfSJj zlK}{fTEczMv-c`1I|Xzoy%}49hd}A-PN(AlY2hw=`!4*?J3%nxsU+2CFAUs2#=kku z>>$Z8%$F(}Lu~!-?OS(6dt5jUZtffTguD;oD6M^DCtkWknR|6`<3A(!*b<|wj|w!f zS!~RXvoNADAFxp8UnUsFkuS;_b&oyR;nSdwH$BXWP)fBYsgtckbgk;4AB=HQYJ_{* z_3Pr(HOUDHhU`*XM`xUUP1SlhruSB2%X1RjD?l*Z5*rOHeS!cK0y^xn%0qqm160&g ztM}d4-_fT_bRb;K)b$d6FwFvuo)pURxULR5zeh(WLWKxgd>0e(Wz!@^pl3S3T{JXA zF5E_{fpnoHctxt3$0ggd=xIy$fkvhU%9UK2fu9Pn{1x)@QSnD?*=Tb}9ba1&L zUUxk}H9#9ic=9z|H`W|ivkFDAfdmMfo+P2=UvVLEHEt;Ad;*l6oRkC&<5T+h6sTQL z=XogFqZR<{w%&wMr8oUUZBVN1H)o zd)?MPDSes|1(*7zjrw|?(vnVQDtCSg3W zf~-#2QXf3{4GEhEZ44@n-hX&a&X9;E=pHq}O0wv^vx685Wg@7}E9#l3=n*kM*ePJp z*x5OOIgtzn9c#-M1l$y9Sy=&JUu~X=yFUL4VL51vc4}h4k}(9>8%av8Le@KAP*4D|`J4jDCy4 zk9+h{L4AsTyWEGuVgH++P0#RlcYOlZdGc&BPC6lt`qHj+zX_1P8BJC2bf|Pc9Lz#=Z^s-E z;_q$vPTCXHE)lR>WYNr)ZUX?mpF1o6J-&iBm=-n0ju?PGrC~fc_>17Nuv{fPmk_O* zjs!igZ`lMzXB|elJ=n}V(Wn7vP&-os^#ZJk1WgRR;46qUk%8B(LBJGd*C(8?4~lS4 zrg!Gvgc+W-%#1G8m2DZ5Ldw~TCAtw-=b7qWe$f75OP;YKGS=Lw1g;haDHC_1!+2D^ z6zQ_!Tc$j8BEY{xbCZhk8%%R;x2(o@eF<>Yoi>Is#HQ&3C>y-DtX{L`Vy?^`JS6ne zT|p*O2M<^@4_%CR6h%0u659-Pt*i|w)TD;vSYC8*JIuhYe*Ex3!@YyrYS!~q@$!i} zUq>rq9#}RB) 
zl^J>Fcp|W`uMh7Ru?;=WmgddGsNXfCbfw;jPFU?$%{P8}{eUT*j?hUJ79+q!qbv1b z%d{{v>i{(?zBE)t0STv9@@7JUV35%Xq*qT*T9;`|Wj}adNRZImT}>FRn>=uSC0_RA znn`&5^qt28I{mG4i?QjNtN9haKy3kbqAqijt!@82JnSb)D~$xKr5d#5h`cR?{$Qp4 zz&ioUz6D_HnRIggl2nE#k-zuBFO&(cIhv7sPnVEB_n0@ElypZ&Sb-fT;08vg;SEOG zQ3ZGH6w;qBHZI*%677x_==7>xO}RjLwS&G@?|=NEiah&f?YHRW`|5y%?M_7p3{Ys^ zfg2aV&E=LrL_`F}kTS^&04!WW`}Fl(g7zEi%H6wn(R!9i`FKZ?+gk9}uc50Vcj)92 z_O6H;Xe@*J%iYvhQd+6$Vi_?3vQ zlvkr=J;2%h3|E=UV>MED96q+!eRNpwIiY#mX(8 zn%mo9!upPFoAVZCz$UJmtYw6K$ZscGdzS=SmI~p)=R%>v(%;ya%|zD9Os?E$6*M?QV=H4F!WPZg2LEGhvEskE0+?CrO;2eAg zsDN+n{3*EI0&f%Q7jx0vsFt8nt{wiBF+){QNa)B&_a~GcWVZ*MuaC1cpQNNIJ0W-z zi3h5QjQlcu?U|oHfgY^F#q)J_t=s#=gRc24aw;h1j-;uOUaxalwY-?pb!=3RT)27D z$KF0^^fm9-C>NG)-gtWMXcC(Ptf)>LxRK9*HAdtHdH{Q10H!_lEqN*- zytybLg`aY?*BqWj{a1*pOS0(J_sNtq5l5tC()t+o4dlBT9smD)MFoOq`VDSVfnkv9{>j$=E{DEOQ0kl-0n_LX&4W@0Nnq8 zn_DM6k!QUqUnG47?Flm<^lWH()e_2~EkU+G!nJ_o1Bf1+njuQub(oz+4^oCu3|&P; zZdbhq%aQ59m*-LW0E1UMqY6_=81!f6uP=USi_41E3N`BK(d*>g)6elY0GZ}Q#%W9| zFXbbuteRR~@TAxk9;;_b}q{O_t=V+bO#y#;|Tm~QV1MTDp4?PN1$l>A1O0pl< zJ({H9^Xo&aSdu4ISed|VL^bAWI<4q;@7eQMm1^$oT?lI3M3o~yH||7+!cS^r%6rxT{o))|H8t3swvclg z&;L0E$=AbhCL&K2-b$7bphN#)$WEj`LxBU)9;y|4T!#7mQ5XxyBzlK{&Sl&k^bhCE znRU)RI62eR*vR6tIy5&0jXsD!-1#p_AJiD-vrw+1&TB)cw0-+Rim}tkyT#CDcuZPln6;_d>@I|06 zLh{*pO(ld3^e_T}_iDC+&Y8DIyn(5?>(<~OBEvbG*%6WmgD0B@o7)*acgnk|=Nptmq<$j}^H)3C8t|&P`hP{E2xiuOn+VM9$wW3E`haos5 zDe$qd6j5XBOhS6WhrkEjE4rD5kw*gOjdjFot5(J2O7X$SWDGzSh+!pyg%xz_)dgI$SHxg0@ZV;Gadtdj%1Xbza9+vZ3N6*db1=ZD*jf` z7b&@f)G_?l#{t#X?$V?`fN{r5m-H+-kvMr??oGr$!VvVh@7&tig7kc|H@{B2SiWXW zXJg|qnh;=Zd))~lZpB?D0v%p+pUD}vs}M}#V8Mef`mMrSDdcd z6;TWYevSf_4a~BziX&kny9y#Qlt;L-n_^2fHYVk{skaGge=9&P42qk2n;ZTGP0zTH=&@Gwjh5m*zD;P6jgzl zT#aS&I7;*}=xe*IdV5J?bTuGIJ-tv8&OXzOx4Q3u*LnEKV9!{Wz@WrpWmzFg^lI7R zZ}ec3DTZ29+*nIVkJfEb_e=+Q^a>bw%{WGn>NEg9rk`M ztN35xn*f=1XERg4y94h=27LTJcOdaWz|RBA1R4JITnlwX zpfhf*uQ&3lU;6Cn)0UQ&EDviSSVX)S;P}0Zo18wO8~31N$bv3AVTvIqJ3GpK_e`)c zQ;lz5iL8mNl$u&#T@bsm@n#090k{U4?pvLs2$;fX@!T_hVu}a95yF%a0yeoC2h_BIB_U{ZLHw-N z%2$ZkFg0HgT(aaTJ@o^oC?I%ydzq!f8L1d9N|;*44LXEkP3!Q8Rg!TJ+#Mlq#|PSr zSX#kSbq~wBc9SjU<9u8g`ul-`q6CpX&_PHsuCyuqNkSNGp-ygYg8cmYmYk5V@IF`h z2fx(fWoHj;Vbc2RR~LOeJC0fhpg*P`zC!7ZUwQ{{8B}a#`(1TA%0LjJ zUU_O&L;lu4yq^lBTVUd>889#+aNRuy6FSA6aKs7;q3f5-W!z?8;>x<(fm=`>3L(5( zbdWiW@+lZ-@RkgM6@C?+HmT6o@ zsnRK6)sJ_`p})I?#qu{g?$eGB+%aBtF*$iV`-0(o;Ly7K0jq^r@c|SnaY!2&8O9HaE2bI8c9YFVv77^zrzCb?mTJ^A3~+H?p%k=n44%IAdWyUJ|{0 z0F0(RE=XrbKYXZV-nNjH+f%5Hj}n-v{`wTE!48ZrK`*Mt2s2v03cok> zChF-h_QtbS6D&e*^SGe-84CTuDmo-5gq7R-wfDV+rCmSz>ZbyODGB&Mjg5Ibp00en zzXVo%NuJ~??YHmVc_=OgnuMH(IpsZf4(@@kOGd^UR5HT`qB`aM_p}cp0izzo@BAFw zW>8#o`1hr3I%Wa0wi^b>aPV3h7Vv++5ZEO5`)jz27g2QkHg>MdndtWRay^2b55QgI z_VDu$bV-g|?Mx7riQu^ofj#bqM3yQg-U!;pgoGZPHL^aPCPUZr=cXnmIgI&$!Jnd_ z!sFRR7LiUh45%!50awEh0MSMK$outP&sHTFU#c+L!C>Cl(t@=WJGX94XOuq!#r~c> zFTv?N4Q0VHFP8Bq*NnZ0qd$Cyu$>H#jTg%zdmnxSW8kj1)H|%S z_T>C?tS>-OUwJn-ZfyEO>^1!-0nngA6cXm+}U3K$ap31WP~eGF%yed3(^kqQi}h zLf}O&526@VYx*FzB0$K9+>^^lFoN9^80-_X&Or@{lS858cI^8I%+ow}?BGx$+|Oa| z^wg=}5DawD>o9IJHbxb@N?tyl@zcbQAN$BWP4~(KqTwXL79R+WA$iKlXDNPug``$( zZj&Fhf&8FUd{7Ded=QMwo9Ghtv68`9%??I8HFfE1!;O41*o;{uE!zy&-RL2YjbP8g zCy@o6RI@(bo}C$Hvxvx1-Cq#D!05Qy=IDC`u2eTwQDTL#Rn~PzHhR$wauhnxoF4Z zyRa*I4>opA4IrPJvebaj%pR3#Y$B#Nt^`& zt&06b4%Ac~Ktsx7GjchlG&F3sZ25?>ud?*F3h%@SfprSfnM~x&HS8P`a;3aSeV_F9 z3coI;M6u7d--cStcVijxm5Gy(G6Sv;rHYK`bxJxLhN2e8D9LD61j1{#@!54?p%9(% zoHB9gfeR&(VZFT_HGUZCu*fl{$TMd;t&W2f1DxQ&HhvJjjN9Kv+l&nXI~8kNObp$T zS?Et5Gk-y(k7U#3?6bZ6_0y?Sr-n?+0ddr$MugD({i6iiO+7lf&L0RVR{%m65Kg8) zKS!b>kf>hX@s%`DuSJ8qc{4Ix4;-z_gY_(Ze*F6G1-2@Qtp|;9#|h;l_@(cZmS!pF 
z_D#7XmFHC)xxm##ngA$S=6m>_g>Q#>hUW^*vzGN~Tdx4A@W=pc2y|mf6E7gQZS30n ze7#^X+SkoREP-8LCp1S!tl9BsLKD2oB`+-2U(X;`UK@!xhQ#4aNZt_hGvDMk2nDK( z<8La)gLUE3u_~W8Q``DS#YO@P6zcND4WEggLwg&FOF>)~tH6r<8%Ezv82-mm*EUKX z`z?htT`Fg=ZMWGkw1Od-8fImpguDm{Ysblpk4E$}O>D!{V72Qz+Grkukw#o{JV7$E zCeZ{|q*9i@A)SG0d&Oc_Tzzu8;k^s*OZa-%CWpVJ2~6>sPz%}K`2^5x@Xpo!z2@^W z3Fn-D3tl^4G?0retz0t;MeFIT;naP3*f@eWtywSne#JU zEsdJ%NB%3IZhjOr-tQ=oY2AA~;A4Fd$_B`3{rw+!aW@WMRq&liW>7QsmZ?_~+4YNK zB-mHf)szH0$9I7gbU&u7c_teBUZ9<2_!?bHWO4nQUaD@>Jz2)wi1Dd6tgVJS*(WMz z${p3$1?E;IF4`d?kmh+5|N2e_+=h*Gqn7+P@2|2xKzgi4LP?T==el}~d@FufMibGa z(IZQy^LxlFe~>p{p)UOuwdBw%G7uV^#$Nk@yYN%ie{cHI2stg{cl+z(3jg8{6}!l< z{sBCz>mDy&XFGjwPKiQQjWL^D+dV0t^jfS8GKF?m6}e1H3%}keChUFw8?~ysx=X&` z)P#++zw;%(jccgH#+}anRZc5feyCs7krTb>+#S9 zbIEj2H||Cb5wQ?+7*P63kcWrf+)?COb}k@X{xu;RTcbpazd@38DCG$%g$f6ITJUAH zY1oWa!^H8;v^D~_!^I-G-Q_Poyb&+IJJKUbk-212Ti)Gv6uh@gJdd6J^zGilq4tHS zq!3LOzzVB8!z=Ta;axV1RphhE!|r>t*au|hDocdLp+=cjWpV) zoZl<|YjATg1aPdb?FGDcS4rS#+#^tAh-ne!5bL1uKHx63{}v9J&IPoV9nn2w_u`@# z38^_b*6}Vof}s=R7PVFK%ZL;Jf0(jC++o+2OIVpV|EnrzdeAp8nlRAc?@0FOD{j60 zp`os-A6xoR=d~I}F(m}PECyn;S4eY2E?^yZpDq_~l)uqJYqkwM=6i~IH8s#Ul4fFT zV3Mb=R_5Y+=_CM(27&E{u9e+FOn2A@T-hvUH~XG6FKDAC`?+4?tIYGc+BB$+JLQ%BZe{AR)=_-JT%OPhyqGEGcxHGBcUSF8yww$>cz-J{SMDlqj zv;g!>{l~reKAd*%oZGMqmPn{xhkRlf!1J5H79qO57X#wvy%-u-bWJ`8CO< z;XI&U^4ZqU${d!VR(H)tag*m6{yGN^Im)V3tv(ySr;J+s2tHszs(5=IqW~iU5!8z~L_~oG?3g!C1And=Q73z2PB_jk0`ZH}_Pa z5`0>AW@<|!f7t=OE$WLWU@1*4w~F){t9n#62XqAAV{&*u=3C2GD^%*h8w5E)U5{Cb z|N535CjxG`8WDq)>C&5W`0AMHb(Tm{5w3=I^E0J%u#T-W2i4ao-+Tb+*2{|c86lE& zJ2c+s-f0qJ=S#vh%~=cg=W{!Ft2#V=EN5JHuXoQByh%F&C_%+&ruCz2As9*>?Z;8D zXX4Q8W@G?|I7wFy^btgg99umnFG-|@`6&8_3B!a5@L-vdL(}kQf#OmaXb#XFysdwdHSM<@dEb=9PV!+X|M&zI!8i1_3?bnH0+r|7>@dVfJyi*^f-!YJx836S2Af6o7&!e`~(&ufY?!AvTrWN%>s9G^^@)r{mUIjX4%t)D4VT{BfAb zcjbUcy=4bH69CEVn-9UWypeh8dv-oma7hOIAOS%v?MdBDb`AUQI^QWY>cKjV-ZpRA95-=zK~7cOnLt>6VBw}XSV(PkmP&C>c-JEr4p zbtziG8E*f9;#o0*9fe+u)A8Ws=RTe$;-{F_Nq$EqhD9ps<~Bm=gK0`>pJHQ;m*+MG zo0Gk;8#IULsl*K)r%?9ua}yiybZ);6S8r6gLtacS_R zC{_2fIf6)4HZ{*O`UuPQ9xpBNhjI-OjIw)HH?<5Je3#F|Io9WViWse(`}?17SvQPG zkFH;UW6)wmA;-GI(Mzdk>MM6Y+;z4{w9GUrYSo@e^+ZLDccDbhu@i9*@>t3^{dB9< zMus1?>g74ta3M4HRCFQporMQ9uTJwTU8Wn<40+C~2T8ApWc9z8&r5nfeN-)bB zg{OY%kng)2g9#NuZPy^%)Rr*Hc;^RfsOkhlMSSi>2^u=g9njfpj zUqUIm=l47H-=TlkC#%ddhCk>F2MtU~N0(D{m?MJE?j?THqqE_{bmOE+pY>``md z>P@ZW{7>bx18%cG>kY)#uoF9m5K?kmp2V_P zJws``J}P&uQ{S9nSc3;$ARd;0)mEn8XqAmO@s_Vz3bFNYS?pL!h3Q|w3FfxNA`k~t$Ov2q91@BjL+MJdLP1@UGJMq7^zEHE4ro5IlF>yCSY_ z)jUuutpBAo<>&uu^??({`(nQ?R6!Qih z%yF2CoePQ8lkqSufinK-9b=szKm9zf0t_V`&&mQSv_&a}R;*25Ye%J+A2mF+^^xd^Y@>TAmSRgwm?02aT=lv9DToxOyvX=n3Kc9sfllx&%aC^kf#1`a zYfcshjD4(p7OYqEWx$~275h5Feu6^x+=WWd^o8yH*$)yMoA%RI&uK|K47p55YGlL( zw3$*GWuM4`(s_bdSFx**rp_Q#F{+B$lC7S_KuG3~!|Sq)qs0T~8*jVwV3FdE-1&cE zt!mVgF8702Y{1!*jQAFM{MVBoHy={eI!1OW4yKM@LarH8i>@G=w>DQx`wj~~5hMx3aUE@ z{uaZdA+L(Fg-@D^{=aGqQbbH{P>uk=zPNtEdxVri@2v++m%si~v<9l|;?9dRUg%3M z<@@8UITZ{~Q^ad&&z1*6kz zfqaZQ)9YUYh$v6_lt0;X-PE2A?mWZx?YFS1tE=^I`3p}4tnnNC%VT&-qVJ-NjPfus zEP5QhqTcw(M@NNO`>-*c*|4<*TZQMk@1*vg^CPf-U%>U5^^)Kl!8t*fcJ6x7oJ^5s zth;0pRpG#Ye-IE;GZM`n^9D-!#xnZOF$E-KT&!4c;Hf*MswZCIi&FEI?kzE#E&??Q82yP{;@%z zD7ZM)-|+_>!IJ6fRQC1(gopnR(CvN}x$pNg=@(UJ4eNhwqaS!%8CjO1;Z{%8%Z*$k zPGSO;r3o2kF8tAc0%AxoU}y^-suDz1Da!MMqEDN1QZ-NVnHeU8tto3;iJ_-h zu;Y1>yJGKqK+((vX=CMq1(L%XPoAVSxaxzz$0qd)Cs*@_Uti*rta9(f8Tf)%`(~6{ zi25%ouYX(o|ildp3h=ky~!vtaZVnfu__}U`fj5UCLUUGL;#ybaUTaEzlV(wGE`% zLKu|P^71on(~};3xU?Qy>$X@e$QfqK=o7n4St(7Bujs~eW2Ur0Yq40QSS)_@lUPRL zrQLQV@5Al+s9HK=Z+mTr$;-wMNH*p*-HD+)AB^Tn+bpfPPM9M!7*Ry)Qg`ZR?GDJZ 
z?Q_X{dDLA#3zrA(z>?JXmB`L;BlnV`2m3%^0 z24 zR#zYRdEM0lT-@D)Km}KjV{rvF;=#TDE&~r|;LRq!jz~agk2?+9_q_ln?=l?Puih)h z)^H$xyAJ&E(=kI|D2Ld7&*btsk*$G=>$v9tq(r9RdNR14y2C~b(2?psFAu;6 z_5NI;;0eA@O%)Jog9|(=e61rYe1shqO-gI@Q5}2It)>W@z2U(P!+C^oK-ZI;eaX4!mXh2kKOdZ*+k8R}8 z;l_llTfoZkw+vy!DOtBD{c`XT{MnCl3r&H~ZRNr&UijxS6s$EJO~qJDCFY39(xkBd zvF{SMMD`p&IRQ(%4^i2_TshW4)MfuJ@Os*+*ASr-1p%nFVW3oMJ~SD=`b!yi7`*4y z(TbT%eJf(;C-^r-K`>j>y=q%IQwx%vVjb7!#u=@hO&#Q$`O?u{x0iV%v~5wsLGgB4 zvPcz;9N_$)jcx;!k~c#eA?o#J;Nao;oBX*4aI^GSiim8pWEyO)DPNZY1TfP&)2xBX zHxltf>^o|bQ)ZCUy5eM$Hm{ZlnmVmUb+97I!|Rzi1D`dmU7yn>)U8~!1;e{5E{)_W zE-w5k6H#Rn+)bIFgTL*B$J+V^j1Y{6I7Gk1RxG~kTaffw2D_zW7OlT;UlEj}X3o0>LSNpNlQ?DB`>l^9)s-3u zxX{e%o^B+CSTaevn!0D3cG?ifGh=*x7Js3Dgf%38sK%KjrQ_de#w7&9y>S|mYBIkc z=!%T*ljr!_Ymxjdi0Lv8PR}^kS+v<_ao34aU`l!CavF}ltOZgAcXS~|p(8bS-2GWF zHE^d9LcG~pcv)g@u z=in@`KKSFu@n2(yI0ccBHgGg_0V+qKC6=cLBF@`w``-`=4IJQ#+sZ;VTvzPF3%6S5 z~(I{RCcUkG+s3!(6vY9vvuf}YyhWBqSmsGG^?g@20l z5%<38XfUz-It5w7Z6b_mho(T6WIy{8EwFtXs}UfOZoGPZ@Xd0sx1B>!AHP8Bc>M>@ z0@w-L!7v(?dm(~X!8+0z^zgp!+kic|45Ee(gAxwLsHsTw`);C5{6q=a>xArKt1O)S zrhlcz=(JouJ0|6DlH_p);wHW;%lT5IFWQXEhBPJd%*NhET(67O8JeLyHAuH83~Y;myJ!dn|7Gbt`>#>z=Y}Y(hfb0VR4| zPG8gCbgzN#)%cMW4b^O$*UMLkNgM2~rv_j=ABoDB(LD;+ou22_@Mm+lgoW$pyUN;C z%p+7*)rrJ;y&5Fb_pUVkIHE0D5D*X5X8CK;8s6E@KYccSUNkLjkj#t?d`T(^m}gIj zk}N0Lsw)ZD+I97B3ZAgmjfgEaOp?s2E=Fe$%hH4u?A%jz7ijVhSSAc1)w;}PZ_arr z_~_M(Vh4|r37qY#Y?kMiFKCM9F%`9G!0lvabDBtlCTPS5D%_pa*;|dz46ugqcm%VI z6iNe&h_DSaQU?4QEQ^5D!Xvm(nC~^_K*ZXj^C&lElqU0soYr{U8Cd6aX5;+EI06hX zbsmzSh86rVwu_K4d%kdoWOAgxP?liwqQ4NEr1deG4aGwa+WF|8&f{tbB`#+WGn+?bgEeMKQj{DOrx)wR|aw;kv& z$*#MEu&*Do-zfKj_tgXE^r4 zi%+RfI2CWC6z z-Dhmf623-dLmbLbt6d&}FArJxyHtBHCt&5~9*aUNWBE0ggN=x?0EXr)4}MbvY|=F< z$MdbF{tZmRAqNu0aWsiUyfi7v;b)AQzYpTOe-sPUU#oNT6CKwZX%Ip7<)4q1>11Hv ze@CV{OQpCI)ZAyI%2r2b;h%}>J7B&H4fyjg5Qghw+4Uq0{XM|ubjVakzN{qb8hES_ z)e@eiMwg$K^Jir`BpzC z31t`{@n45_J&!FaQV!&{`Idk~*^MSNJWyZnXUf1yZCCxz=H>O@dRK@a3B`9{Du+#N z+5O8mY6Kxs)BR7U1;q?U6Kgo(5tC3!(&)KPs4uRwyzRuu5n!e_ZhZ7UqR`Mp5k$W7 z#y906){LDxNFvtoV&Uy%$HyKnYtaxrpA>WeBZLrMWg59Oe%{afSXpqf7$14kMrD)z z#43lKfN_v==Em9I6_%fvQkHm-VE9lF?U7tECyijwqqTT-@v>U#L3@7VapWi{M+>Eyuk)@RT`2N)^%G&;OC;%S_{?Hl7Q8u>E#Q+}J}PEJDBy}e_{NS9)&$xIF>T@ZDbUm@tTV@(3Mb$_8}B@ z?IBn=Ov4ulW{_*`p*UpweRMJZX6qH*=Ad6GCa|J#v(F-U{CbAx$pK14AuN5tiuiOz zG^s(4XESp91U%K|oCevzsZd3EXR3Sw=qe4CLsXhX7v;=?^HRnuNBXK0gc^Yh!JB2x z?6xQh9q>wZ9k0&9kJbKJk=B9$0F1jl;t+fSjWe^KHT3>Fgv$krBM`Jnk+?qw^mpDx zp|iB}H_G!&E?~qTqCWeG6lhvnTfVwX|9nY8_#=!_-o&vU9Oz!WA(P22^Ky5>w(JhW zU*u)~G&eU_@hzgDEbMPz{Vm?A?B!X8ZQNAoiuw3S%jUm8SRaWm(=!34m;i&-W zG;-#~-1@BQZk!`sY=IMW%b_C%5=sDefMsFf^45Iw3Ai5uKk@T#Xqj2y+YecJGmW;r z1BnO}cNk-`*VDbWm5nFO)jf{6Umi~FtE;_$rTj76ba)hepW$pUv%yB~0A7#-q8!!y z6RaTWG08|`wtXr4r9*2N)ZB4L)nU`nrMo8w-FL7US&SG;Rxh57}kZ1JKu6fP)bZghfL*nxd-tn8lSZ) zLC^^wQMGAjD}#0QePBSR#l2Dn;rZ|E^t3HRh!>I2?SF6z_zGhWfveAFH}|v0p$%FDI4#v9nfh~W*PFdp5Z~9X0fJQv z(ihR${{XtGXYsl`bxgv9Bv@brZNZe?Xoe(wHkKY0PR0#>W9Ps^Vfvg z4AB;axYncof4yCMOjB1JK9X+2G^l9M;do3VI+SU~!d9@>Y-6Yd(D=d?K?0crEuaX+ zqO>Cs7l?wgfiOCBbYiJIoIu?KaW#ktYZakRELI-n{g|NgD!uz|ZCo+3{j-b^l3q^l zJ@-7m^E9bfXgwg;X=#Iks^+4;Md;5UQH#1 z)K|^&g!v?%n$SQ4=RXLLlK~f%*5~>A`gQII!~nAIbA-AmYPxbD*BXJa&cRZEg<}UV z^rv%JA<|G^wbbr#`#f$K(gV;myO=sbzPWKR5q|jh$s$@(EZSy4wl77cMK~J490IHT zLM5!ANenHtS0ik34i^)fJyzqJXla|@-vC`4#E9Tbx>=tnEz;5?ZLkr`hLZzz@;654 zES_|#AFPgS{EHe*q0WE;6jzPsAhobcRrpIs7PRgl@K+`F`#w`u|L7eW0i|V;3;yURGqeanXsZ3JAl?%Z4q3m@%#raF3q>;Q~|af z2$T*k@8k5Vyavl0-lM}1u$5>L0}Uh5r(VPyHqNm1Sq@Xiug?q%B2ynsX6fn8g8~M+~)V zrwM2aF=XN{dV8N)^Be0sk%)!C7A%=h_8-SWpx#QmD*vTM`qA;dqG*-Z>WUC*H)fZw zV0Vi^k;7}yAEL`&Be#AMp2QEFb7C{ 
zH9AP?BMLB~_?%cY#IV9$Sys01{`(an0N{aTKKTqG2H;K(2P}ljHI1ByZE4=5*fu~G zE{e+!sX4uo*HPzboco~(riiq`J?}JefLF4-@8??h5sRv!M5S^UUnm&3^`IG${6eBTZ8r=VyPvx*^{vu>15EytdCpj2SSW4Uk+yrxdCe5T?+ z=*$G2jnW3{f1*#zno6TM53lkfKtR@a=jgiw5y3>s?99fo-@_r`H9`RVkeMmT(6ujT z-<9IDwIW!e8TZ@26$t$YB7FnuN^VfkBdGMWG;G#?-{u~bXoS|+LB(=9NEBAEw*WU~ zaE?~$w5wa{Z(+BJ`Ig@EK#b>RWMuHV?z}4q%9rV5@O(tw3}9XGyVMHSojflYl`{Oi z`r>0gR^-}B;7XXyWl+x|EAH)q?{|Qfk_n#X+8<>PPKc(fnTykm$AFLTgVE-53wbv1 zP)U!B4edG=rDXBo2WheKU`#jAwpbAM*`=N*Y4P~ETi{*KTetg=5l>#7rj{0@YW6fY zN+p}?{@br9Ak`m)Im+3xN-dx!r&)y%(&m4x(?7LYIXTDfFEaG<0?(<# zdl&YEZ7FQK&rjuk#fSGF8rHW1f(=DCzca=B=e_NID!<+*_C-yRp{(d9OJ_ Self } diff --git a/docs/config.svg b/docs/config.svg index 5ccf471b3..0013960ee 100644 --- a/docs/config.svg +++ b/docs/config.svg @@ -298,7 +298,7 @@ api_key : Optional[SecretStr] library_client_config_path : Optional[str] timeout - url : Optional[str] + url : Optional[AnyHttpUrl] use_as_library_client : Optional[bool] check_llama_stack_model() -> Self @@ -702,6 +702,6 @@ user_data_collection - + diff --git a/docs/openapi.json b/docs/openapi.json index 8954eef46..d49f10ffe 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -13,7 +13,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.html" }, - "version": "0.4.0" + "version": "0.4.1" }, "servers": [ { @@ -245,8 +245,32 @@ "models" ], "summary": "Models Endpoint Handler", - "description": "Handle requests to the /models endpoint.\n\nProcess GET requests to the /models endpoint, returning a list of available\nmodels from the Llama Stack service.\n\nRaises:\n HTTPException: If unable to connect to the Llama Stack server or if\n model retrieval fails for any reason.\n\nReturns:\n ModelsResponse: An object containing the list of available models.", + "description": "Handle requests to the /models endpoint.\n\nProcess GET requests to the /models endpoint, returning a list of available\nmodels from the Llama Stack service.\n\nParameters:\n request: The incoming HTTP request.\n auth: Authentication tuple from the auth dependency.\n model_type: Optional filter to return only models matching this type.\n\nRaises:\n HTTPException: If unable to connect to the Llama Stack server or if\n model retrieval fails for any reason.\n\nReturns:\n ModelsResponse: An object containing the list of available models.", "operationId": "models_endpoint_handler_v1_models_get", + "parameters": [ + { + "name": "model_type", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional filter to return only models matching this type", + "examples": [ + "llm", + "embeddings" + ], + "title": "Model Type" + }, + "description": "Optional filter to return only models matching this type" + } + ], "responses": { "200": { "description": "Successful response", @@ -275,9 +299,6 @@ "description": "Unauthorized", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/UnauthorizedResponse" - }, "examples": { "missing header": { "value": { @@ -295,6 +316,9 @@ } } } + }, + "schema": { + "$ref": "#/components/schemas/UnauthorizedResponse" } } } @@ -303,9 +327,6 @@ "description": "Permission denied", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/ForbiddenResponse" - }, "examples": { "endpoint": { "value": { @@ -315,6 +336,9 @@ } } } + }, + "schema": { + "$ref": "#/components/schemas/ForbiddenResponse" } } } @@ -323,9 +347,6 @@ "description": "Internal server error", "content": { 
"application/json": { - "schema": { - "$ref": "#/components/schemas/InternalServerErrorResponse" - }, "examples": { "configuration": { "value": { @@ -335,6 +356,9 @@ } } } + }, + "schema": { + "$ref": "#/components/schemas/InternalServerErrorResponse" } } } @@ -343,9 +367,6 @@ "description": "Service unavailable", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/ServiceUnavailableResponse" - }, "examples": { "llama stack": { "value": { @@ -355,6 +376,19 @@ } } } + }, + "schema": { + "$ref": "#/components/schemas/ServiceUnavailableResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" } } } @@ -1293,11 +1327,11 @@ "/v1/query": { "post": { "tags": [ - "query_v1" + "query" ], - "summary": "Query Endpoint Handler V1", - "description": "Handle request to the /query endpoint using Responses API.\n\nThis is a wrapper around query_endpoint_handler_base that provides\nthe Responses API specific retrieve_response and get_topic_summary functions.\n\nReturns:\n QueryResponse: Contains the conversation ID and the LLM-generated response.", - "operationId": "query_endpoint_handler_v2_v1_query_post", + "summary": "Query Endpoint Handler", + "description": "Handle request to the /query endpoint using Responses API.\n\nProcesses a POST request to a query endpoint, forwarding the\nuser's query to a selected Llama Stack LLM and returning the generated response.\n\nReturns:\n QueryResponse: Contains the conversation ID and the LLM-generated response.\n\nRaises:\n HTTPException:\n - 401: Unauthorized - Missing or invalid credentials\n - 403: Forbidden - Insufficient permissions or model override not allowed\n - 404: Not Found - Conversation, model, or provider not found\n - 413: Prompt too long - Prompt exceeded model's context window size\n - 422: Unprocessable Entity - Request validation failed\n - 429: Quota limit exceeded - The token quota for model or user has been exceeded\n - 500: Internal Server Error - Configuration not loaded or other server errors\n - 503: Service Unavailable - Unable to connect to Llama Stack backend", + "operationId": "query_endpoint_handler_v1_query_post", "requestBody": { "content": { "application/json": { @@ -1453,6 +1487,26 @@ } } }, + "413": { + "description": "Prompt is too long", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PromptTooLongResponse" + }, + "examples": { + "prompt too long": { + "value": { + "detail": { + "cause": "The prompt exceeds the maximum allowed length.", + "response": "Prompt is too long" + } + } + } + } + } + } + }, "422": { "description": "Request validation failed", "content": { @@ -1480,7 +1534,7 @@ "invalid value": { "value": { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -1603,11 +1657,11 @@ "/v1/streaming_query": { "post": { "tags": [ - "streaming_query_v1" + "streaming_query" ], - "summary": "Streaming Query Endpoint Handler V1", - "description": "Handle request to the /streaming_query endpoint using Responses API.\n\nReturns a streaming response using Server-Sent Events (SSE) format with\ncontent type text/event-stream.\n\nReturns:\n StreamingResponse: An HTTP streaming response 
yielding\n SSE-formatted events for the query lifecycle with content type\n text/event-stream.\n\nRaises:\n HTTPException:\n - 401: Unauthorized - Missing or invalid credentials\n - 403: Forbidden - Insufficient permissions or model override not allowed\n - 404: Not Found - Conversation, model, or provider not found\n - 422: Unprocessable Entity - Request validation failed\n - 429: Too Many Requests - Quota limit exceeded\n - 500: Internal Server Error - Configuration not loaded or other server errors\n - 503: Service Unavailable - Unable to connect to Llama Stack backend", - "operationId": "streaming_query_endpoint_handler_v2_v1_streaming_query_post", + "summary": "Streaming Query Endpoint Handler", + "description": "Handle request to the /streaming_query endpoint using Responses API.\n\nReturns a streaming response using Server-Sent Events (SSE) format with\ncontent type text/event-stream.\n\nReturns:\n SSE-formatted events for the query lifecycle.\n\nRaises:\n HTTPException:\n - 401: Unauthorized - Missing or invalid credentials\n - 403: Forbidden - Insufficient permissions or model override not allowed\n - 404: Not Found - Conversation, model, or provider not found\n - 413: Prompt too long - Prompt exceeded model's context window size\n - 422: Unprocessable Entity - Request validation failed\n - 429: Quota limit exceeded - The token quota for model or user has been exceeded\n - 500: Internal Server Error - Configuration not loaded or other server errors\n - 503: Service Unavailable - Unable to connect to Llama Stack backend", + "operationId": "streaming_query_endpoint_handler_v1_streaming_query_post", "requestBody": { "content": { "application/json": { @@ -1731,6 +1785,26 @@ } } }, + "413": { + "description": "Prompt is too long", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PromptTooLongResponse" + }, + "examples": { + "prompt too long": { + "value": { + "detail": { + "cause": "The prompt exceeds the maximum allowed length.", + "response": "Prompt is too long" + } + } + } + } + } + } + }, "422": { "description": "Request validation failed", "content": { @@ -1758,7 +1832,7 @@ "invalid value": { "value": { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -2445,26 +2519,6 @@ } } } - }, - "503": { - "description": "Service unavailable", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ServiceUnavailableResponse" - }, - "examples": { - "llama stack": { - "value": { - "detail": { - "cause": "Connection error while trying to reach backend service.", - "response": "Unable to connect to Llama Stack" - } - } - } - } - } - } } } } @@ -2510,7 +2564,11 @@ "type": "assistant" } ], - "started_at": "2024-01-01T00:01:00Z" + "model": "gpt-4o-mini", + "provider": "openai", + "started_at": "2024-01-01T00:01:00Z", + "tool_calls": [], + "tool_results": [] } ], "conversation_id": "123e4567-e89b-12d3-a456-426614174000" @@ -3196,7 +3254,11 @@ "type": "assistant" } ], - "started_at": "2024-01-01T00:01:00Z" + "model": "gpt-4o-mini", + "provider": "openai", + "started_at": "2024-01-01T00:01:00Z", + "tool_calls": [], + "tool_results": [] } ], "conversation_id": "123e4567-e89b-12d3-a456-426614174000" @@ -3768,7 +3830,7 @@ "invalid value": { "value": { "detail": { - "cause": 
"Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -5924,8 +5986,7 @@ }, "chat_history": { "items": { - "additionalProperties": true, - "type": "object" + "$ref": "#/components/schemas/ConversationTurn" }, "type": "array", "title": "Chat History", @@ -5943,7 +6004,11 @@ "type": "assistant" } ], - "started_at": "2024-01-01T00:01:00Z" + "model": "gpt-4o-mini", + "provider": "openai", + "started_at": "2024-01-01T00:01:00Z", + "tool_calls": [], + "tool_results": [] } ] } @@ -5954,7 +6019,7 @@ "chat_history" ], "title": "ConversationResponse", - "description": "Model representing a response for retrieving a conversation.\n\nAttributes:\n conversation_id: The conversation ID (UUID).\n chat_history: The simplified chat history as a list of conversation turns.\n\nExample:\n ```python\n conversation_response = ConversationResponse(\n conversation_id=\"123e4567-e89b-12d3-a456-426614174000\",\n chat_history=[\n {\n \"messages\": [\n {\"content\": \"Hello\", \"type\": \"user\"},\n {\"content\": \"Hi there!\", \"type\": \"assistant\"}\n ],\n \"started_at\": \"2024-01-01T00:01:00Z\",\n \"completed_at\": \"2024-01-01T00:01:05Z\"\n }\n ]\n )\n ```", + "description": "Model representing a response for retrieving a conversation.\n\nAttributes:\n conversation_id: The conversation ID (UUID).\n chat_history: The chat history as a list of conversation turns.", "examples": [ { "chat_history": [ @@ -5970,13 +6035,86 @@ "type": "assistant" } ], - "started_at": "2024-01-01T00:01:00Z" + "model": "gpt-4o-mini", + "provider": "openai", + "started_at": "2024-01-01T00:01:00Z", + "tool_calls": [], + "tool_results": [] } ], "conversation_id": "123e4567-e89b-12d3-a456-426614174000" } ] }, + "ConversationTurn": { + "properties": { + "messages": { + "items": { + "$ref": "#/components/schemas/Message" + }, + "type": "array", + "title": "Messages", + "description": "List of messages in this turn" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCallSummary" + }, + "type": "array", + "title": "Tool Calls", + "description": "List of tool calls made in this turn" + }, + "tool_results": { + "items": { + "$ref": "#/components/schemas/ToolResultSummary" + }, + "type": "array", + "title": "Tool Results", + "description": "List of tool results from this turn" + }, + "provider": { + "type": "string", + "title": "Provider", + "description": "Provider identifier used for this turn", + "examples": [ + "openai" + ] + }, + "model": { + "type": "string", + "title": "Model", + "description": "Model identifier used for this turn", + "examples": [ + "gpt-4o-mini" + ] + }, + "started_at": { + "type": "string", + "title": "Started At", + "description": "ISO 8601 timestamp when the turn started", + "examples": [ + "2024-01-01T00:01:00Z" + ] + }, + "completed_at": { + "type": "string", + "title": "Completed At", + "description": "ISO 8601 timestamp when the turn completed", + "examples": [ + "2024-01-01T00:01:05Z" + ] + } + }, + "type": "object", + "required": [ + "provider", + "model", + "started_at", + "completed_at" + ], + "title": "ConversationTurn", + "description": "Model representing a single conversation turn.\n\nAttributes:\n messages: List of messages in this turn.\n tool_calls: List of tool calls made in this turn.\n tool_results: List of tool results from this turn.\n provider: 
Provider identifier used for this turn.\n model: Model identifier used for this turn.\n started_at: ISO 8601 timestamp when the turn started.\n completed_at: ISO 8601 timestamp when the turn completed." + }, "ConversationUpdateRequest": { "properties": { "topic_summary": { @@ -6911,14 +7049,16 @@ "url": { "anyOf": [ { - "type": "string" + "type": "string", + "minLength": 1, + "format": "uri" }, { "type": "null" } ], "title": "Llama Stack URL", - "description": "URL to Llama Stack service; used when library mode is disabled" + "description": "URL to Llama Stack service; used when library mode is disabled. Must be a valid HTTP or HTTPS URL." }, "api_key": { "anyOf": [ @@ -7029,6 +7169,42 @@ "title": "MCPServerAuthInfo", "description": "Information about MCP server client authentication options." }, + "Message": { + "properties": { + "content": { + "type": "string", + "title": "Content", + "description": "The message content", + "examples": [ + "Hello, how can I help you?" + ] + }, + "type": { + "type": "string", + "enum": [ + "user", + "assistant", + "system", + "developer" + ], + "title": "Type", + "description": "The type of message", + "examples": [ + "user", + "assistant", + "system", + "developer" + ] + } + }, + "type": "object", + "required": [ + "content", + "type" + ], + "title": "Message", + "description": "Model representing a message in a conversation turn.\n\nAttributes:\n content: The message content.\n type: The type of message." + }, "ModelContextProtocolServer": { "properties": { "name": { @@ -7417,6 +7593,33 @@ "title": "PostgreSQLDatabaseConfiguration", "description": "PostgreSQL database configuration.\n\nPostgreSQL database is used by Lightspeed Core Stack service for storing\ninformation about conversation IDs. It can also be leveraged to store\nconversation history and information about quota usage.\n\nUseful resources:\n\n- [Psycopg: connection classes](https://www.psycopg.org/psycopg3/docs/api/connections.html)\n- [PostgreSQL connection strings](https://www.connectionstrings.com/postgresql/)\n- [How to Use PostgreSQL in Python](https://www.freecodecamp.org/news/postgresql-in-python/)" }, + "PromptTooLongResponse": { + "properties": { + "status_code": { + "type": "integer", + "title": "Status Code" + }, + "detail": { + "$ref": "#/components/schemas/DetailModel" + } + }, + "type": "object", + "required": [ + "status_code", + "detail" + ], + "title": "PromptTooLongResponse", + "description": "413 Payload Too Large - Prompt is too long.", + "examples": [ + { + "detail": { + "cause": "The prompt exceeds the maximum allowed length.", + "response": "Prompt is too long" + }, + "label": "prompt too long" + } + ] + }, "ProviderHealthStatus": { "properties": { "provider_id": { @@ -7865,7 +8068,7 @@ "truncated": { "type": "boolean", "title": "Truncated", - "description": "Whether conversation history was truncated", + "description": "Deprecated:Whether conversation history was truncated", "default": false, "examples": [ false, @@ -8779,6 +8982,12 @@ "title": "TLS configuration", "description": "Transport Layer Security configuration for HTTPS support" }, + "root_path": { + "type": "string", + "title": "Root path", + "description": "ASGI root path for serving behind a reverse proxy on a subpath", + "default": "" + }, "cors": { "$ref": "#/components/schemas/CORSConfiguration", "title": "CORS configuration", @@ -9227,7 +9436,7 @@ }, { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + 
"cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" }, "label": "invalid value" @@ -9301,6 +9510,13 @@ "type": { "type": "string", "title": "Error Type" + }, + "input": { + "title": "Input" + }, + "ctx": { + "type": "object", + "title": "Context" } }, "type": "object", diff --git a/docs/openapi.md b/docs/openapi.md index b2633d8c3..fa452ad1c 100644 --- a/docs/openapi.md +++ b/docs/openapi.md @@ -249,6 +249,11 @@ Handle requests to the /models endpoint. Process GET requests to the /models endpoint, returning a list of available models from the Llama Stack service. +Parameters: + request: The incoming HTTP request. + auth: Authentication tuple from the auth dependency. + model_type: Optional filter to return only models matching this type. + Raises: HTTPException: If unable to connect to the Llama Stack server or if model retrieval fails for any reason. @@ -258,6 +263,11 @@ Returns: +### 🔗 Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| model_type | | False | Optional filter to return only models matching this type | ### ✅ Responses @@ -345,6 +355,7 @@ Examples } ``` | +| 422 | Validation Error | [HTTPValidationError](#httpvalidationerror) | ## GET `/v1/tools` > **Tools Endpoint Handler** @@ -1119,16 +1130,27 @@ Examples | 422 | Validation Error | [HTTPValidationError](#httpvalidationerror) | ## POST `/v1/query` -> **Query Endpoint Handler V1** +> **Query Endpoint Handler** Handle request to the /query endpoint using Responses API. -This is a wrapper around query_endpoint_handler_base that provides -the Responses API specific retrieve_response and get_topic_summary functions. +Processes a POST request to a query endpoint, forwarding the +user's query to a selected Llama Stack LLM and returning the generated response. Returns: QueryResponse: Contains the conversation ID and the LLM-generated response. 
+Raises: + HTTPException: + - 401: Unauthorized - Missing or invalid credentials + - 403: Forbidden - Insufficient permissions or model override not allowed + - 404: Not Found - Conversation, model, or provider not found + - 413: Prompt too long - Prompt exceeded model's context window size + - 422: Unprocessable Entity - Request validation failed + - 429: Quota limit exceeded - The token quota for model or user has been exceeded + - 500: Internal Server Error - Configuration not loaded or other server errors + - 503: Service Unavailable - Unable to connect to Llama Stack backend + @@ -1251,6 +1273,23 @@ Examples "response": "Model not found" } } +``` + | +| 413 | Prompt is too long | [PromptTooLongResponse](#prompttoolongresponse) + +Examples + + + + + +```json +{ + "detail": { + "cause": "The prompt exceeds the maximum allowed length.", + "response": "Prompt is too long" + } +} ``` | | 422 | Request validation failed | [UnprocessableEntityResponse](#unprocessableentityresponse) @@ -1288,7 +1327,7 @@ Examples ```json { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -1419,7 +1458,7 @@ Examples | ## POST `/v1/streaming_query` -> **Streaming Query Endpoint Handler V1** +> **Streaming Query Endpoint Handler** Handle request to the /streaming_query endpoint using Responses API. @@ -1427,17 +1466,16 @@ Returns a streaming response using Server-Sent Events (SSE) format with content type text/event-stream. Returns: - StreamingResponse: An HTTP streaming response yielding - SSE-formatted events for the query lifecycle with content type - text/event-stream. + SSE-formatted events for the query lifecycle. 
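Because this endpoint emits Server-Sent Events, a client reads the body incrementally rather than waiting for one JSON document. A minimal consumption sketch, under the same assumptions (host, token, request field) as the /v1/query example above; it prints raw SSE `data:` payloads since the per-event schema is not shown here:

```python
# Hypothetical streaming client for POST /v1/streaming_query.
import requests

BASE_URL = "http://localhost:8080"  # assumed deployment address

with requests.post(
    f"{BASE_URL}/v1/streaming_query",
    json={"query": "Summarize the configured providers."},  # assumed field
    headers={"Authorization": "Bearer <token>"},  # placeholder credential
    stream=True,
    timeout=300,
) as resp:
    resp.raise_for_status()
    # Iterate over the SSE stream line by line as events arrive.
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data:"):
            print(line[len("data:"):].strip())
```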
Raises: HTTPException: - 401: Unauthorized - Missing or invalid credentials - 403: Forbidden - Insufficient permissions or model override not allowed - 404: Not Found - Conversation, model, or provider not found + - 413: Prompt too long - Prompt exceeded model's context window size - 422: Unprocessable Entity - Request validation failed - - 429: Too Many Requests - Quota limit exceeded + - 429: Quota limit exceeded - The token quota for model or user has been exceeded - 500: Internal Server Error - Configuration not loaded or other server errors - 503: Service Unavailable - Unable to connect to Llama Stack backend @@ -1563,6 +1601,23 @@ Examples "response": "Model not found" } } +``` + | +| 413 | Prompt is too long | [PromptTooLongResponse](#prompttoolongresponse) + +Examples + + + + + +```json +{ + "detail": { + "cause": "The prompt exceeds the maximum allowed length.", + "response": "Prompt is too long" + } +} ``` | | 422 | Request validation failed | [UnprocessableEntityResponse](#unprocessableentityresponse) @@ -1600,7 +1655,7 @@ Examples ```json { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -2156,23 +2211,6 @@ Examples "response": "Database query failed" } } -``` - | -| 503 | Service unavailable | [ServiceUnavailableResponse](#serviceunavailableresponse) - -Examples - - - - - -```json -{ - "detail": { - "cause": "Connection error while trying to reach backend service.", - "response": "Unable to connect to Llama Stack" - } -} ``` | ## GET `/v1/conversations/{conversation_id}` @@ -3304,7 +3342,7 @@ Examples ```json { "detail": { - "cause": "Invalid attatchment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", + "cause": "Invalid attachment type: must be one of ['text/plain', 'application/json', 'application/yaml', 'application/xml']", "response": "Invalid attribute value" } } @@ -4361,24 +4399,7 @@ Model representing a response for retrieving a conversation. Attributes: conversation_id: The conversation ID (UUID). - chat_history: The simplified chat history as a list of conversation turns. - -Example: - ```python - conversation_response = ConversationResponse( - conversation_id="123e4567-e89b-12d3-a456-426614174000", - chat_history=[ - { - "messages": [ - {"content": "Hello", "type": "user"}, - {"content": "Hi there!", "type": "assistant"} - ], - "started_at": "2024-01-01T00:01:00Z", - "completed_at": "2024-01-01T00:01:05Z" - } - ] - ) - ``` + chat_history: The chat history as a list of conversation turns. | Field | Type | Description | @@ -4387,6 +4408,32 @@ Example: | chat_history | array | The simplified chat history as a list of conversation turns | +## ConversationTurn + + +Model representing a single conversation turn. + +Attributes: + messages: List of messages in this turn. + tool_calls: List of tool calls made in this turn. + tool_results: List of tool results from this turn. + provider: Provider identifier used for this turn. + model: Model identifier used for this turn. + started_at: ISO 8601 timestamp when the turn started. + completed_at: ISO 8601 timestamp when the turn completed. 
+ + +| Field | Type | Description | +|-------|------|-------------| +| messages | array | List of messages in this turn | +| tool_calls | array | List of tool calls made in this turn | +| tool_results | array | List of tool results from this turn | +| provider | string | Provider identifier used for this turn | +| model | string | Model identifier used for this turn | +| started_at | string | ISO 8601 timestamp when the turn started | +| completed_at | string | ISO 8601 timestamp when the turn completed | + + ## ConversationUpdateRequest @@ -4858,7 +4905,7 @@ Useful resources: | Field | Type | Description | |-------|------|-------------| -| url | | URL to Llama Stack service; used when library mode is disabled | +| url | | URL to Llama Stack service; used when library mode is disabled. Must be a valid HTTP or HTTPS URL. | | api_key | | API key to access Llama Stack service | | use_as_library_client | | When set to true Llama Stack will be used in library mode, not in server mode (default) | | library_client_config_path | | Path to configuration file used when Llama Stack is run in library mode | @@ -4888,6 +4935,22 @@ Information about MCP server client authentication options. | client_auth_headers | array | List of authentication header names for client-provided tokens | +## Message + + +Model representing a message in a conversation turn. + +Attributes: + content: The message content. + type: The type of message. + + +| Field | Type | Description | +|-------|------|-------------| +| content | string | The message content | +| type | string | The type of message | + + ## ModelContextProtocolServer @@ -5033,6 +5096,18 @@ Useful resources: | ca_cert_path | | Path to CA certificate | +## PromptTooLongResponse + + +413 Payload Too Large - Prompt is too long. + + +| Field | Type | Description | +|-------|------|-------------| +| status_code | integer | | +| detail | | | + + ## ProviderHealthStatus @@ -5139,7 +5214,7 @@ Attributes: | response | string | Response from LLM | | rag_chunks | array | Deprecated: List of RAG chunks used to generate the response. | | referenced_documents | array | List of documents referenced in generating the response | -| truncated | boolean | Whether conversation history was truncated | +| truncated | boolean | Deprecated:Whether conversation history was truncated | | input_tokens | integer | Number of tokens sent to LLM | | output_tokens | integer | Number of tokens received from LLM | | available_quotas | object | Quota available as measured by all configured quota limiters | @@ -5512,6 +5587,7 @@ the service can handle requests concurrently. | color_log | boolean | Enables colorized logging | | access_log | boolean | Enables logging of all access information | | tls_config | | Transport Layer Security configuration for HTTPS support | +| root_path | string | ASGI root path for serving behind a reverse proxy on a subpath | | cors | | Cross-Origin Resource Sharing configuration for cross-domain requests | @@ -5700,3 +5776,5 @@ User data collection configuration. 
| loc | array | | | msg | string | | | type | string | | +| input | | | +| ctx | object | | diff --git a/docs/splunk.md b/docs/splunk.md index e60dc41f0..dd9a586dc 100644 --- a/docs/splunk.md +++ b/docs/splunk.md @@ -85,7 +85,7 @@ Events follow the rlsapi telemetry format for consistency with existing analytic "system_id": "abc-def-123", "total_llm_tokens": 0, "request_id": "req_xyz789", - "cla_version": "CLA/0.4.0", + "cla_version": "CLA/0.4.1", "system_os": "RHEL", "system_version": "9.3", "system_arch": "x86_64" diff --git a/pyproject.toml b/pyproject.toml index 503303091..4fd421820 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ dependencies = [ # Used to call Llama Stack APIs "llama-stack==0.4.3", "llama-stack-client==0.4.3", + "llama-stack-api==0.4.4", # Used by Logger "rich>=14.0.0", # Used by JWK token auth handler @@ -147,7 +148,6 @@ llslibdev = [ # "pillow>=11.1.0", # "pandas>=2.2.3", # "scikit-learn>=1.5.2", - # "psycopg2-binary>=2.9.10", # API eval: inline::meta-reference "tree_sitter>=0.24.0", "pythainlp>=3.0.10", @@ -156,8 +156,11 @@ llslibdev = [ "nltk>=3.8.1", # API inference: inline::sentence-transformers "sentence-transformers>=5.0.0", - # API vector_io: inline::faiss + # API vector_io: inline::faiss, inline::sqlite-vec, remote::pgvector "faiss-cpu>=1.11.0", + "sqlite-vec>=0.1.6", + "chardet>=5.2.0", + "psycopg2-binary>=2.9.10", # API scoring: inline::basic "requests>=2.32.4", # API datasetio: inline::localfs @@ -171,11 +174,15 @@ llslibdev = [ "transformers>=4.34.0", "numpy==2.3.5", # API tool_runtime: remote::model-context-protocol - "mcp>=1.9.4", + "mcp>=1.23.0", # API post_training: inline::huggingface "torch==2.9.0", "trl>=0.18.2", "peft>=0.15.2", + # API inference: remote::vertexai + "google-cloud-aiplatform>=1.130.0", + # API inference: remote::watsonx + "litellm>=1.81.0", # Other "autoevals>=0.0.129", "fire>=0.7.0", @@ -210,6 +217,8 @@ build-backend = "pdm.backend" disable = ["R0801"] [tool.ruff] +exclude = ["tests/profiles/syntax_error.py"] + [tool.ruff.lint.flake8-tidy-imports] banned-api = { "unittest" = { msg = "use pytest instead of unittest" }, "unittest.mock" = { msg = "use pytest-mock instead of unittest.mock" } } diff --git a/requirements-build.txt b/requirements-build.txt index ad18ba62e..091a7d55e 100644 --- a/requirements-build.txt +++ b/requirements-build.txt @@ -56,6 +56,7 @@ maturin==1.10.2 # via fastuuid packaging==26.0 # via + # dunamai # hatchling # setuptools-scm # wheel @@ -122,7 +123,9 @@ setuptools==80.10.2 # multiprocess # pathspec # pluggy + # polyleven # prometheus-client + # proto-plus # psutil # pycparser # pycryptodomex diff --git a/requirements.hashes.source.txt b/requirements.hashes.source.txt index a20df7bc4..2ea7ce506 100644 --- a/requirements.hashes.source.txt +++ b/requirements.hashes.source.txt @@ -32,9 +32,9 @@ email-validator==2.3.0 \ emoji==2.15.0 \ --hash=sha256:205296793d66a89d88af4688fa57fd6496732eb48917a87175a023c8138995eb \ --hash=sha256:eae4ab7d86456a70a00a985125a03263a5eac54cd55e51d7e184b1ed3b6757e4 -fastapi==0.128.0 \ - --hash=sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a \ - --hash=sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d +fastapi==0.128.1 \ + --hash=sha256:ce5be4fa26d4ce6f54debcc873d1fb8e0e248f5c48d7502ba6c61457ab2dc766 \ + --hash=sha256:ee82146bbf91ea5bbf2bb8629e4c6e056c4fbd997ea6068501b11b15260b50fb fastuuid==0.14.0 \ --hash=sha256:05a8dde1f395e0c9b4be515b7a521403d1e8349443e7641761af07c7ad1624b1 \ 
--hash=sha256:0737606764b29785566f968bd8005eace73d3666bd0862f33a760796e26d1ede \ @@ -177,6 +177,9 @@ greenlet==3.3.1 \ --hash=sha256:e2e7e882f83149f0a71ac822ebf156d902e7a5d22c9045e3e0d1daf59cee2cc9 \ --hash=sha256:e84b51cbebf9ae573b5fbd15df88887815e3253fc000a7d0ff95170e8f7e9729 \ --hash=sha256:ed6b402bc74d6557a705e197d47f9063733091ed6357b3de33619d8a8d93ac53 +huggingface-hub==0.36.1 \ + --hash=sha256:5a3b8bf87e182ad6f1692c196bb9ec9ade7755311d5d5e792dc45045f77283ad \ + --hash=sha256:c6fa8a8f7b8559bc624ebb7e218fb72171b30f6049ebe08f8bfc2a44b38ece50 importlib-metadata==8.7.1 \ --hash=sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb \ --hash=sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151 @@ -193,9 +196,9 @@ kubernetes==35.0.0 \ langdetect==1.0.9 \ --hash=sha256:7cbc0746252f19e76f77c0b1690aadf01963be835ef0cd4b56dddf2a8f1dfc2a \ --hash=sha256:cbc1fef89f8d062739774bd51eda3da3274006b3661d199c2655f6b3f6d605a0 -litellm==1.81.6 \ - --hash=sha256:573206ba194d49a1691370ba33f781671609ac77c35347f8a0411d852cf6341a \ - --hash=sha256:f02b503dfb7d66d1c939f82e4db21aeec1d6e2ed1fe3f5cd02aaec3f792bc4ae +litellm==1.81.8 \ + --hash=sha256:5cc6547697748b8ca38d17d755662871da125df6e378cc987eaf2208a15626fb \ + --hash=sha256:78cca92f36bc6c267c191d1fe1e2630c812bff6daec32c58cade75748c2692f6 llama-stack==0.4.3 \ --hash=sha256:423207eae2b640894992a9075ff9dd6300ff904ab06a49fe38cfe0bb809d4669 \ --hash=sha256:70d379ae9dbb5b1d0693f14054d9817aba183ffcd805133f0a4442baee132c6d @@ -477,9 +480,97 @@ openai==2.16.0 \ peft==0.18.1 \ --hash=sha256:0bf06847a3551e3019fc58c440cffc9a6b73e6e2962c95b52e224f77bbdb50f1 \ --hash=sha256:2dd0d6bfce936d1850e48aaddbd250941c5c02fc8ef3237cd8fd5aac35e0bae2 +polyleven==0.10.0 \ + --hash=sha256:00e8aeaccd5e40a6e0685e1a4bdc404fbf009b498597373bc3a83ebb41b360ee \ + --hash=sha256:03a22a7f4d129a1f82362c8da050ceda44d58842da8703180863888bca62efd0 \ + --hash=sha256:04f321bf1d3e4de4095393356caa399e0b3ae9f4610908b9c09b45cbbf3c09fe \ + --hash=sha256:0513c5018e5941839da331e910b8e694c4dda56317b8c68f73a9571b184b7b6e \ + --hash=sha256:074c5ead1f0ee5d429d0d45d1bd33fdee29757e9bfb07aabfd55eb0f6058b758 \ + --hash=sha256:08d8103f4052fecf0a86020a3623b03b676c0b5a6d288c8c5e51a1256310b2b0 \ + --hash=sha256:09a718a521a61893ab989a120eca259208690bd9c82e5bbe9d2f8d715e2c0d1e \ + --hash=sha256:0a33c50d15ae3bf7c10b18df8fcea8ce2d67286f81219bcf47976a9c61c8297c \ + --hash=sha256:0e50a96ec039b0e6ebddad4e16968b5ba48e47279c1b8049c4cffddea06306b0 \ + --hash=sha256:10d7bc1312f1a2431679a49cc4ba0f70cbd82e3d674120a84545b8c39e21ff62 \ + --hash=sha256:13bb198acb966b94e4f9b08f66062ce6f84829cd5adc4559e26ba690a54d684c \ + --hash=sha256:1c9d0148436ce0ca86675a0d79c40e135166447e102b1ed1465ced2e1eb589f5 \ + --hash=sha256:2081ab2afe7d7b6e0f28d6d21ed38c56fe06b437843ecf0ee3f84e92c0dc8203 \ + --hash=sha256:20cd7950f067973fcbd6e976e86fce9b5646593300cba35444ecf4219f095022 \ + --hash=sha256:21ee3982e2ed0b696070096fc973178f50fb6c63c9a1ecacb46c7345936ac55f \ + --hash=sha256:22b4ed4fc73f28e7aa44a4e6146bf88dcb3d0a175f08c0978f3a486f0009bce2 \ + --hash=sha256:25e7f8ec69af11888d3ded84569b437f78ddfbbb8bb3ceeee278e2ca85b87a2d \ + --hash=sha256:288470f18b6af376bc90ef26db58b42c2daebdbf3e6ef8c736e667d466e7c082 \ + --hash=sha256:2b429266edd6dc672f35b0834bef93e3afca06ccbc39d119a4923dbe278a8c70 \ + --hash=sha256:2cdb62d17d36ebe811d402fbd3ca09a95e6863bd18e21bfc81c82c1a0e91e5e9 \ + --hash=sha256:343ab52ad8d111e46ba800f938484c4346524e6db13dfd99ee2693cb680e4ef6 \ + 
--hash=sha256:3517df703f0071582d23246343d35eaa860d206169502fd1ff83843c275b6626 \ + --hash=sha256:3679ce75add5a4418530b1ffd798192aab454524fd592276ea324d7d740e5451 \ + --hash=sha256:3732c6b93ab755155041303fbe52b9ab3d641a271b8c78034d338366d77e29bb \ + --hash=sha256:37f5fa2018efac4b243f1f62bd81d8cc830245407258940119c26a4f95ab301a \ + --hash=sha256:3876f0f2f67f1f183f10ead6c226cfa8e65f682225e3295c311df7b371ce9f71 \ + --hash=sha256:3a3a3e9e5f7733f64b8aea794beb757e20cca8b7a90a52d8dfe02e973746e457 \ + --hash=sha256:3f4b30a8054b9c31f477a15d727dc944bc7450a7d84f6c90e30d354da90a4388 \ + --hash=sha256:412996ddf16b6b73bbfc21ee8f16120b2b3fa5249c68f746fa9ef521c0321546 \ + --hash=sha256:41c8d61a8c742921a7ebc286e2d5dee03cd6659e22702e573329de84f4abfe98 \ + --hash=sha256:4c92ae3f6d3c5b14f0c9ea2f8054346e7bcd5fbd643d58e0960c0408bb004aed \ + --hash=sha256:5011e9ce10e875cf0f487b0bff7b21c3320287087fe9d91a7be0ceb23b463b54 \ + --hash=sha256:50d2c2dcaae06253bc97e53a8a537d5c48d259e1a3bd3b85f90daa77649c85aa \ + --hash=sha256:546ca556f85f6972ae1a2393d875d57fabf5679162650c2ad777aa9a5af2867c \ + --hash=sha256:547e9b4197230e2fae8110d0ac57f5de8d65dbf0aa94f4882fe5faf3f8e7c700 \ + --hash=sha256:595d8d0cd934e1e4042abbff9838b3a673eda6d269c3fa5727eec313c3df3d60 \ + --hash=sha256:5a189e57cf00f185333402967201071fda1db78446ea40c8a91d5fe27865eae3 \ + --hash=sha256:5ad04871576432e00129c3a8c5408eb0ecbf0321883a8332accf6c8011e9c003 \ + --hash=sha256:617f6da9704d0affa0f9619e6a7ef79ef535ab28e1f69e9d2e2a137bff985738 \ + --hash=sha256:6280eaeb216435de74e9c35a3276fbdb681f6f912ed428f649c27acdc39dd1d0 \ + --hash=sha256:644cd3730f43580cf90557fa1a149ea0e09571c2ddef9d9c3071ea0bd3b00b4a \ + --hash=sha256:675db0544b1e4fc08a78c7c777b40c56c1a6a8a1b538bd177610c3cdc3c6934e \ + --hash=sha256:68f3f7eac393d71ab767f3273b77e207fcd1ac096e5617b82acbf9bf899e6939 \ + --hash=sha256:6e765e51ef7931a9e5724f2a2f00aecd974db329be43cf878f7b56f07433f41f \ + --hash=sha256:713e87bf29592ef16d773b07dc4bad8a5575f230f89ccddf1fdde2e92d4b19b9 \ + --hash=sha256:745e70fd5dc8cb61f040a35a0a5b6565d8d955d72ad6cd8aac17306db4cf8ce3 \ + --hash=sha256:74af72c34ebbf9ef3a82652fc2f50a13a00999577dda969bab589ca79d78fa07 \ + --hash=sha256:75426e16960fe5f29be8ddcb64ab9c2c075ea1f835157023fc2a7792d740cdc0 \ + --hash=sha256:7a7c939fd2999413235b20d7442605cdb5361213981e70932d63a08d1090036a \ + --hash=sha256:7b128bc626199b3071f33c1aadd8dd535185abb7370f011ef58f4ca93b753032 \ + --hash=sha256:7b4df9d778c5c55ba377dceb3ff39ca46291fcff278ac9c558299051be38c0fb \ + --hash=sha256:7d5722484164e5529975d40b5e99290f221d259084b3d0b824c914f50cd103d2 \ + --hash=sha256:7eb189e7c2f713d9a3c24eeb8d2a03064d7365a3ec1eeb1667d3f82284a56833 \ + --hash=sha256:7fb473bfa1a68597cae78d14a4cb5fce75f619b978d5469f4bfb15e1b43f6214 \ + --hash=sha256:84edc2c9517fc30dd6c5ff3f140e4bac1f3dcc37e2c36f1ce67edaef7497777c \ + --hash=sha256:8777b2d3a67516803a86dc5bdaa66455f6c22dd98c6421f8506c0beac84ca9ff \ + --hash=sha256:89c481dcd59b9e7ca31d27a0633575b06909a246db1eb3e4318a5050a770234f \ + --hash=sha256:91bc30479d1b66a081c877473f7ff2943daa7d2573f192492abeabe4deca1db8 \ + --hash=sha256:93658d6c6d3581adfebc419866123dd317d37c28862d1b5f44c2949d816d6943 \ + --hash=sha256:99a4e6e89ff4f1e73dd3e160a3b87df0f7600513fefe8ea436cc66fd9e4719c3 \ + --hash=sha256:9d83ddc05fba8b75cd23404bc218a3fea888778f061300e220a705e7b303f24c \ + --hash=sha256:a1de0caaf1670feb7e4e55cb5c8a5a26026fac232c9d3ab8b8c0bc9257207882 \ + --hash=sha256:a37e658e0fd7baf10af94a477ad23737c9d944122976636a87728dee7e6ffff9 \ + 
--hash=sha256:a805860e131531732409152055af8fed1e26c83e26b1ccc8ee68bb34699781df \ + --hash=sha256:aab2104152bc7faae45ea8e43c59abea1db6e9d3f8a6d9eef65bb44042137d0e \ + --hash=sha256:ab8ac71db516befeda8f4ab61875c938b1c67315f2872fa262f0ba3856de6c73 \ + --hash=sha256:b527a11223d0a57dacbac342f464861dff98bf8e047c4daa4c1787134d1b53f3 \ + --hash=sha256:b67f1dbdb354f6015ab9780a89cfa3db0628608eee85db32ffcb3595009d99ca \ + --hash=sha256:bfe59466799c86a1d7a026303460a41cc3bd990bf05a2477af5e42fe20e77339 \ + --hash=sha256:c5a2da058154d456fbbd69c3e4a956546c28a62507ff175fbbb1e23bfe53ab8d \ + --hash=sha256:ccfbb44da427c66040b668fa67ebfa5d87a21919a5ba1771c4ea67affd492a26 \ + --hash=sha256:d0e2cac82d683bccebed36b1d85d2833c0b0db46c78c8e2212763b3f93943fd3 \ + --hash=sha256:d1c5363e0582ac88edf3d3a7aafe24300ac3bc8a9aa8bc54ea3cb544da2cfeeb \ + --hash=sha256:d607c7fcf345ca76a6b00df4abe5fd1f0628c402c6144118f7da82bb883aec57 \ + --hash=sha256:d9bf0338d263fb859cfb70c00e283524ad0f1e01a96810dabba8d81f3f154645 \ + --hash=sha256:db3752b0630be1ad87ff02040cee76fdd9f83cb4f8c335d7da6988de0e6a1f0f \ + --hash=sha256:e85c4fcb4f0d1622a38b1619067167f2aed8b814c2e08ec03b1437f60070f1d8 \ + --hash=sha256:ea096c4cc8e8f089f082fe1caefb152267b138eb314d378103fee63418490cc3 \ + --hash=sha256:ea673445dee5a2cfe303edb96541d7a5b359070b57357ecd14a1b63b39abb370 \ + --hash=sha256:eab7347c4aedf6d76e6eb27a84d6f5875ba41152c15f61508c6c23658ece976e \ + --hash=sha256:f486a2755894438d26110034a5d0b7ec02bab41a674998aa35c7fe0b453ae722 \ + --hash=sha256:f7bb2367cba98c1f356e8085f9689207aea9b42416d9d2c9d44c3b89a3f097cc \ + --hash=sha256:f9d14ca3ee1dfab08f29d61bf81159d830bf016bf951e3e8fcfd20c551272aef \ + --hash=sha256:fc0ea0d503ea8c18b37c951b972ce0f5e6e6bd51ec74febc109df56f891b2efc prometheus-client==0.24.1 \ --hash=sha256:150db128af71a5c2482b36e588fc8a6b95e498750da4b17065947c16070f4055 \ --hash=sha256:7e0ced7fbbd40f7b84962d5d2ab6f17ef88a72504dcf7c0b40737b43b2a461f9 +proto-plus==1.27.1 \ + --hash=sha256:912a7460446625b792f6448bade9e55cd4e41e6ac10e27009ef71a7f317fa147 \ + --hash=sha256:e4643061f3a4d0de092d62aa4ad09fa4756b2cbb89d4627f3985018216f9fefc protobuf==6.33.5 \ --hash=sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c \ --hash=sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02 \ @@ -714,9 +805,9 @@ sse-starlette==3.2.0 \ termcolor==3.3.0 \ --hash=sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5 \ --hash=sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5 -tqdm==4.67.2 \ - --hash=sha256:649aac53964b2cb8dec76a14b405a4c0d13612cb8933aae547dd144eacc99653 \ - --hash=sha256:9a12abcbbff58b6036b2167d9d3853042b9d436fe7330f06ae047867f2f8e0a7 +tqdm==4.67.3 \ + --hash=sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb \ + --hash=sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf tree-sitter==0.25.2 \ --hash=sha256:0628671f0de69bb279558ef6b640bcfc97864fe0026d840f872728a86cd6b6cd \ --hash=sha256:0c8b6682cac77e37cfe5cf7ec388844957f48b7bd8d6321d0ca2d852994e10d5 \ @@ -754,9 +845,9 @@ tree-sitter==0.25.2 \ --hash=sha256:f5ddcd3e291a749b62521f71fc953f66f5fd9743973fd6dd962b092773569601 \ --hash=sha256:fbb1706407c0e451c4f8cc016fec27d72d4b211fdd3173320b1ada7a6c74c3ac \ --hash=sha256:fe43c158555da46723b28b52e058ad444195afd1db3ca7720c59a254544e9c20 -trl==0.27.1 \ - --hash=sha256:641843c8556516c39896113b79c9b0b668236670b3eae3697107117c75cc65eb \ - --hash=sha256:9d502626c3ac1d32cdc7d8978c742de31bfc11135b4d15be1d83909632dcb75c +trl==0.27.2 \ + 
--hash=sha256:05fbaa257d5d55bc659d8470c4d189eb046c18332d34dbe679e595bd5d6553cc \ + --hash=sha256:b0a5b3ba4c28cf3736647b77925feccee8c542c509f63f4f5df070f3abe602df urllib3==2.6.3 \ --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 diff --git a/requirements.hashes.wheel.txt b/requirements.hashes.wheel.txt index 9b8bf539b..42dc9d89f 100644 --- a/requirements.hashes.wheel.txt +++ b/requirements.hashes.wheel.txt @@ -84,8 +84,6 @@ httpx==0.28.1 \ --hash=sha256:bd045b6f2958f98a2dd325ceb407d65980e0cb8fafc5d4f5cec20d7bda6374c1 httpx-sse==0.4.3 \ --hash=sha256:b8c7f0641e92eb1b6119c02803a7f541765a9967778bb31f0bec06654bbb618b -huggingface-hub==0.36.0 \ - --hash=sha256:ed9c6ed6d31bdd77e52fbb8a940e6e6e21d4606b24dd77c42326457c2529f072 idna==3.11 \ --hash=sha256:4c792a69b0c96274db373531875f687f61350e5cc4e0d8596da56db7c7f5dd4c jinja2==3.1.6 \ @@ -151,9 +149,6 @@ pillow==12.0.0 \ --hash=sha256:bebdb5e7e80bb56b9e6eb5658a55a17e2062d9d108e390d9f4e64a521ae98253 ply==3.11 \ --hash=sha256:77d772accd72b1a7ab75ad3c3b09d56cb133aaf43a717900d9249a2548c22824 -polyleven==0.9.0 \ - --hash=sha256:8b48ab19c3ebcb3a3bef16707f5ab3004c153e5fc957698e9095b4942806493b \ - --hash=sha256:9444017da1fbd898566c559d432da8397df0cdb029f49383fa68a98c679b9329 prompt-toolkit==3.0.52 \ --hash=sha256:41fd066c002758c2973fc0ccf310b1b04e7cc7182cc8a617a74c1d402f526cac propcache==0.4.1 \ @@ -161,8 +156,6 @@ propcache==0.4.1 \ --hash=sha256:520ea2032da4fec5b747948795393f296cb57a943d80257bb35fc27baa50591b \ --hash=sha256:62ce4428eb58f9996018135d10e185c1185c2fa5b897fce6c229c9dc78d1244a \ --hash=sha256:e451db5f476293c58dd1bdd9442ca6aff9e4f4061fc67a68cd98474a1fb53d67 -proto-plus==1.27.0 \ - --hash=sha256:74246302daae35f5a5d1b75113419c0627d2506ed65f58fd9f25f7e571ad47c4 psycopg2-binary==2.9.11 \ --hash=sha256:3f0e796e9a41255c22f0ebd5b2953b70d8bb624e7b9e4e89afeeda71ac0773b6 \ --hash=sha256:4f883ef06d3b8bef9fb7fc6d79ab9dadf6c658d17998f39222852a6b1b62dc89 diff --git a/requirements.overrides.txt b/requirements.overrides.txt index 02eac8bda..90c9599ca 100644 --- a/requirements.overrides.txt +++ b/requirements.overrides.txt @@ -14,3 +14,4 @@ pillow==12.0.0 faiss-cpu==1.12.0 sqlalchemy==2.0.45 setuptools==80.9 +jiter==0.12 diff --git a/rpms.in.yaml b/rpms.in.yaml index 569f717e4..ca64f8cf2 100644 --- a/rpms.in.yaml +++ b/rpms.in.yaml @@ -1,17 +1,12 @@ packages: [ gcc, + gcc-c++, jq, patch, cmake, cargo, - libpq, - libtiff, - openjpeg2, - lcms2, - libjpeg-turbo, - libwebp, ] contentOrigin: - repofiles: ["./ubi.repo"] + repofiles: ["./redhat.repo"] arches: [x86_64, aarch64] diff --git a/rpms.lock.yaml b/rpms.lock.yaml index 17f7c7b82..2ca7ed624 100644 --- a/rpms.lock.yaml +++ b/rpms.lock.yaml @@ -4,709 +4,177 @@ lockfileVendor: redhat arches: - arch: aarch64 packages: - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cargo-1.88.0-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 7738248 - checksum: sha256:db106a81f1e6afa16afc7a28d008f42784f6602bca19bd147cb046b5dacc11e5 + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/c/cargo-1.84.1-1.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms + size: 7744425 + checksum: sha256:5db626d49748f31fb02916c24fa1a7e5759ce7b905ac3e781d42079fba8fa1c4 name: cargo - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: 
https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cmake-3.26.5-2.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/c/cmake-3.26.5-2.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms size: 7432689 checksum: sha256:6ac0e5e9a4fd761f8688678ac83580c7eebeacf6c241bd8089d72c4a477b22c3 name: cmake evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cmake-data-3.26.5-2.el9.noarch.rpm - repoid: ubi-9-for-aarch64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/c/cmake-data-3.26.5-2.el9.noarch.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms size: 2488227 checksum: sha256:84da65a7b8921f031d15903d91c5967022620f9e96b7493c8ab8024014755ee7 name: cmake-data evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cmake-filesystem-3.26.5-2.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 23401 - checksum: sha256:c76e4d4a355a4f6599bee009c9b4408e6b82c31265f2db824efdeb278d596024 - name: cmake-filesystem - evr: 3.26.5-2.el9 - sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cmake-rpm-macros-3.26.5-2.el9.noarch.rpm - repoid: ubi-9-for-aarch64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/c/cmake-rpm-macros-3.26.5-2.el9.noarch.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms size: 12250 checksum: sha256:1c74969c8a4f21851f5b89f25ac55c689b75bed1318d0435fc3a14a49c39d0e3 name: cmake-rpm-macros evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/c/cpp-11.5.0-11.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 10797009 - checksum: sha256:eab64632a86902a074d60f7f32d444e1911fcc53b9a8b0de60082eea20bea808 - name: cpp - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/e/emacs-filesystem-27.2-18.el9.noarch.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 9495 - checksum: sha256:49d7b88a05a72c15b78191a987e6def04fda8e2e4ff75711f715d0c0ecadc60f - name: emacs-filesystem - evr: 1:27.2-18.el9 - sourcerpm: emacs-27.2-18.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/g/gcc-11.5.0-11.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 31296441 - checksum: sha256:6831e31f3fecd845b4058d68c3c3a9cc1fae525f81dda36368ddc550f28bbc5e - name: gcc - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/g/glibc-devel-2.34-231.el9_7.2.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 568363 - checksum: sha256:5e3bbdb64dad55fdb07540756c333e0a73afe4ab493de199277a82138c224352 - name: glibc-devel - evr: 2.34-231.el9_7.2 - sourcerpm: glibc-2.34-231.el9_7.2.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/j/jbigkit-libs-2.1-23.el9.aarch64.rpm - repoid: 
ubi-9-for-aarch64-appstream-rpms - size: 57006 - checksum: sha256:f9fd62dfb74900a238cba5346d3932f32a802b6d6a161c47935938f392a7adf2 - name: jbigkit-libs - evr: 2.1-23.el9 - sourcerpm: jbigkit-2.1-23.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/k/kernel-headers-5.14.0-611.24.1.el9_7.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 2968477 - checksum: sha256:a60380cbf908bc16b00e560146cda2f106b4eab1932ed2a3420921419897b0f4 - name: kernel-headers - evr: 5.14.0-611.24.1.el9_7 - sourcerpm: kernel-5.14.0-611.24.1.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/lcms2-2.12-3.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 171119 - checksum: sha256:9d35f533ef3fcac403f775e658921df31e989fc8748ff1c9ebf9a3a6c027222b - name: lcms2 - evr: 2.12-3.el9 - sourcerpm: lcms2-2.12-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libasan-11.5.0-11.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 408716 - checksum: sha256:247090a8241441529d2c4dc5932ddc1c1075418ba9618d4b8b5e65d1e2aef7b7 - name: libasan - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libjpeg-turbo-2.0.90-7.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 175739 - checksum: sha256:b549971d7418fffff89092888c8d213dd63401f4b9cd2ecd1a9892c7cee9ab24 - name: libjpeg-turbo - evr: 2.0.90-7.el9 - sourcerpm: libjpeg-turbo-2.0.90-7.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libmpc-1.2.1-4.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 67120 - checksum: sha256:3763354a5f45d886f9976eec20eb34f8afc2144c69ffba07de546f2820893c70 - name: libmpc - evr: 1.2.1-4.el9 - sourcerpm: libmpc-1.2.1-4.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libpq-13.23-1.el9_7.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 210763 - checksum: sha256:db5dd6b6f6885ff5c349486ae9320de46aecb61ce9648fc88806972317d72acf - name: libpq - evr: 13.23-1.el9_7 - sourcerpm: libpq-13.23-1.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libtiff-4.4.0-15.el9_7.2.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 195402 - checksum: sha256:1da257e0663d88b30d4960e774564e5399464e933060f1bc50c0b7c39df7fc53 - name: libtiff - evr: 4.4.0-15.el9_7.2 - sourcerpm: libtiff-4.4.0-15.el9_7.2.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libubsan-11.5.0-11.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 178723 - checksum: sha256:03aa0392d5d7a442ee81963eb659b011446e6fcd5904c7b4c2850acdb81e22dc - name: libubsan - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libuv-1.42.0-2.el9_4.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/g/gcc-c++-11.5.0-5.el9_5.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms + size: 12999288 + checksum: sha256:a9ff0bd2a2b3483e07dcf87f8137a6358f36f5300c934b90500f119f884e3463 + name: gcc-c++ + evr: 
11.5.0-5.el9_5 + sourcerpm: gcc-11.5.0-5.el9_5.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/l/libstdc++-devel-11.5.0-5.el9_5.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms + size: 2526795 + checksum: sha256:83a2006137335a9b17a05a02a54481abcdfd295b280b924c51caaacd7bf07ad6 + name: libstdc++-devel + evr: 11.5.0-5.el9_5 + sourcerpm: gcc-11.5.0-5.el9_5.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/l/libuv-1.42.0-2.el9_4.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms size: 150129 checksum: sha256:4dc8a40da74e0f9823356460ee11f183c70f382953700fffef0c448198a677cc name: libuv evr: 1:1.42.0-2.el9_4 sourcerpm: libuv-1.42.0-2.el9_4.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libwebp-1.2.0-8.el9_3.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 272276 - checksum: sha256:5692fd846f9b41b3b6d6194f80dc52248c2ae1e7b0560b29bd0ed2f5bcb4506a - name: libwebp - evr: 1.2.0-8.el9_3 - sourcerpm: libwebp-1.2.0-8.el9_3.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/libxcrypt-devel-4.4.18-3.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 33051 - checksum: sha256:9d621f33df35b9c274b8d65457d6c67fc1522b6c62cf7b2341a4a99f39a93507 - name: libxcrypt-devel - evr: 4.4.18-3.el9 - sourcerpm: libxcrypt-4.4.18-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/llvm-filesystem-20.1.8-3.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 9320 - checksum: sha256:e92a53ac2ca3dfad1c286f67b86fd80c1ded3e7714a745c7222d8012575a7180 - name: llvm-filesystem - evr: 20.1.8-3.el9 - sourcerpm: llvm-20.1.8-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/l/llvm-libs-20.1.8-3.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 29379083 - checksum: sha256:ab5ca15a0edd98c358879337c4983f33b433bb7ca39f3252ec69d1523e56065d - name: llvm-libs - evr: 20.1.8-3.el9 - sourcerpm: llvm-20.1.8-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/o/oniguruma-6.9.6-1.el9.5.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 222582 - checksum: sha256:bc2305dad655ddb94f966158112efd6cefa6824d5aa2e80f63881f16cee74598 - name: oniguruma - evr: 6.9.6-1.el9.5 - sourcerpm: oniguruma-6.9.6-1.el9.5.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/o/openjpeg2-2.4.0-8.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 166456 - checksum: sha256:52b5696209e97f16155a878b545203edb2d3e59b0de30ed3abcb6b3af8c27ea3 - name: openjpeg2 - evr: 2.4.0-8.el9 - sourcerpm: openjpeg2-2.4.0-8.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/p/patch-2.7.6-16.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/p/patch-2.7.6-16.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms size: 129037 checksum: sha256:335c720da3caa41822737dd431d91a4adc79c85dedbe4483ecaf58bc83767610 name: patch evr: 2.7.6-16.el9 sourcerpm: patch-2.7.6-16.el9.src.rpm - - url: 
https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/p/python-unversioned-command-3.9.25-2.el9_7.noarch.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 9351 - checksum: sha256:ddc75f8460178a142a203ba8d5082c7d58393281238400d11a82cc5ee6487390 - name: python-unversioned-command - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/r/rust-1.88.0-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 28639155 - checksum: sha256:ef479c53d6d2e75753f5d36661ec01746d70ad16dcbb82f51dc4296de8e32613 + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/r/rust-1.84.1-1.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms + size: 26093725 + checksum: sha256:5be9185a7d684022bc0686049c22ef901c4df6dce2822bdec16a1a47c46b6861 name: rust - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/appstream/os/Packages/r/rust-std-static-1.88.0-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-appstream-rpms - size: 39777482 - checksum: sha256:9431bb9d0a3dd5fbfe3bfef2c28ef5149c72173ad53b75b046e1f4dad9d9d48d + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/appstream/os/Packages/r/rust-std-static-1.84.1-1.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-appstream-eus-rpms + size: 39259196 + checksum: sha256:5889bced81144c4ea201085e5bfd040300c56048e5d7987e9eb69d4d252f87bf name: rust-std-static - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/b/binutils-2.35.2-67.el9_7.1.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 5017674 - checksum: sha256:5c26e9da5ebaf4d5feb38f117b4468c41ad0c66cd80e52a68a9c322abf2b04ba - name: binutils - evr: 2.35.2-67.el9_7.1 - sourcerpm: binutils-2.35.2-67.el9_7.1.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/b/binutils-gold-2.35.2-67.el9_7.1.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 902260 - checksum: sha256:a9e2c2aac2f03056149fb55ed37a0df540dd65c921612ef3cde3d899ea7d8224 - name: binutils-gold - evr: 2.35.2-67.el9_7.1 - sourcerpm: binutils-2.35.2-67.el9_7.1.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/e/ed-1.14.2-12.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/baseos/os/Packages/e/ed-1.14.2-12.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-baseos-eus-rpms size: 78931 checksum: sha256:3bce4ce6243886c448e58f589b79e3ac829fcde53d1ff13d5906a8cdc22be091 name: ed evr: 1.14.2-12.el9 sourcerpm: ed-1.14.2-12.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/e/elfutils-debuginfod-client-0.193-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 43664 - checksum: sha256:f4eaff2bb0d77405c94e1877ae2dc3c741a5d06172ba75056af070b8c06b50a4 - name: elfutils-debuginfod-client - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/e/elfutils-default-yama-scope-0.193-1.el9.noarch.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 9949 - 
checksum: sha256:8f64d1675627246b912a6b7b71bb4c28c2d1ef09753208253c90253a4a31132f - name: elfutils-default-yama-scope - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/e/elfutils-libelf-0.193-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 208617 - checksum: sha256:481f731dd9877eedebe6b99cb1af171e091ce59265aa6bbee04f9b6b589c9ce6 - name: elfutils-libelf - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/e/elfutils-libs-0.193-1.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 271508 - checksum: sha256:d99325980fe5826b62a717aa63f863b66a65e047dc5f2d593a0ddcfa4308d0bf - name: elfutils-libs - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/i/info-6.7-15.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/aarch64/baseos/os/Packages/i/info-6.7-15.el9.aarch64.rpm + repoid: rhel-9-for-aarch64-baseos-eus-rpms size: 230301 checksum: sha256:c5ae65876c73c6f4e240081431745f5ba0a91d10a4bfb8a5d162ca3d6f039202 name: info evr: 6.7-15.el9 sourcerpm: texinfo-6.7-15.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/j/jq-1.6-19.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 185334 - checksum: sha256:a70a2f81df88595008fb897ee875af42150e11d1420b6e4989d40628816d4731 - name: jq - evr: 1.6-19.el9 - sourcerpm: jq-1.6-19.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/l/libedit-3.1-38.20210216cvs.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 107505 - checksum: sha256:a56a79e2254db3d351dce58e9960921aec45715b6b7c93eb7a0f453d1e60bae4 - name: libedit - evr: 3.1-38.20210216cvs.el9 - sourcerpm: libedit-3.1-38.20210216cvs.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/l/libgomp-11.5.0-11.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 261873 - checksum: sha256:eef29b0651ac6b2c3087f78dbca4066e9674fcd272926157d55ada53b1755c8f - name: libgomp - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/l/libpkgconf-1.7.3-10.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 38310 - checksum: sha256:9bdfccf6b092e0683aa6984f7c6caa737b30c0b1495e16abb03b5d1a5f8e787a - name: libpkgconf - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/m/make-4.3-8.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 550249 - checksum: sha256:351a22b0e6744bd329b1b0f22d9c3b69a6da970b575e6c76190cc84b0fe77450 - name: make - evr: 1:4.3-8.el9 - sourcerpm: make-4.3-8.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/pkgconf-1.7.3-10.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 45196 - checksum: sha256:aa38a3951a690d721a815ea8f9b01995a85f35a8540d8075205821011d0385e6 - name: pkgconf - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: 
https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/pkgconf-m4-1.7.3-10.el9.noarch.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 16054 - checksum: sha256:91bafd6e06099451f60288327b275cfcc651822f6145176a157c6b0fa5131e02 - name: pkgconf-m4 - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/pkgconf-pkg-config-1.7.3-10.el9.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 12398 - checksum: sha256:47f1f744f96a2f3d360bc129837738dcebb1ee5032effc4472a891eea1d6a907 - name: pkgconf-pkg-config - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/python3-3.9.25-2.el9_7.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 26375 - checksum: sha256:bbb23567ae8df61c75e65e95c017d424ec6662a2c9908c63a5ddd958cb7c18ee - name: python3 - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/python3-libs-3.9.25-2.el9_7.aarch64.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 8462763 - checksum: sha256:f08b46456cab63d937c4d3af9351918c6b1503d9d15824066f40972e5a48bd3c - name: python3-libs - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/python3-pip-wheel-21.3.1-1.el9.noarch.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 1193706 - checksum: sha256:75c46aab03898c66ce16be556432b71aed7efcedce02b9263339c14f57b4fdc0 - name: python3-pip-wheel - evr: 21.3.1-1.el9 - sourcerpm: python-pip-21.3.1-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/p/python3-setuptools-wheel-53.0.0-15.el9.noarch.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 479203 - checksum: sha256:36dacb345e21bc0308ef2508f0c93995520a15ef0b56aab3593186c8dc9c0c5a - name: python3-setuptools-wheel - evr: 53.0.0-15.el9 - sourcerpm: python-setuptools-53.0.0-15.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/aarch64/baseos/os/Packages/v/vim-filesystem-8.2.2637-23.el9_7.noarch.rpm - repoid: ubi-9-for-aarch64-baseos-rpms - size: 13179 - checksum: sha256:793710bbfc6627228c7811bdd3cbecb2c667a4581bd8b5fe9b9a2ebb20e57f79 - name: vim-filesystem - evr: 2:8.2.2637-23.el9_7 - sourcerpm: vim-8.2.2637-23.el9_7.src.rpm source: [] module_metadata: [] - arch: x86_64 packages: - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cargo-1.88.0-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 8326606 - checksum: sha256:8d5b570c23f08d8e619cd9d69f4e6a25572cc4df0747f9cdc8c531621ce45480 + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/c/cargo-1.84.1-1.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms + size: 8292467 + checksum: sha256:7dd011cd79a635654ade4e3186c5f7545d692de81157d1ce1d42656eaa6993b2 name: cargo - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cmake-3.26.5-2.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: 
https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/c/cmake-3.26.5-2.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms size: 9159462 checksum: sha256:f553370cb02b87e7388697468256556e765b102c2fcb56be6bc250cb2351e8ad name: cmake evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cmake-data-3.26.5-2.el9.noarch.rpm - repoid: ubi-9-for-x86_64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/c/cmake-data-3.26.5-2.el9.noarch.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms size: 2488227 checksum: sha256:84da65a7b8921f031d15903d91c5967022620f9e96b7493c8ab8024014755ee7 name: cmake-data evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cmake-filesystem-3.26.5-2.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 23450 - checksum: sha256:49fafe6c2b29fdede611a0a78664021d13f7126599e37ebff92bcb06d18f58b6 - name: cmake-filesystem - evr: 3.26.5-2.el9 - sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cmake-rpm-macros-3.26.5-2.el9.noarch.rpm - repoid: ubi-9-for-x86_64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/c/cmake-rpm-macros-3.26.5-2.el9.noarch.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms size: 12250 checksum: sha256:1c74969c8a4f21851f5b89f25ac55c689b75bed1318d0435fc3a14a49c39d0e3 name: cmake-rpm-macros evr: 3.26.5-2.el9 sourcerpm: cmake-3.26.5-2.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/c/cpp-11.5.0-11.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 11224872 - checksum: sha256:421a6f9e65d57c0b34128d7c5712c6617d87f7fc2fa896feb291f01aede6c4d2 - name: cpp - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/e/emacs-filesystem-27.2-18.el9.noarch.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 9495 - checksum: sha256:49d7b88a05a72c15b78191a987e6def04fda8e2e4ff75711f715d0c0ecadc60f - name: emacs-filesystem - evr: 1:27.2-18.el9 - sourcerpm: emacs-27.2-18.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/g/gcc-11.5.0-11.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 33986019 - checksum: sha256:9d6a29987112382e29640de757c7d6360b5742e8bded1696b335b5e98898acb9 - name: gcc - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/g/glibc-devel-2.34-231.el9_7.2.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 37885 - checksum: sha256:6468a64e723d9fff4921fe05b8b5117b19277999053b20d67416f727b2b8d3dd - name: glibc-devel - evr: 2.34-231.el9_7.2 - sourcerpm: glibc-2.34-231.el9_7.2.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/g/glibc-headers-2.34-231.el9_7.2.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 558293 - checksum: sha256:f4405218c4527e240f0739ba1b63e8a653e74ef48e960c0e164da55eec8c51dc - name: glibc-headers - evr: 2.34-231.el9_7.2 - sourcerpm: glibc-2.34-231.el9_7.2.src.rpm - - url: 
https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/j/jbigkit-libs-2.1-23.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 57187 - checksum: sha256:7da8bd49c92d873386b40567a7fa6b8604425bef2b5b1c5b8197bb999422dfb7 - name: jbigkit-libs - evr: 2.1-23.el9 - sourcerpm: jbigkit-2.1-23.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/k/kernel-headers-5.14.0-611.24.1.el9_7.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 3007525 - checksum: sha256:45e0c38ff72eae65683887a45ffcf11e44444c9f4732c808b13e4e0c2ccd9006 - name: kernel-headers - evr: 5.14.0-611.24.1.el9_7 - sourcerpm: kernel-5.14.0-611.24.1.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/lcms2-2.12-3.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 173479 - checksum: sha256:02da413dcff37e7c01c01b230039a51a18a24f69e8c4a72ae79fe5edd3330c80 - name: lcms2 - evr: 2.12-3.el9 - sourcerpm: lcms2-2.12-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libjpeg-turbo-2.0.90-7.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 181774 - checksum: sha256:281d740f3732d785382e56fdd61a62c2608bcce740c3dc34b57ec55136cf7201 - name: libjpeg-turbo - evr: 2.0.90-7.el9 - sourcerpm: libjpeg-turbo-2.0.90-7.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libmpc-1.2.1-4.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 66075 - checksum: sha256:b97b4e98c3c6f41dcfc2ceb4ffa1aba7a338b7cfd9e6c4f63e3160dd3cc033d3 - name: libmpc - evr: 1.2.1-4.el9 - sourcerpm: libmpc-1.2.1-4.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libpq-13.23-1.el9_7.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 213691 - checksum: sha256:36674e762f7e3d5f8e3c0c4b607c8b03eaaa5a830729dd8c8a9c8f8be93f6d60 - name: libpq - evr: 13.23-1.el9_7 - sourcerpm: libpq-13.23-1.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libtiff-4.4.0-15.el9_7.2.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 200896 - checksum: sha256:dfa983bbb35c44a665e93872f8860a30b5404ca97663ee788c9762079ad7155f - name: libtiff - evr: 4.4.0-15.el9_7.2 - sourcerpm: libtiff-4.4.0-15.el9_7.2.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libuv-1.42.0-2.el9_4.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/g/gcc-c++-11.5.0-5.el9_5.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms + size: 13479598 + checksum: sha256:b8392274e302d665bc132aee4ed023f8a777d9c446531679ede18150d7867189 + name: gcc-c++ + evr: 11.5.0-5.el9_5 + sourcerpm: gcc-11.5.0-5.el9_5.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/l/libstdc++-devel-11.5.0-5.el9_5.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms + size: 2531717 + checksum: sha256:84695eeeb1daa8ff74baf7efd9fc57fb136bec7e8a2ca56c105be6d83ec22d07 + name: libstdc++-devel + evr: 11.5.0-5.el9_5 + sourcerpm: gcc-11.5.0-5.el9_5.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/l/libuv-1.42.0-2.el9_4.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms size: 154427 checksum: 
sha256:e1fab39251239ccaad2fb4dbe6c55ec1ae60f76d4ae81582b06e6a58e30879b2 name: libuv evr: 1:1.42.0-2.el9_4 sourcerpm: libuv-1.42.0-2.el9_4.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libwebp-1.2.0-8.el9_3.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 289212 - checksum: sha256:6b99032107aa1d6b28dd98c44b0dc6451ce632627ccf6da0c29ac34fd5f501e8 - name: libwebp - evr: 1.2.0-8.el9_3 - sourcerpm: libwebp-1.2.0-8.el9_3.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libxcrypt-compat-4.4.18-3.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 93189 - checksum: sha256:2bd6c288e1970a001d3a1ae69166c0d926d9c87ce892edcb2110f4e142c12a7a - name: libxcrypt-compat - evr: 4.4.18-3.el9 - sourcerpm: libxcrypt-4.4.18-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/libxcrypt-devel-4.4.18-3.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 33101 - checksum: sha256:c1d171391a7d2e043a6953efd3df3e01edc9b4c6cdb54517e1608d204a5fce18 - name: libxcrypt-devel - evr: 4.4.18-3.el9 - sourcerpm: libxcrypt-4.4.18-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/llvm-filesystem-20.1.8-3.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 9374 - checksum: sha256:b1584007e959eddcba9b5c930ca001a741ce8c5db53b60c97a1eeb1483e0444c - name: llvm-filesystem - evr: 20.1.8-3.el9 - sourcerpm: llvm-20.1.8-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/l/llvm-libs-20.1.8-3.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 31501653 - checksum: sha256:5ae29a9cf690992010987b3dfc8a249a869bfca8ae3a45178685411d7f70c358 - name: llvm-libs - evr: 20.1.8-3.el9 - sourcerpm: llvm-20.1.8-3.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/o/oniguruma-6.9.6-1.el9.5.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 226331 - checksum: sha256:6c884cc2216e5b4699ebd8cde27b39e99532520b367f645ed6cc660d081916dc - name: oniguruma - evr: 6.9.6-1.el9.5 - sourcerpm: oniguruma-6.9.6-1.el9.5.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/o/openjpeg2-2.4.0-8.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 168804 - checksum: sha256:5e532b4206b8af2dcb6e787ca9497b5eb3d333b743b5e7729ded66aa50e8ae78 - name: openjpeg2 - evr: 2.4.0-8.el9 - sourcerpm: openjpeg2-2.4.0-8.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/p/patch-2.7.6-16.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/p/patch-2.7.6-16.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms size: 133240 checksum: sha256:d2e0307a2d1d4eff0c2db406841030461b35864926916f2a92244427d89316be name: patch evr: 2.7.6-16.el9 sourcerpm: patch-2.7.6-16.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/p/python-unversioned-command-3.9.25-2.el9_7.noarch.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 9351 - checksum: sha256:ddc75f8460178a142a203ba8d5082c7d58393281238400d11a82cc5ee6487390 - name: python-unversioned-command - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: 
https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/r/rust-1.88.0-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 30892199 - checksum: sha256:d976ea2f80c38598484e6e6e5501bc92f8581b94227dd554e0492bf5d2234f04 + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/r/rust-1.84.1-1.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms + size: 28050444 + checksum: sha256:9ba3c53fd811af2f294e31360d75e33e4cb89893130c7b3fe0c6191e20a09f3e name: rust - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/appstream/os/Packages/r/rust-std-static-1.88.0-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-appstream-rpms - size: 41209382 - checksum: sha256:5ac616ad878773059445a8c8cbc8ee013541712b321435a9adff5989558a3227 + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/appstream/os/Packages/r/rust-std-static-1.84.1-1.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-appstream-eus-rpms + size: 41211472 + checksum: sha256:73bb90884432e2b43758f1043f107a570b5d54b38f17d5d0af51bac103ceb4f5 name: rust-std-static - evr: 1.88.0-1.el9 - sourcerpm: rust-1.88.0-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/b/binutils-2.35.2-67.el9_7.1.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 4813551 - checksum: sha256:1e7ccdae7390ee9323971fef398e41687eb39ca06242ca1ab673ed8b31e99184 - name: binutils - evr: 2.35.2-67.el9_7.1 - sourcerpm: binutils-2.35.2-67.el9_7.1.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/b/binutils-gold-2.35.2-67.el9_7.1.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 751923 - checksum: sha256:9dbb88e0bacb4985c5ae21b002fc2a2b2ad316ad3d8bd18e5f5a79729e92e9ee - name: binutils-gold - evr: 2.35.2-67.el9_7.1 - sourcerpm: binutils-2.35.2-67.el9_7.1.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/e/ed-1.14.2-12.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms + evr: 1.84.1-1.el9 + sourcerpm: rust-1.84.1-1.el9.src.rpm + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/baseos/os/Packages/e/ed-1.14.2-12.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-baseos-eus-rpms size: 79993 checksum: sha256:5fb3c625fd1ace94f133522bdaf4768abd78f029e20886b8e4aed2d6d1aac664 name: ed evr: 1.14.2-12.el9 sourcerpm: ed-1.14.2-12.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/e/elfutils-debuginfod-client-0.193-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 44629 - checksum: sha256:595b16ef65e5310e6091af8e9ff9dc378249ab3d739f7b02881b3eb33c9acce6 - name: elfutils-debuginfod-client - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/e/elfutils-default-yama-scope-0.193-1.el9.noarch.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 9949 - checksum: sha256:8f64d1675627246b912a6b7b71bb4c28c2d1ef09753208253c90253a4a31132f - name: elfutils-default-yama-scope - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/e/elfutils-libelf-0.193-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 209533 - checksum: 
sha256:c37308dadac722a4fc928cb4b919c0c5561c458169f754beb7375eb067012195 - name: elfutils-libelf - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/e/elfutils-libs-0.193-1.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 274462 - checksum: sha256:a1e6d8396c33dadf7f8f568284e90238e0e1d68a77b2c6c4b2e4ff00ff233e70 - name: elfutils-libs - evr: 0.193-1.el9 - sourcerpm: elfutils-0.193-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/i/info-6.7-15.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms + - url: https://cdn.redhat.com/content/eus/rhel9/9.6/x86_64/baseos/os/Packages/i/info-6.7-15.el9.x86_64.rpm + repoid: rhel-9-for-x86_64-baseos-eus-rpms size: 233806 checksum: sha256:3643f98b45cc973073096608aaa45976d722fe284590ff7c1d5f93ad77ba0f8b name: info evr: 6.7-15.el9 sourcerpm: texinfo-6.7-15.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/j/jq-1.6-19.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 191662 - checksum: sha256:6b4d82714813d7b4a3200bf2856a3c1493d186e9caa916d7a700ec25e4996462 - name: jq - evr: 1.6-19.el9 - sourcerpm: jq-1.6-19.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/l/libedit-3.1-38.20210216cvs.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 109330 - checksum: sha256:9e41ff5754a5dca1308adf9617828934d56cb60d8d08f128f80e4328f69bc78c - name: libedit - evr: 3.1-38.20210216cvs.el9 - sourcerpm: libedit-3.1-38.20210216cvs.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/l/libgomp-11.5.0-11.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 263529 - checksum: sha256:da7aa3b4934ff0ccf24f925b8216654cf9c9881f64075e2fde1da4f560ca5c2f - name: libgomp - evr: 11.5.0-11.el9 - sourcerpm: gcc-11.5.0-11.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/l/libpkgconf-1.7.3-10.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 38387 - checksum: sha256:4feae5941b73640bd86b8d506a657cac5b770043db1464fbcd207721b2159dda - name: libpkgconf - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/m/make-4.3-8.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 553896 - checksum: sha256:561f0c2251e9217c81a6c88de4d2d9231a039aaab37e8a0d2559d36ce9fa85fd - name: make - evr: 1:4.3-8.el9 - sourcerpm: make-4.3-8.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/pkgconf-1.7.3-10.el9.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 45675 - checksum: sha256:bb47b4ecc499c308f41031a99e723827d152d5d750f59849d0c265d820944a26 - name: pkgconf - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/pkgconf-m4-1.7.3-10.el9.noarch.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 16054 - checksum: sha256:91bafd6e06099451f60288327b275cfcc651822f6145176a157c6b0fa5131e02 - name: pkgconf-m4 - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/pkgconf-pkg-config-1.7.3-10.el9.x86_64.rpm - repoid: 
ubi-9-for-x86_64-baseos-rpms - size: 12438 - checksum: sha256:9a502d81d73d3303ceb53a06ad7ce525c97117ea64352174a33708bf3429283d - name: pkgconf-pkg-config - evr: 1.7.3-10.el9 - sourcerpm: pkgconf-1.7.3-10.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/python3-3.9.25-2.el9_7.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 26401 - checksum: sha256:f0aebc2ba2783ad81c9989e23405ce5ccd9f2df0e67d89ce41c61e7c12c6585c - name: python3 - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/python3-libs-3.9.25-2.el9_7.x86_64.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 8476647 - checksum: sha256:a4954756304bce5257f4b494c61fee45a1d733e1791fd9a0c3eac6eed97f2e6f - name: python3-libs - evr: 3.9.25-2.el9_7 - sourcerpm: python3.9-3.9.25-2.el9_7.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/python3-pip-wheel-21.3.1-1.el9.noarch.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 1193706 - checksum: sha256:75c46aab03898c66ce16be556432b71aed7efcedce02b9263339c14f57b4fdc0 - name: python3-pip-wheel - evr: 21.3.1-1.el9 - sourcerpm: python-pip-21.3.1-1.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/p/python3-setuptools-wheel-53.0.0-15.el9.noarch.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 479203 - checksum: sha256:36dacb345e21bc0308ef2508f0c93995520a15ef0b56aab3593186c8dc9c0c5a - name: python3-setuptools-wheel - evr: 53.0.0-15.el9 - sourcerpm: python-setuptools-53.0.0-15.el9.src.rpm - - url: https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/x86_64/baseos/os/Packages/v/vim-filesystem-8.2.2637-23.el9_7.noarch.rpm - repoid: ubi-9-for-x86_64-baseos-rpms - size: 13179 - checksum: sha256:793710bbfc6627228c7811bdd3cbecb2c667a4581bd8b5fe9b9a2ebb20e57f79 - name: vim-filesystem - evr: 2:8.2.2637-23.el9_7 - sourcerpm: vim-8.2.2637-23.el9_7.src.rpm source: [] module_metadata: [] diff --git a/src/app/endpoints/README.md b/src/app/endpoints/README.md index 3b95e5b2e..99d8651c8 100644 --- a/src/app/endpoints/README.md +++ b/src/app/endpoints/README.md @@ -12,15 +12,12 @@ Handler for REST API call to authorized endpoint. ## [config.py](config.py) Handler for REST API call to retrieve service configuration. -## [conversations.py](conversations.py) -Handler for REST API calls to manage conversation history. +## [conversations_v1.py](conversations_v1.py) +Handler for REST API calls to manage conversation history using Conversations API. ## [conversations_v2.py](conversations_v2.py) Handler for REST API calls to manage conversation history. -## [conversations_v3.py](conversations_v3.py) -Handler for REST API calls to manage conversation history using Conversations API. - ## [feedback.py](feedback.py) Handler for REST API endpoint for user feedback. @@ -43,9 +40,6 @@ Handler for REST API call to list available models. Handler for REST API calls to list and retrieve available providers. ## [query.py](query.py) -Handler for REST API call to provide answer to query. - -## [query_v2.py](query_v2.py) Handler for REST API call to provide answer to query using Response API. ## [rags.py](rags.py) @@ -61,10 +55,7 @@ Handler for the / endpoint. Handler for REST API call to list available shields. ## [streaming_query.py](streaming_query.py) -Handler for REST API call to provide answer to streaming query. 
-
-## [streaming_query_v2.py](streaming_query_v2.py)
-Streaming query handler using Responses API (v2).
+Streaming query handler using Responses API.
 
 ## [tools.py](tools.py)
 Handler for REST API call to list available tools from MCP servers.
diff --git a/src/app/endpoints/a2a.py b/src/app/endpoints/a2a.py
index 15fb3e5d1..ac7557357 100644
--- a/src/app/endpoints/a2a.py
+++ b/src/app/endpoints/a2a.py
@@ -36,20 +36,21 @@ from starlette.responses import Response, StreamingResponse
 
 from a2a_storage import A2AContextStore, A2AStorageFactory
 
-from app.endpoints.query_old import (
-    evaluate_model_hints,
-    select_model_and_provider_id,
-)
-from app.endpoints.streaming_query import retrieve_response
+
 from authentication import get_auth_dependency
 from authentication.interface import AuthTuple
 from authorization.middleware import authorize
 from client import AsyncLlamaStackClientHolder
 from configuration import configuration
+from constants import MEDIA_TYPE_EVENT_STREAM
 from models.config import Action
 from models.requests import QueryRequest
-from utils.mcp_headers import mcp_headers_dependency
-from utils.responses import extract_text_from_response_output_item
+from utils.mcp_headers import mcp_headers_dependency, McpHeaders
+from utils.responses import (
+    extract_text_from_response_output_item,
+    prepare_responses_params,
+)
+from utils.suid import normalize_conversation_id
 from version import __version__
 
 logger = logging.getLogger("app.endpoints.handlers")
@@ -183,9 +184,7 @@ class A2AAgentExecutor(AgentExecutor):
     routing queries to the LLM backend using the Responses API.
     """
 
-    def __init__(
-        self, auth_token: str, mcp_headers: Optional[dict[str, dict[str, str]]] = None
-    ):
+    def __init__(self, auth_token: str, mcp_headers: Optional[McpHeaders] = None):
         """Initialize the A2A agent executor.
 
         Args:
@@ -193,7 +192,7 @@ def __init__(
             mcp_headers: MCP headers for context propagation
         """
         self.auth_token: str = auth_token
-        self.mcp_headers: dict[str, dict[str, str]] = mcp_headers or {}
+        self.mcp_headers: McpHeaders = mcp_headers or {}
 
     async def execute(
         self,
@@ -317,23 +316,17 @@ async def _process_task_streaming(  # pylint: disable=too-many-locals
         # Get LLM client and select model
         client = AsyncLlamaStackClientHolder().get_client()
         try:
-            llama_stack_model_id, _model_id, _provider_id = (
-                select_model_and_provider_id(
-                    await client.models.list(),
-                    *evaluate_model_hints(
-                        user_conversation=None, query_request=query_request
-                    ),
-                )
-            )
-
-            # Stream response from LLM using the Responses API
-            stream, conversation_id = await retrieve_response(
+            responses_params = await prepare_responses_params(
                 client,
-                llama_stack_model_id,
                 query_request,
+                None,
                 self.auth_token,
-                mcp_headers=self.mcp_headers,
+                self.mcp_headers,
+                stream=True,
+                store=True,
             )
+            # Stream response from LLM using the Responses API
+            stream = await client.responses.create(**responses_params.model_dump())
         except APIConnectionError as e:
             error_message = (
                 f"Unable to connect to Llama Stack backend service: {str(e)}. "
@@ -356,6 +349,9 @@
             return
 
         # Persist conversation_id for next turn in same A2A context
+        conversation_id = conversation_id or normalize_conversation_id(
+            responses_params.conversation
+        )
         if conversation_id:
             await context_store.set(a2a_context_id, conversation_id)
             logger.info(
@@ -379,7 +375,7 @@
                 context_id=context_id,
                 final=False,
                 metadata={
-                    "model": llama_stack_model_id,
+                    "model": responses_params.model,
                     "conversation_id": conversation_id,
                 },
             )
@@ -651,9 +647,7 @@ async def get_agent_card(  # pylint: disable=unused-argument
         raise
 
 
-async def _create_a2a_app(
-    auth_token: str, mcp_headers: dict[str, dict[str, str]]
-) -> Any:
+async def _create_a2a_app(auth_token: str, mcp_headers: McpHeaders) -> Any:
     """Create an A2A Starlette application instance with auth context.
 
     Args:
@@ -684,7 +678,7 @@ async def handle_a2a_jsonrpc(  # pylint: disable=too-many-locals,too-many-statements
     request: Request,
     auth: Annotated[AuthTuple, Depends(auth_dependency)],
-    mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency),
+    mcp_headers: McpHeaders = Depends(mcp_headers_dependency),
 ) -> Response | StreamingResponse:
     """
     Handle A2A JSON-RPC requests following the A2A protocol specification.
 
@@ -834,7 +828,7 @@ async def response_generator() -> AsyncIterator[bytes]:
     # Return streaming response with SSE content type for A2A protocol
     return StreamingResponse(
         response_generator(),
-        media_type="text/event-stream",
+        media_type=MEDIA_TYPE_EVENT_STREAM,
     )
 
     # Non-streaming mode: Buffer entire response
diff --git a/src/app/endpoints/conversations.py b/src/app/endpoints/conversations.py
deleted file mode 100644
index 1e9891370..000000000
--- a/src/app/endpoints/conversations.py
+++ /dev/null
@@ -1,390 +0,0 @@
-"""Handler for REST API calls to manage conversation history."""
-
-import logging
-from typing import Any
-
-from fastapi import APIRouter, Depends, HTTPException, Request
-from llama_stack_client import APIConnectionError, NotFoundError
-from sqlalchemy.exc import SQLAlchemyError
-
-from app.database import get_session
-from authentication import get_auth_dependency
-from authorization.middleware import authorize
-from client import AsyncLlamaStackClientHolder
-from configuration import configuration
-from models.config import Action
-from models.database.conversations import UserConversation
-from models.responses import (
-    BadRequestResponse,
-    ConversationDeleteResponse,
-    ConversationDetails,
-    ConversationResponse,
-    ConversationsListResponse,
-    ForbiddenResponse,
-    InternalServerErrorResponse,
-    NotFoundResponse,
-    ServiceUnavailableResponse,
-    UnauthorizedResponse,
-)
-from utils.endpoints import (
-    can_access_conversation,
-    check_configuration_loaded,
-    delete_conversation,
-    retrieve_conversation,
-)
-from utils.suid import check_suid
-
-logger = logging.getLogger("app.endpoints.handlers")
-router = APIRouter(tags=["conversations"])
-
-
-conversation_get_responses: dict[int | str, dict[str, Any]] = {
-    200: ConversationResponse.openapi_response(),
-    400: BadRequestResponse.openapi_response(),
-    401: UnauthorizedResponse.openapi_response(
-        examples=["missing header", "missing token"]
-    ),
-    403: ForbiddenResponse.openapi_response(examples=["conversation read", "endpoint"]),
-    404: NotFoundResponse.openapi_response(examples=["conversation"]),
-    500: InternalServerErrorResponse.openapi_response(
-        examples=["database", "configuration"]
-    ),
-    503: ServiceUnavailableResponse.openapi_response(),
-}
-
-conversation_delete_responses: dict[int | str, dict[str, Any]] = {
-    200: ConversationDeleteResponse.openapi_response(),
-    400: BadRequestResponse.openapi_response(),
-    401: UnauthorizedResponse.openapi_response(
-        examples=["missing header", "missing token"]
-    ),
-    403: ForbiddenResponse.openapi_response(
-        examples=["conversation delete", "endpoint"]
-    ),
-    404: NotFoundResponse.openapi_response(examples=["conversation"]),
-    500: InternalServerErrorResponse.openapi_response(
-        examples=["database", "configuration"]
-    ),
-    503: ServiceUnavailableResponse.openapi_response(),
-}
-
-conversations_list_responses: dict[int | str, dict[str, Any]] = {
-    200: ConversationsListResponse.openapi_response(),
-    401: UnauthorizedResponse.openapi_response(
-        examples=["missing header", "missing token"]
-    ),
-    403: ForbiddenResponse.openapi_response(examples=["endpoint"]),
-    500: InternalServerErrorResponse.openapi_response(
-        examples=["database", "configuration"]
-    ),
-    503: ServiceUnavailableResponse.openapi_response(),
-}
-
-
-def simplify_session_data(session_data: dict) -> list[dict[str, Any]]:
-    """Simplify session data to include only essential conversation information.
-
-    Args:
-        session_data: The full session data dict from llama-stack
-
-    Returns:
-        Simplified session data with only input_messages and output_message per turn
-    """
-    # Create simplified structure
-    chat_history = []
-
-    # Extract only essential data from each turn
-    for turn in session_data.get("turns", []):
-        # Clean up input messages
-        cleaned_messages = []
-        for msg in turn.get("input_messages", []):
-            cleaned_msg = {
-                "content": msg.get("content"),
-                "type": msg.get("role"),  # Rename role to type
-            }
-            cleaned_messages.append(cleaned_msg)
-
-        # Clean up output message
-        output_msg = turn.get("output_message", {})
-        cleaned_messages.append(
-            {
-                "content": output_msg.get("content"),
-                "type": output_msg.get("role"),  # Rename role to type
-            }
-        )
-
-        simplified_turn = {
-            "messages": cleaned_messages,
-            "started_at": turn.get("started_at"),
-            "completed_at": turn.get("completed_at"),
-        }
-        chat_history.append(simplified_turn)
-
-    return chat_history
-
-
-@router.get("/conversations", responses=conversations_list_responses)
-@authorize(Action.LIST_CONVERSATIONS)
-async def get_conversations_list_endpoint_handler(
-    request: Request,
-    auth: Any = Depends(get_auth_dependency()),
-) -> ConversationsListResponse:
-    """Handle request to retrieve all conversations for the authenticated user."""
-    check_configuration_loaded(configuration)
-
-    user_id = auth[0]
-
-    logger.info("Retrieving conversations for user %s", user_id)
-
-    with get_session() as session:
-        try:
-            query = session.query(UserConversation)
-
-            filtered_query = (
-                query
-                if Action.LIST_OTHERS_CONVERSATIONS in request.state.authorized_actions
-                else query.filter_by(user_id=user_id)
-            )
-
-            user_conversations = filtered_query.all()
-
-            # Return conversation summaries with metadata
-            conversations = [
-                ConversationDetails(
-                    conversation_id=conv.id,
-                    created_at=conv.created_at.isoformat() if conv.created_at else None,
-                    last_message_at=(
-                        conv.last_message_at.isoformat()
-                        if conv.last_message_at
-                        else None
-                    ),
-                    message_count=conv.message_count,
-                    last_used_model=conv.last_used_model,
-                    last_used_provider=conv.last_used_provider,
-                    topic_summary=conv.topic_summary,
-                )
-                for conv in user_conversations
-            ]
-
-            logger.info(
-                "Found %d conversations for user %s", len(conversations), user_id
-            )
-
-            return ConversationsListResponse(conversations=conversations)
-
-        except SQLAlchemyError as e:
-            logger.exception(
-                "Error retrieving conversations for user %s: %s", user_id, e
-            )
-            response = InternalServerErrorResponse.database_error()
-            raise HTTPException(**response.model_dump()) from e
-
-
-@router.get("/conversations/{conversation_id}", responses=conversation_get_responses)
-@authorize(Action.GET_CONVERSATION)
-async def get_conversation_endpoint_handler(
-    request: Request,
-    conversation_id: str,
-    auth: Any = Depends(get_auth_dependency()),
-) -> ConversationResponse:
-    """
-    Handle request to retrieve a conversation by ID.
-
-    Retrieve a conversation's chat history by its ID. Then fetches
-    the conversation session from the Llama Stack backend,
-    simplifies the session data to essential chat history, and
-    returns it in a structured response. Raises HTTP 400 for
-    invalid IDs, 404 if not found, 503 if the backend is
-    unavailable, and 500 for unexpected errors.
-
-    Parameters:
-        conversation_id (str): Unique identifier of the conversation to retrieve.
-
-    Returns:
-        ConversationResponse: Structured response containing the conversation
-        ID and simplified chat history.
-    """
-    check_configuration_loaded(configuration)
-
-    # Validate conversation ID format
-    if not check_suid(conversation_id):
-        logger.error("Invalid conversation ID format: %s", conversation_id)
-        response = BadRequestResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    user_id = auth[0]
-    if not can_access_conversation(
-        conversation_id,
-        user_id,
-        others_allowed=(
-            Action.READ_OTHERS_CONVERSATIONS in request.state.authorized_actions
-        ),
-    ):
-        logger.warning(
-            "User %s attempted to read conversation %s they don't have access to",
-            user_id,
-            conversation_id,
-        )
-        response = ForbiddenResponse.conversation(
-            action="read", resource_id=conversation_id, user_id=user_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    # If reached this, user is authorized to retreive this conversation
-    conversation = retrieve_conversation(conversation_id)
-    if conversation is None:
-        response = NotFoundResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    agent_id = conversation_id
-    logger.info("Retrieving conversation %s", conversation_id)
-
-    try:
-        client = AsyncLlamaStackClientHolder().get_client()
-
-        agent_sessions = (await client.agents.session.list(agent_id=agent_id)).data
-        if not agent_sessions:
-            logger.error("No sessions found for conversation %s", conversation_id)
-            response = NotFoundResponse(
-                resource="conversation", resource_id=conversation_id
-            )
-            raise HTTPException(**response.model_dump())
-        session_id = str(agent_sessions[0].get("session_id"))
-
-        session_response = await client.agents.session.retrieve(
-            agent_id=agent_id, session_id=session_id
-        )
-        session_data = session_response.model_dump()
-
-        logger.info("Successfully retrieved conversation %s", conversation_id)
-
-        # Simplify the session data to include only essential conversation information
-        chat_history = simplify_session_data(session_data)
-
-        return ConversationResponse(
-            conversation_id=conversation_id,
-            chat_history=chat_history,
-        )
-
-    except APIConnectionError as e:
-        logger.error("Unable to connect to Llama Stack: %s", e)
-        response = ServiceUnavailableResponse(backend_name="Llama Stack", cause=str(e))
-        raise HTTPException(**response.model_dump()) from e
-
-    except NotFoundError as e:
-        logger.error("Conversation not found: %s", e)
-        response = NotFoundResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump()) from e
-
-    except SQLAlchemyError as e:
-        logger.exception("Error retrieving conversation %s: %s", conversation_id, e)
-        response = InternalServerErrorResponse.database_error()
-        raise HTTPException(**response.model_dump()) from e
-
-
-@router.delete(
-    "/conversations/{conversation_id}", responses=conversation_delete_responses
-)
-@authorize(Action.DELETE_CONVERSATION)
-async def delete_conversation_endpoint_handler(
-    request: Request,
-    conversation_id: str,
-    auth: Any = Depends(get_auth_dependency()),
-) -> ConversationDeleteResponse:
-    """
-    Handle request to delete a conversation by ID.
-
-    Validates the conversation ID format and attempts to delete the
-    corresponding session from the Llama Stack backend. Raises HTTP
-    errors for invalid IDs, not found conversations, connection
-    issues, or unexpected failures.
-
-    Returns:
-        ConversationDeleteResponse: Response indicating the result of the deletion operation.
-    """
-    check_configuration_loaded(configuration)
-
-    # Validate conversation ID format
-    if not check_suid(conversation_id):
-        logger.error("Invalid conversation ID format: %s", conversation_id)
-        response = BadRequestResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    user_id = auth[0]
-    if not can_access_conversation(
-        conversation_id,
-        user_id,
-        others_allowed=(
-            Action.DELETE_OTHERS_CONVERSATIONS in request.state.authorized_actions
-        ),
-    ):
-        logger.warning(
-            "User %s attempted to delete conversation %s they don't have access to",
-            user_id,
-            conversation_id,
-        )
-        response = ForbiddenResponse.conversation(
-            action="delete", resource_id=conversation_id, user_id=user_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    # If reached this, user is authorized to retreive this conversation
-    conversation = retrieve_conversation(conversation_id)
-    if conversation is None:
-        response = NotFoundResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump())
-
-    agent_id = conversation_id
-    logger.info("Deleting conversation %s", conversation_id)
-
-    try:
-        # Get Llama Stack client
-        client = AsyncLlamaStackClientHolder().get_client()
-
-        agent_sessions = (await client.agents.session.list(agent_id=agent_id)).data
-
-        if not agent_sessions:
-            # If no sessions are found, do not raise an error, just return a success response
-            logger.info("No sessions found for conversation %s", conversation_id)
-            return ConversationDeleteResponse(
-                deleted=False,
-                conversation_id=conversation_id,
-            )
-
-        session_id = str(agent_sessions[0].get("session_id"))
-
-        await client.agents.session.delete(agent_id=agent_id, session_id=session_id)
-
-        logger.info("Successfully deleted conversation %s", conversation_id)
-
-        delete_conversation(conversation_id=conversation_id)
-
-        return ConversationDeleteResponse(
-            deleted=True,
-            conversation_id=conversation_id,
-        )
-
-    except APIConnectionError as e:
-        response = ServiceUnavailableResponse(backend_name="Llama Stack", cause=str(e))
-        raise HTTPException(**response.model_dump()) from e
-
-    except NotFoundError as e:
-        response = NotFoundResponse(
-            resource="conversation", resource_id=conversation_id
-        )
-        raise HTTPException(**response.model_dump()) from e
-
-    except SQLAlchemyError as e:
-        logger.exception("Error deleting conversation %s: %s", conversation_id, e)
-        response = InternalServerErrorResponse.database_error()
-        raise HTTPException(**response.model_dump()) from e
diff --git a/src/app/endpoints/conversations_v3.py b/src/app/endpoints/conversations_v1.py
similarity index 80%
rename from src/app/endpoints/conversations_v3.py
rename to src/app/endpoints/conversations_v1.py
index ff9f8058b..7d2a40130 100644
--- a/src/app/endpoints/conversations_v3.py
+++ b/src/app/endpoints/conversations_v1.py
@@ -3,7 +3,7 @@
 import logging
 from typing import Any
 
-from fastapi import APIRouter, Depends, HTTPException, Request, status
+from fastapi import APIRouter, Depends, HTTPException, Request
 from llama_stack_client import (
     APIConnectionError,
     APIStatusError,
@@ -16,7 +16,10 @@
 from client import AsyncLlamaStackClientHolder
 from configuration import configuration
 from models.config import Action
-from models.database.conversations import UserConversation
+from models.database.conversations import (
+    UserTurn,
+    UserConversation,
+)
 from models.requests import ConversationUpdateRequest
 from models.responses import (
     BadRequestResponse,
@@ -36,12 +39,14 @@
     check_configuration_loaded,
     delete_conversation,
     retrieve_conversation,
+    validate_and_retrieve_conversation,
 )
 from utils.suid import (
     check_suid,
     normalize_conversation_id,
     to_llama_stack_conversation_id,
 )
+from utils.conversations import build_conversation_turns_from_items
 
 logger = logging.getLogger("app.endpoints.handlers")
 router = APIRouter(tags=["conversations_v1"])
@@ -84,7 +89,6 @@
     500: InternalServerErrorResponse.openapi_response(
         examples=["database", "configuration"]
     ),
-    503: ServiceUnavailableResponse.openapi_response(),
 }
 
 conversation_update_responses: dict[int | str, dict[str, Any]] = {
@@ -102,68 +106,6 @@
 }
 
 
-def simplify_conversation_items(items: list[dict]) -> list[dict[str, Any]]:
-    """Simplify conversation items to include only essential information.
-
-    Args:
-        items: The full conversation items list from llama-stack Conversations API
-            (in reverse chronological order, newest first)
-
-    Returns:
-        Simplified items with only essential message and tool call information
-        (in chronological order, oldest first, grouped by turns)
-    """
-    # Filter only message type items
-    message_items = [item for item in items if item.get("type") == "message"]
-
-    # Process from bottom up (reverse to get chronological order)
-    # Assume items are grouped correctly: user input followed by assistant output
-    reversed_messages = list(reversed(message_items))
-
-    chat_history = []
-    i = 0
-    while i < len(reversed_messages):
-        # Extract text content from user message
-        user_item = reversed_messages[i]
-        user_content = user_item.get("content", [])
-        user_text = ""
-        for content_part in user_content:
-            if isinstance(content_part, dict):
-                content_type = content_part.get("type")
-                if content_type == "input_text":
-                    user_text += content_part.get("text", "")
-            elif isinstance(content_part, str):
-                user_text += content_part
-
-        # Extract text content from assistant message (next item)
-        assistant_text = ""
-        if i + 1 < len(reversed_messages):
-            assistant_item = reversed_messages[i + 1]
-            assistant_content = assistant_item.get("content", [])
-            for content_part in assistant_content:
-                if isinstance(content_part, dict):
-                    content_type = content_part.get("type")
-                    if content_type == "output_text":
-                        assistant_text += content_part.get("text", "")
-                elif isinstance(content_part, str):
-                    assistant_text += content_part
-
-        # Create turn with user message first, then assistant message
-        chat_history.append(
-            {
-                "messages": [
-                    {"content": user_text, "type": "user"},
-                    {"content": assistant_text, "type": "assistant"},
-                ]
-            }
-        )
-
-        # Move to next pair (skip both user and assistant)
-        i += 2
-
-    return chat_history
-
-
 @router.get(
     "/conversations",
     responses=conversations_list_responses,
@@ -231,7 +173,7 @@
     summary="Conversation Get Endpoint Handler V1",
 )
 @authorize(Action.GET_CONVERSATION)
-async def get_conversation_endpoint_handler(
+async def get_conversation_endpoint_handler(  # pylint: disable=too-many-locals,too-many-statements
     request: Request,
     conversation_id: str,
     auth: Any = Depends(get_auth_dependency()),
@@ -273,47 +215,13 @@
     )
 
     user_id = auth[0]
-    if not can_access_conversation(
-        normalized_conv_id,
-        user_id,
+    conversation = validate_and_retrieve_conversation(
+        normalized_conv_id=normalized_conv_id,
+        user_id=user_id,
         others_allowed=(
             Action.READ_OTHERS_CONVERSATIONS in request.state.authorized_actions
         ),
-    ):
-        logger.warning(
-            "User %s attempted to read conversation %s they don't have access to",
-            user_id,
-            normalized_conv_id,
-        )
-        response = ForbiddenResponse.conversation(
-            action="read",
-            resource_id=normalized_conv_id,
-            user_id=user_id,
-        ).model_dump()
-        raise HTTPException(**response)
-
-    # If reached this, user is authorized to retrieve this conversation
-    try:
-        conversation = retrieve_conversation(normalized_conv_id)
-        if conversation is None:
-            logger.error(
-                "Conversation %s not found in database.",
-                normalized_conv_id,
-            )
-            response = NotFoundResponse(
-                resource="conversation", resource_id=normalized_conv_id
-            ).model_dump()
-            raise HTTPException(**response)
-
-    except SQLAlchemyError as e:
-        logger.error(
-            "Database error occurred while retrieving conversation %s: %s",
-            normalized_conv_id,
-            str(e),
-        )
-        response =
InternalServerErrorResponse.database_error() - raise HTTPException(**response.model_dump()) from e - + ) logger.info( "Retrieving conversation %s using Conversations API", normalized_conv_id ) @@ -334,28 +242,47 @@ async def get_conversation_endpoint_handler( after=None, include=None, limit=None, - order=None, - ) - items = ( - conversation_items_response.data - if hasattr(conversation_items_response, "data") - else [] + order="asc", # oldest first ) - # Convert items to dict format for processing - items_dicts = [ - item.model_dump() if hasattr(item, "model_dump") else dict(item) - for item in items - ] + + if not conversation_items_response.data: + logger.error("No items found for conversation %s", conversation_id) + response = NotFoundResponse( + resource="conversation", resource_id=normalized_conv_id + ).model_dump() + raise HTTPException(**response) + + items = conversation_items_response.data logger.info( "Successfully retrieved %d items for conversation %s", - len(items_dicts), + len(items), conversation_id, ) - # Simplify the conversation items to include only essential information - chat_history = simplify_conversation_items(items_dicts) + # Retrieve turns metadata from database + db_turns: list[UserTurn] = [] + try: + with get_session() as session: + db_turns = ( + session.query(UserTurn) + .filter_by(conversation_id=normalized_conv_id) + .order_by(UserTurn.turn_number) + .all() + ) + except SQLAlchemyError as e: + logger.error( + "Database error occurred while retrieving conversation turns for %s.", + normalized_conv_id, + ) + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e + + # Build conversation turns from items and populate turns metadata + # Use conversation.created_at for legacy conversations without turn metadata + chat_history = build_conversation_turns_from_items( + items, db_turns, conversation.created_at + ) - # Conversations api has no support for message level timestamps return ConversationResponse( conversation_id=normalized_conv_id, chat_history=chat_history, @@ -472,12 +399,8 @@ async def delete_conversation_endpoint_handler( ) except APIConnectionError as e: - raise HTTPException( - status_code=status.HTTP_503_SERVICE_UNAVAILABLE, - detail=ServiceUnavailableResponse( - backend_name="Llama Stack", cause=str(e) - ).model_dump(), - ) from e + response = ServiceUnavailableResponse(backend_name="Llama Stack", cause=str(e)) + raise HTTPException(**response.model_dump()) from e except APIStatusError: logger.warning( diff --git a/src/app/endpoints/conversations_v2.py b/src/app/endpoints/conversations_v2.py index a9125f74e..f9e6ebbc8 100644 --- a/src/app/endpoints/conversations_v2.py +++ b/src/app/endpoints/conversations_v2.py @@ -15,10 +15,12 @@ BadRequestResponse, ConversationDeleteResponse, ConversationResponse, + ConversationTurn, ConversationsListResponseV2, ConversationUpdateResponse, ForbiddenResponse, InternalServerErrorResponse, + Message, NotFoundResponse, UnauthorizedResponse, ) @@ -131,7 +133,10 @@ async def get_conversation_endpoint_handler( conversation = configuration.conversation_cache.get( user_id, conversation_id, skip_userid_check ) - chat_history = [transform_chat_message(entry) for entry in conversation] + # Each entry in conversation is a single turn + chat_history: list[ConversationTurn] = [ + build_conversation_turn_from_cache_entry(entry) for entry in conversation + ] return ConversationResponse( conversation_id=conversation_id, chat_history=chat_history @@ -238,21 +243,34 @@ def 
check_conversation_existence(user_id: str, conversation_id: str) -> None: raise HTTPException(**response.model_dump()) -def transform_chat_message(entry: CacheEntry) -> dict[str, Any]: - """Transform the message read from cache into format used by response payload.""" - user_message = {"content": entry.query, "type": "user"} - assistant_message: dict[str, Any] = {"content": entry.response, "type": "assistant"} - - # If referenced_documents exist on the entry, add them to the assistant message - if entry.referenced_documents is not None: - assistant_message["referenced_documents"] = [ - doc.model_dump(mode="json") for doc in entry.referenced_documents - ] - - return { - "provider": entry.provider, - "model": entry.model, - "messages": [user_message, assistant_message], - "started_at": entry.started_at, - "completed_at": entry.completed_at, - } +def build_conversation_turn_from_cache_entry(entry: CacheEntry) -> ConversationTurn: + """Build a ConversationTurn object from a cache entry. + + Each CacheEntry represents a single conversation turn with user query, + assistant response, and optional tool calls/results. + + Args: + entry: Cache entry representing one turn in the conversation + + Returns: + ConversationTurn object with messages, tool_calls, tool_results, and timestamps + """ + # Create Message objects for user and assistant + messages = [ + Message(content=entry.query, type="user"), + Message(content=entry.response, type="assistant"), + ] + + # Extract tool calls and results (default to empty lists if None) + tool_calls = entry.tool_calls if entry.tool_calls else [] + tool_results = entry.tool_results if entry.tool_results else [] + + return ConversationTurn( + messages=messages, + tool_calls=tool_calls, + tool_results=tool_results, + provider=entry.provider, + model=entry.model, + started_at=entry.started_at, + completed_at=entry.completed_at, + ) diff --git a/src/app/endpoints/models.py b/src/app/endpoints/models.py index 627b12eeb..bfd52270a 100644 --- a/src/app/endpoints/models.py +++ b/src/app/endpoints/models.py @@ -3,7 +3,7 @@ import logging from typing import Annotated, Any -from fastapi import APIRouter, HTTPException, Request +from fastapi import APIRouter, HTTPException, Request, Query from fastapi.params import Depends from llama_stack_client import APIConnectionError @@ -13,6 +13,7 @@ from client import AsyncLlamaStackClientHolder from configuration import configuration from models.config import Action +from models.requests import ModelFilter from models.responses import ( ForbiddenResponse, InternalServerErrorResponse, @@ -57,7 +58,6 @@ def parse_llama_stack_model(model: Any) -> dict[str, Any]: "provider_resource_id": str(custom_metadata.get("provider_resource_id", "")), "model_type": model_type, } - return legacy_model @@ -77,6 +77,7 @@ def parse_llama_stack_model(model: Any) -> dict[str, Any]: async def models_endpoint_handler( request: Request, auth: Annotated[AuthTuple, Depends(get_auth_dependency())], + model_type: Annotated[ModelFilter, Query()], ) -> ModelsResponse: """ Handle requests to the /models endpoint. @@ -84,6 +85,11 @@ async def models_endpoint_handler( Process GET requests to the /models endpoint, returning a list of available models from the Llama Stack service. + Parameters: + request: The incoming HTTP request. + auth: Authentication tuple from the auth dependency. + model_type: Optional filter to return only models matching this type. + Raises: HTTPException: If unable to connect to the Llama Stack server or if model retrieval fails for any reason. 
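
[Note: ModelFilter is imported from models.requests but its definition is not part of this
patch. A minimal sketch of what such a query-string filter model presumably looks like,
assuming a single optional model_type field; the field name is taken from the handler code
in the hunk below, everything else is an assumption:

    from typing import Optional

    from pydantic import BaseModel


    class ModelFilter(BaseModel):
        """Hypothetical query-parameter model: optional filter on model type."""

        model_type: Optional[str] = None  # e.g. "llm" or "embedding"

With FastAPI's Annotated[ModelFilter, Query()] binding, a request such as
GET /models?model_type=embedding populates model_type.model_type, which the handler
then uses to filter the parsed model list.]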
@@ -107,8 +113,18 @@ async def models_endpoint_handler(
         client = AsyncLlamaStackClientHolder().get_client()
         # retrieve models
         models = await client.models.list()
-        # Parse models to legacy format
+
+        # Parse models to legacy format
         parsed_models = [parse_llama_stack_model(model) for model in models]
+
+        # Optional filtering by model type
+        if model_type.model_type is not None:
+            parsed_models = [
+                model
+                for model in parsed_models
+                if model["model_type"] == model_type.model_type
+            ]
+
         return ModelsResponse(models=parsed_models)
 
     # Connection to Llama Stack server failed
diff --git a/src/app/endpoints/query.py b/src/app/endpoints/query.py
index 9646df7d9..54d0b5de7 100644
--- a/src/app/endpoints/query.py
+++ b/src/app/endpoints/query.py
@@ -2,41 +2,35 @@
 """Handler for REST API call to provide answer to query using Response API."""
 
-import json
+import datetime
 import logging
-from typing import Annotated, Any, Optional, cast
-
-from fastapi import APIRouter, Depends, Request
-from llama_stack_api.openai_responses import (
-    OpenAIResponseMCPApprovalRequest,
-    OpenAIResponseMCPApprovalResponse,
-    OpenAIResponseObject,
-    OpenAIResponseOutput,
-    OpenAIResponseOutputMessageFileSearchToolCall,
-    OpenAIResponseOutputMessageFunctionToolCall,
-    OpenAIResponseOutputMessageMCPCall,
-    OpenAIResponseOutputMessageMCPListTools,
-    OpenAIResponseOutputMessageWebSearchToolCall,
+from typing import Annotated, Any, cast
+
+from fastapi import APIRouter, Depends, HTTPException, Request
+from llama_stack_api.openai_responses import OpenAIResponseObject
+from llama_stack_client import (
+    APIConnectionError,
+    AsyncLlamaStackClient,
+    APIStatusError as LLSApiStatusError,
 )
-from llama_stack_client import AsyncLlamaStackClient
-
-import constants
-import metrics
-from app.endpoints.query_old import (
-    query_endpoint_handler_base,
-    validate_attachments_metadata,
+from openai import (
+    APIStatusError as OpenAIAPIStatusError,
 )
+
 from authentication import get_auth_dependency
 from authentication.interface import AuthTuple
+from authorization.azure_token_manager import AzureEntraIDManager
 from authorization.middleware import authorize
-from configuration import AppConfig, configuration
-from constants import DEFAULT_RAG_TOOL
-from models.config import Action, ModelContextProtocolServer
+from client import AsyncLlamaStackClientHolder
+from configuration import configuration
+from models.config import Action
 from models.requests import QueryRequest
+
 from models.responses import (
     ForbiddenResponse,
     InternalServerErrorResponse,
     NotFoundResponse,
+    PromptTooLongResponse,
     QueryResponse,
     QuotaExceededResponse,
     ReferencedDocument,
@@ -46,25 +40,40 @@
 )
 from utils.endpoints import (
     check_configuration_loaded,
-    get_system_prompt,
-    get_topic_summary_system_prompt,
+    validate_and_retrieve_conversation,
+)
+from utils.mcp_headers import mcp_headers_dependency, McpHeaders
+from utils.query import (
+    consume_query_tokens,
+    handle_known_apistatus_errors,
+    store_query_results,
+    update_azure_token,
+    validate_attachments_metadata,
+)
+from utils.quota import check_tokens_available, get_available_quotas
+from utils.responses import (
+    build_tool_call_summary,
+    extract_text_from_response_output_item,
+    extract_token_usage,
+    get_topic_summary,
+    parse_referenced_documents,
+    prepare_responses_params,
 )
-from utils.mcp_headers import mcp_headers_dependency
-from utils.query import parse_arguments_string
-from utils.responses
import extract_text_from_response_output_item from utils.shields import ( append_turn_to_conversation, run_shield_moderation, ) -from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id -from utils.token_counter import TokenCounter -from utils.types import RAGChunk, ToolCallSummary, ToolResultSummary, TurnSummary +from utils.suid import normalize_conversation_id +from utils.types import ( + ResponsesApiParams, + TurnSummary, +) from utils.vector_search import perform_vector_search, format_rag_context_for_injection logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["query_v1"]) +router = APIRouter(tags=["query"]) -query_v2_response: dict[int | str, dict[str, Any]] = { +query_response: dict[int | str, dict[str, Any]] = { 200: QueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( examples=["missing header", "missing token"] @@ -83,840 +94,248 @@ } -def _build_tool_call_summary( # pylint: disable=too-many-return-statements,too-many-branches - output_item: OpenAIResponseOutput, - rag_chunks: list[RAGChunk], -) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]: - """Translate Responses API tool outputs into ToolCallSummary and ToolResultSummary records. - - Processes OpenAI response output items and extracts tool call and result information. - Also parses RAG chunks from file_search_call items and appends them to the provided list. - - Args: - output_item: An OpenAIResponseOutput item from the response.output array - rag_chunks: List to append extracted RAG chunks to (from file_search_call items) - Returns: - A tuple of (ToolCallSummary, ToolResultSummary) one of them possibly None - if current llama stack Responses API does not provide the information. - - Supported tool types: - - function_call: Function tool calls with parsed arguments (no result) - - file_search_call: File search operations with results (also extracts RAG chunks) - - web_search_call: Web search operations (incomplete) - - mcp_call: MCP calls with server labels - - mcp_list_tools: MCP server tool listings - - mcp_approval_request: MCP approval requests (no result) - - mcp_approval_response: MCP approval responses (no call) - """ - item_type = getattr(output_item, "type", None) - - if item_type == "function_call": - item = cast(OpenAIResponseOutputMessageFunctionToolCall, output_item) - return ( - ToolCallSummary( - id=item.call_id, - name=item.name, - args=parse_arguments_string(item.arguments), - type="function_call", - ), - None, # not supported by Responses API at all - ) - - if item_type == "file_search_call": - file_search_item = cast( - OpenAIResponseOutputMessageFileSearchToolCall, output_item - ) - extract_rag_chunks_from_file_search_item(file_search_item, rag_chunks) - response_payload: Optional[dict[str, Any]] = None - if file_search_item.results is not None: - response_payload = { - "results": [result.model_dump() for result in file_search_item.results] - } - return ToolCallSummary( - id=file_search_item.id, - name=DEFAULT_RAG_TOOL, - args={"queries": file_search_item.queries}, - type="file_search_call", - ), ToolResultSummary( - id=file_search_item.id, - status=file_search_item.status, - content=json.dumps(response_payload) if response_payload else "", - type="file_search_call", - round=1, - ) - - # Incomplete OpenAI Responses API definition in LLS: action attribute not supported yet - if item_type == "web_search_call": - web_search_item = cast( - OpenAIResponseOutputMessageWebSearchToolCall, output_item - ) - return ( - 
ToolCallSummary( - id=web_search_item.id, - name="web_search", - args={}, - type="web_search_call", - ), - ToolResultSummary( - id=web_search_item.id, - status=web_search_item.status, - content="", - type="web_search_call", - round=1, - ), - ) - - if item_type == "mcp_call": - mcp_call_item = cast(OpenAIResponseOutputMessageMCPCall, output_item) - args = parse_arguments_string(mcp_call_item.arguments) - if mcp_call_item.server_label: - args["server_label"] = mcp_call_item.server_label - content = ( - mcp_call_item.error - if mcp_call_item.error - else (mcp_call_item.output if mcp_call_item.output else "") - ) - - return ToolCallSummary( - id=mcp_call_item.id, - name=mcp_call_item.name, - args=args, - type="mcp_call", - ), ToolResultSummary( - id=mcp_call_item.id, - status="success" if mcp_call_item.error is None else "failure", - content=content, - type="mcp_call", - round=1, - ) - - if item_type == "mcp_list_tools": - mcp_list_tools_item = cast(OpenAIResponseOutputMessageMCPListTools, output_item) - tools_info = [ - { - "name": tool.name, - "description": tool.description, - "input_schema": tool.input_schema, - } - for tool in mcp_list_tools_item.tools - ] - content_dict = { - "server_label": mcp_list_tools_item.server_label, - "tools": tools_info, - } - return ( - ToolCallSummary( - id=mcp_list_tools_item.id, - name="mcp_list_tools", - args={"server_label": mcp_list_tools_item.server_label}, - type="mcp_list_tools", - ), - ToolResultSummary( - id=mcp_list_tools_item.id, - status="success", - content=json.dumps(content_dict), - type="mcp_list_tools", - round=1, - ), - ) - - if item_type == "mcp_approval_request": - approval_request_item = cast(OpenAIResponseMCPApprovalRequest, output_item) - args = parse_arguments_string(approval_request_item.arguments) - return ( - ToolCallSummary( - id=approval_request_item.id, - name=approval_request_item.name, - args=args, - type="tool_call", - ), - None, - ) - - if item_type == "mcp_approval_response": - approval_response_item = cast(OpenAIResponseMCPApprovalResponse, output_item) - content_dict = {} - if approval_response_item.reason: - content_dict["reason"] = approval_response_item.reason - return ( - None, - ToolResultSummary( - id=approval_response_item.approval_request_id, - status="success" if approval_response_item.approve else "denied", - content=json.dumps(content_dict), - type="mcp_approval_response", - round=1, - ), - ) - - return None, None - - -async def get_topic_summary( # pylint: disable=too-many-nested-blocks - question: str, client: AsyncLlamaStackClient, model_id: str -) -> str: - """ - Get a topic summary for a question using Responses API. - - This is the Responses API version of get_topic_summary, which uses - client.responses.create() instead of the Agent API. 
- - Args: - question: The question to generate a topic summary for - client: The AsyncLlamaStackClient to use for the request - model_id: The llama stack model ID (full format: provider/model) - - Returns: - str: The topic summary for the question - """ - topic_summary_system_prompt = get_topic_summary_system_prompt(configuration) - - # Use Responses API to generate topic summary - response = cast( - OpenAIResponseObject, - await client.responses.create( - input=question, - model=model_id, - instructions=topic_summary_system_prompt, - stream=False, - store=False, # Don't store topic summary requests - ), - ) - - # Extract text from response output - summary_text = "".join( - extract_text_from_response_output_item(output_item) - for output_item in response.output - ) - - return summary_text.strip() if summary_text else "" - - -@router.post("/query", responses=query_v2_response, summary="Query Endpoint Handler V1") +@router.post("/query", responses=query_response, summary="Query Endpoint Handler") @authorize(Action.QUERY) -async def query_endpoint_handler_v2( +async def query_endpoint_handler( request: Request, query_request: QueryRequest, auth: Annotated[AuthTuple, Depends(get_auth_dependency())], - mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency), + mcp_headers: McpHeaders = Depends(mcp_headers_dependency), ) -> QueryResponse: """ Handle request to the /query endpoint using Responses API. - This is a wrapper around query_endpoint_handler_base that provides - the Responses API specific retrieve_response and get_topic_summary functions. + Processes a POST request to a query endpoint, forwarding the + user's query to a selected Llama Stack LLM and returning the generated response. Returns: QueryResponse: Contains the conversation ID and the LLM-generated response. + + Raises: + HTTPException: + - 401: Unauthorized - Missing or invalid credentials + - 403: Forbidden - Insufficient permissions or model override not allowed + - 404: Not Found - Conversation, model, or provider not found + - 413: Prompt too long - Prompt exceeded model's context window size + - 422: Unprocessable Entity - Request validation failed + - 429: Quota limit exceeded - The token quota for model or user has been exceeded + - 500: Internal Server Error - Configuration not loaded or other server errors + - 503: Service Unavailable - Unable to connect to Llama Stack backend """ check_configuration_loaded(configuration) - return await query_endpoint_handler_base( - request=request, - query_request=query_request, - auth=auth, - mcp_headers=mcp_headers, - retrieve_response_func=retrieve_response, - get_topic_summary_func=get_topic_summary, - ) + started_at = datetime.datetime.now(datetime.timezone.utc).strftime( + "%Y-%m-%dT%H:%M:%SZ" + ) + user_id, _, _skip_userid_check, token = auth + # Check token availability + check_tokens_available(configuration.quota_limiters, user_id) -async def retrieve_response( # pylint: disable=too-many-locals,too-many-branches,too-many-arguments,too-many-statements - client: AsyncLlamaStackClient, - model_id: str, - query_request: QueryRequest, - token: str, - mcp_headers: Optional[dict[str, dict[str, str]]] = None, - *, - provider_id: str = "", -) -> tuple[TurnSummary, str, list[ReferencedDocument], TokenCounter]: - """ - Retrieve response from LLMs and agents. 
+    # Validate attachments if provided
+    if query_request.attachments:
+        validate_attachments_metadata(query_request.attachments)
 
-    Retrieves a response from the Llama Stack LLM or agent for a
-    given query, handling shield configuration, tool usage, and
-    attachment validation.
+    # Retrieve conversation if conversation_id is provided
+    user_conversation = None
+    if query_request.conversation_id:
+        logger.debug(
+            "Conversation ID specified in query: %s", query_request.conversation_id
+        )
+        normalized_conv_id = normalize_conversation_id(query_request.conversation_id)
+        user_conversation = validate_and_retrieve_conversation(
+            normalized_conv_id=normalized_conv_id,
+            user_id=user_id,
+            others_allowed=Action.READ_OTHERS_CONVERSATIONS
+            in request.state.authorized_actions,
+        )
 
-    This function configures system prompts, shields, and toolgroups
-    (including RAG and MCP integration) as needed based on
-    the query request and system configuration. It
-    validates attachments, manages conversation and session
-    context, and processes MCP headers for multi-component
-    processing. Corresponding metrics are updated.
+    client = AsyncLlamaStackClientHolder().get_client()
 
-    Parameters:
-        client (AsyncLlamaStackClient): The AsyncLlamaStackClient to use for the request.
-        model_id (str): The identifier of the LLM model to use.
-        query_request (QueryRequest): The user's query and associated metadata.
-        token (str): The authentication token for authorization.
-        mcp_headers (dict[str, dict[str, str]], optional): Headers for multi-component processing.
-        provider_id (str): The identifier of the LLM provider to use.
+    doc_ids_from_chunks: list[ReferencedDocument] = []
+    pre_rag_chunks: list[Any] = []  # RAG chunks gathered by direct vector search
 
-    Returns:
-        tuple[TurnSummary, str]: A tuple containing a summary of the LLM or agent's response content
-        and the conversation ID, the list of parsed referenced documents,
-        and token usage information.
- """ - # use system prompt from request or default one - system_prompt = get_system_prompt(query_request, configuration) - logger.debug("Using system prompt: %s", system_prompt) + _, _, doc_ids_from_chunks, pre_rag_chunks = await perform_vector_search( + client, query_request, configuration + ) - # TODO(lucasagomes): redact attachments content before sending to LLM - # if attachments are provided, validate them - if query_request.attachments: - validate_attachments_metadata(query_request.attachments) + rag_context = format_rag_context_for_injection(pre_rag_chunks) + if rag_context: + # safest: mutate a local copy so we don't surprise other logic + query_request = query_request.model_copy(deep=True) # pydantic v2 + query_request.query = query_request.query + rag_context - # Prepare tools for responses API - skip RAG tools since we're doing direct vector query - toolgroups = await prepare_tools_for_responses_api( + # Prepare API request parameters + responses_params = await prepare_responses_params( client, query_request, + user_conversation, token, - configuration, - mcp_headers=mcp_headers, - skip_rag_tools=True, - ) - - # Prepare input for Responses API - # Convert attachments to text and concatenate with query - input_text = query_request.query - if query_request.attachments: - for attachment in query_request.attachments: - # Append attachment content with type label - input_text += ( - f"\n\n[Attachment: {attachment.attachment_type}]\n{attachment.content}" - ) - - # Handle conversation ID for Responses API - # Create conversation upfront if not provided - conversation_id = query_request.conversation_id - if conversation_id: - # Conversation ID was provided - convert to llama-stack format - logger.debug("Using existing conversation ID: %s", conversation_id) - llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id) - else: - # No conversation_id provided - create a new conversation first - logger.debug("No conversation_id provided, creating new conversation") - - conversation = await client.conversations.create(metadata={}) - llama_stack_conv_id = conversation.id - # Store the normalized version for later use - conversation_id = normalize_conversation_id(llama_stack_conv_id) - logger.info( - "Created new conversation with ID: %s (normalized: %s)", - llama_stack_conv_id, - conversation_id, - ) - - # Run shield moderation before calling LLM - moderation_result = await run_shield_moderation(client, input_text) - if moderation_result.blocked: - violation_message = moderation_result.message or "" - await append_turn_to_conversation( - client, llama_stack_conv_id, input_text, violation_message - ) - summary = TurnSummary( - llm_response=violation_message, - tool_calls=[], - tool_results=[], - rag_chunks=[], - ) - return ( - summary, - normalize_conversation_id(conversation_id), - [], - TokenCounter(), - ) - - # Extract RAG chunks from vector DB query response BEFORE calling responses API - _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( - client, query_request, configuration + mcp_headers, + stream=False, + store=True, ) - # Format RAG context for injection into user message - rag_context = format_rag_context_for_injection(rag_chunks) + # Handle Azure token refresh if needed + if ( + responses_params.model.startswith("azure") + and AzureEntraIDManager().is_entra_id_configured + and AzureEntraIDManager().is_token_expired + and AzureEntraIDManager().refresh_token() + ): + client = await update_azure_token(client) - # Inject RAG context into input text - if 
rag_context: - input_text = input_text + rag_context - - # Create OpenAI response using responses API - create_kwargs: dict[str, Any] = { - "input": input_text, - "model": model_id, - "instructions": system_prompt, - "tools": cast(Any, toolgroups), - "stream": False, - "store": True, - "conversation": llama_stack_conv_id, - } - - response = await client.responses.create(**create_kwargs) - response = cast(OpenAIResponseObject, response) - logger.debug( - "Received response with ID: %s, conversation ID: %s, output items: %d", - response.id, - conversation_id, - len(response.output), - ) + # Retrieve response using Responses API + turn_summary = await retrieve_response(client, responses_params) - # Process OpenAI response format - llm_response = "" - tool_calls: list[ToolCallSummary] = [] - tool_results: list[ToolResultSummary] = [] - response_api_rag_chunks: list[RAGChunk] = [] - for output_item in response.output: - message_text = extract_text_from_response_output_item(output_item) - if message_text: - llm_response += message_text + if pre_rag_chunks: + turn_summary.rag_chunks = pre_rag_chunks + (turn_summary.rag_chunks or []) - tool_call, tool_result = _build_tool_call_summary( - output_item, response_api_rag_chunks + if doc_ids_from_chunks: + turn_summary.referenced_documents = parse_referenced_docs( + doc_ids_from_chunks + (turn_summary.referenced_documents or []) ) - if tool_call: - tool_calls.append(tool_call) - if tool_result: - tool_results.append(tool_result) - # Merge RAG chunks from direct vector query with those from responses API - all_rag_chunks = rag_chunks + response_api_rag_chunks - logger.info( - "Combined RAG chunks: %d from direct query + %d from responses API = %d total", - len(rag_chunks), - len(response_api_rag_chunks), - len(all_rag_chunks), - ) - - logger.info( - "Response processing complete - Tool calls: %d, Response length: %d chars", - len(tool_calls), - len(llm_response), + # Get topic summary for new conversation + if not user_conversation and query_request.generate_topic_summary: + logger.debug("Generating topic summary for new conversation") + topic_summary = await get_topic_summary( + query_request.query, client, responses_params.model + ) + else: + topic_summary = None + + logger.info("Consuming tokens") + consume_query_tokens( + user_id=user_id, + model_id=responses_params.model, + token_usage=turn_summary.token_usage, + configuration=configuration, ) - summary = TurnSummary( - llm_response=llm_response, - tool_calls=tool_calls, - tool_results=tool_results, - rag_chunks=all_rag_chunks, + logger.info("Getting available quotas") + available_quotas = get_available_quotas( + quota_limiters=configuration.quota_limiters, user_id=user_id ) - # Extract referenced documents and token usage from Responses API response - # Merge with documents from direct vector query - response_referenced_documents = parse_referenced_documents_from_responses_api( - response + completed_at = datetime.datetime.now(datetime.timezone.utc).strftime( + "%Y-%m-%dT%H:%M:%SZ" ) - all_referenced_documents = doc_ids_from_chunks + response_referenced_documents - logger.info( - "Combined referenced documents: %d from direct query + %d from responses API = %d total", - len(doc_ids_from_chunks), - len(response_referenced_documents), - len(all_referenced_documents), - ) - model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id - token_usage = extract_token_usage_from_responses_api( - response, model_label, provider_id, system_prompt + conversation_id = 
normalize_conversation_id(responses_params.conversation)
+
+    logger.info("Storing query results")
+    store_query_results(
+        user_id=user_id,
+        conversation_id=conversation_id,
+        model=responses_params.model,
+        started_at=started_at,
+        completed_at=completed_at,
+        summary=turn_summary,
+        query_request=query_request,
+        configuration=configuration,
+        skip_userid_check=_skip_userid_check,
+        topic_summary=topic_summary,
     )
 
-    if not summary.llm_response:
-        logger.warning(
-            "Response lacks content (conversation_id=%s)",
-            conversation_id,
-        )
-
-    return (
-        summary,
-        normalize_conversation_id(conversation_id),
-        all_referenced_documents,
-        token_usage,
+    logger.info("Building final response")
+    return QueryResponse(
+        conversation_id=conversation_id,
+        response=turn_summary.llm_response,
+        tool_calls=turn_summary.tool_calls,
+        tool_results=turn_summary.tool_results,
+        rag_chunks=turn_summary.rag_chunks,
+        referenced_documents=turn_summary.referenced_documents,
+        truncated=False,
+        input_tokens=turn_summary.token_usage.input_tokens,
+        output_tokens=turn_summary.token_usage.output_tokens,
+        available_quotas=available_quotas,
     )
 
 
-def extract_rag_chunks_from_file_search_item(
-    item: OpenAIResponseOutputMessageFileSearchToolCall,
-    rag_chunks: list[RAGChunk],
-) -> None:
-    """Extract RAG chunks from a file search tool call item and append to rag_chunks.
-
-    Args:
-        item: The file search tool call item.
-        rag_chunks: List to append extracted RAG chunks to.
-    """
-    if item.results is not None:
-        for result in item.results:
-            rag_chunk = RAGChunk(
-                content=result.text, source=result.filename, score=result.score
-            )
-            rag_chunks.append(rag_chunk)
-
-
-def parse_rag_chunks_from_responses_api(
-    response_obj: OpenAIResponseObject,
-) -> list[RAGChunk]:
-    """
-    Extract rag_chunks from the llama-stack OpenAI response.
-
-    Args:
-        response_obj: The ResponseObject from OpenAI compatible response API in llama-stack.
-
-    Returns:
-        List of RAGChunk with content, source, score
-    """
-    rag_chunks: list[RAGChunk] = []
-
-    for output_item in response_obj.output:
-        item_type = getattr(output_item, "type", None)
-        if item_type == "file_search_call":
-            item = cast(OpenAIResponseOutputMessageFileSearchToolCall, output_item)
-            extract_rag_chunks_from_file_search_item(item, rag_chunks)
-
-    return rag_chunks
-
-
-def parse_referenced_documents_from_responses_api(
-    response: OpenAIResponseObject,  # pylint: disable=unused-argument
+def parse_referenced_docs(
+    docs: list[ReferencedDocument],
 ) -> list[ReferencedDocument]:
-    """
-    Parse referenced documents from OpenAI Responses API response.
-
-    Args:
-        response: The OpenAI Response API response object
-
-    Returns:
-        list[ReferencedDocument]: List of referenced documents with doc_url and doc_title
-    """
-    documents: list[ReferencedDocument] = []
-    # Use a set to track unique documents by (doc_url, doc_title) tuple
-    seen_docs: set[tuple[Optional[str], Optional[str]]] = set()
-
-    # Handle None response (e.g., when agent fails)
-    if response is None or not response.output:
-        return documents
+    """Return documents de-duplicated by (doc_url, doc_title), preserving order."""
+    seen: set[tuple[str | None, str | None]] = set()
+    out: list[ReferencedDocument] = []
+    for d in docs:
+        key = (d.doc_url, d.doc_title)
+        if key in seen:
+            continue
+        seen.add(key)
+        out.append(d)
+    return out
 
-    for output_item in response.output:
-        item_type = getattr(output_item, "type", None)
-
-        # 1.
Parse from file_search_call results - if item_type == "file_search_call": - results = getattr(output_item, "results", []) or [] - for result in results: - # Handle both object and dict access - if isinstance(result, dict): - attributes = result.get("attributes", {}) - else: - attributes = getattr(result, "attributes", {}) - - # Try to get URL from attributes - # Look for common URL fields in attributes - doc_url = ( - attributes.get("doc_url") - or attributes.get("docs_url") - or attributes.get("url") - or attributes.get("link") - ) - doc_title = attributes.get("title") - - if doc_title or doc_url: - # Treat empty string as None for URL to satisfy Optional[AnyUrl] - final_url = doc_url if doc_url else None - if (final_url, doc_title) not in seen_docs: - documents.append( - ReferencedDocument(doc_url=final_url, doc_title=doc_title) - ) - seen_docs.add((final_url, doc_title)) - - return documents - - -def extract_token_usage_from_responses_api( - response: OpenAIResponseObject, - model: str, - provider: str, - system_prompt: str = "", # pylint: disable=unused-argument -) -> TokenCounter: +async def retrieve_response( # pylint: disable=too-many-locals + client: AsyncLlamaStackClient, + responses_params: ResponsesApiParams, +) -> TurnSummary: """ - Extract token usage from OpenAI Responses API response and update metrics. - - This function extracts token usage information from the Responses API response - object and updates Prometheus metrics. If usage information is not available, - it returns zero values without estimation. + Retrieve response from LLMs and agents. - Note: When llama stack internally uses chat_completions, the usage field may be - empty or a dict. This is expected and will be populated in future llama stack versions. + Retrieves a response from the Llama Stack LLM using the Responses API. + This function processes the prepared request and returns the LLM response. - Args: - response: The OpenAI Response API response object - model: The model identifier for metrics labeling - provider: The provider identifier for metrics labeling - system_prompt: The system prompt used (unused, kept for compatibility) + Parameters: + client: The AsyncLlamaStackClient to use for the request. + responses_params: The Responses API parameters. 
Returns: - TokenCounter: Token usage information with input_tokens and output_tokens + TurnSummary: Summary of the LLM response content """ - token_counter = TokenCounter() - token_counter.llm_calls = 1 - - # Extract usage from the response if available - # Note: usage attribute exists at runtime but may not be in type definitions - usage = getattr(response, "usage", None) - if usage: - try: - # Handle both dict and object cases due to llama_stack inconsistency: - # - When llama_stack converts to chat_completions internally, usage is a dict - # - When using proper Responses API, usage should be an object - # TODO: Remove dict handling once llama_stack standardizes on object type # pylint: disable=fixme - if isinstance(usage, dict): - input_tokens = usage.get("input_tokens", 0) - output_tokens = usage.get("output_tokens", 0) - else: - # Object with attributes (expected final behavior) - input_tokens = getattr(usage, "input_tokens", 0) - output_tokens = getattr(usage, "output_tokens", 0) - # Only set if we got valid values - if input_tokens or output_tokens: - token_counter.input_tokens = input_tokens or 0 - token_counter.output_tokens = output_tokens or 0 - - logger.debug( - "Extracted token usage from Responses API: input=%d, output=%d", - token_counter.input_tokens, - token_counter.output_tokens, - ) - - # Update Prometheus metrics only when we have actual usage data - try: - metrics.llm_token_sent_total.labels(provider, model).inc( - token_counter.input_tokens - ) - metrics.llm_token_received_total.labels(provider, model).inc( - token_counter.output_tokens - ) - except (AttributeError, TypeError, ValueError) as e: - logger.warning("Failed to update token metrics: %s", e) - _increment_llm_call_metric(provider, model) - else: - logger.debug( - "Usage object exists but tokens are 0 or None, treating as no usage info" - ) - # Still increment the call counter - _increment_llm_call_metric(provider, model) - except (AttributeError, KeyError, TypeError) as e: - logger.warning( - "Failed to extract token usage from response.usage: %s. Usage value: %s", - e, - usage, - ) - # Still increment the call counter - _increment_llm_call_metric(provider, model) - else: - # No usage information available - this is expected when llama stack - # internally converts to chat_completions - logger.debug( - "No usage information in Responses API response, token counts will be 0" - ) - # token_counter already initialized with 0 values - # Still increment the call counter - _increment_llm_call_metric(provider, model) - - return token_counter + summary = TurnSummary() - -def _increment_llm_call_metric(provider: str, model: str) -> None: - """Safely increment LLM call metric.""" try: - metrics.llm_calls_total.labels(provider, model).inc() - except (AttributeError, TypeError, ValueError) as e: - logger.warning("Failed to update LLM call metric: %s", e) - - -def get_rag_tools( - vector_store_ids: list[str], solr_params: Optional[dict[str, Any]] = None -) -> Optional[list[dict[str, Any]]]: - """ - Convert vector store IDs to tools format for Responses API. 
- - Args: - vector_store_ids: List of vector store identifiers - solr_params: Optional Solr filtering parameters - - Returns: - Optional[list[dict[str, Any]]]: List containing file_search tool configuration, - or None if no vector stores provided - """ - if not vector_store_ids: - return None - - tool_config = { - "type": "file_search", - "vector_store_ids": vector_store_ids, - "max_num_results": 10, - } - - if solr_params: - tool_config["solr"] = solr_params - - return [tool_config] - - -def get_mcp_tools( - mcp_servers: list[ModelContextProtocolServer], - token: str | None = None, - mcp_headers: dict[str, dict[str, str]] | None = None, -) -> list[dict[str, Any]]: - """ - Convert MCP servers to tools format for Responses API. - - Args: - mcp_servers: List of MCP server configurations - token: Optional authentication token for MCP server authorization - mcp_headers: Optional per-request headers for MCP servers, keyed by server URL - - Returns: - list[dict[str, Any]]: List of MCP tool definitions with server - details and optional auth headers - - The way it works is we go through all the defined mcp servers and - create a tool definitions for each of them. If MCP server definition - has a non-empty resolved_authorization_headers we create invocation - headers, following the algorithm: - 1. If the header value is 'kubernetes' the header value is a k8s token - 2. If the header value is 'client': - find the value for a given MCP server/header in mcp_headers. - if the value is not found omit this header, otherwise use found value - 3. otherwise use the value from resolved_authorization_headers directly - - This algorithm allows to: - 1. Use static global header values, provided by configuration - 2. Use user specific k8s token, which will work for the majority of kubernetes - based MCP servers - 3. 
Use user specific tokens (passed by the client) for user specific MCP headers - """ - - def _get_token_value(original: str, header: str) -> str | None: - """Convert to header value.""" - match original: - case constants.MCP_AUTH_KUBERNETES: - # use k8s token - if token is None or token == "": - return None - return f"Bearer {token}" - case constants.MCP_AUTH_CLIENT: - # use client provided token - if mcp_headers is None: - return None - c_headers = mcp_headers.get(mcp_server.name, None) - if c_headers is None: - return None - return c_headers.get(header, None) - case _: - # use provided - return original - - tools = [] - for mcp_server in mcp_servers: - # Base tool definition - tool_def = { - "type": "mcp", - "server_label": mcp_server.name, - "server_url": mcp_server.url, - "require_approval": "never", - } - - # Build headers - headers = {} - for name, value in mcp_server.resolved_authorization_headers.items(): - # for each defined header - h_value = _get_token_value(value, name) - # only add the header if we got value - if h_value is not None: - headers[name] = h_value - - # Skip server if auth headers were configured but not all could be resolved - if mcp_server.authorization_headers and len(headers) != len( - mcp_server.authorization_headers - ): - logger.warning( - "Skipping MCP server %s: required %d auth headers but only resolved %d", - mcp_server.name, - len(mcp_server.authorization_headers), - len(headers), + moderation_result = await run_shield_moderation(client, responses_params.input) + if moderation_result.blocked: + # Handle shield moderation blocking + violation_message = moderation_result.message or "" + await append_turn_to_conversation( + client, + responses_params.conversation, + responses_params.input, + violation_message, ) - continue - - if len(headers) > 0: - # add headers to tool definition - tool_def["headers"] = headers # type: ignore[index] - # collect tools info - tools.append(tool_def) - return tools - - -async def prepare_tools_for_responses_api( - client: AsyncLlamaStackClient, - query_request: QueryRequest, - token: str, - config: AppConfig, - *, - mcp_headers: Optional[dict[str, dict[str, str]]] = None, - skip_rag_tools: bool = False, -) -> Optional[list[dict[str, Any]]]: - """ - Prepare tools for Responses API including RAG and MCP tools. + summary.llm_response = violation_message + return summary + response = await client.responses.create(**responses_params.model_dump()) + response = cast(OpenAIResponseObject, response) + + except RuntimeError as e: # library mode wraps 413 into runtime error + if "context_length" in str(e).lower(): + error_response = PromptTooLongResponse(model=responses_params.model) + raise HTTPException(**error_response.model_dump()) from e + raise e + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**error_response.model_dump()) from e + except (LLSApiStatusError, OpenAIAPIStatusError) as e: + error_response = handle_known_apistatus_errors(e, responses_params.model) + raise HTTPException(**error_response.model_dump()) from e - This function retrieves vector stores and combines them with MCP - server tools to create a unified toolgroups list for the Responses API. 
+ # Process OpenAI response format + for output_item in response.output: + message_text = extract_text_from_response_output_item(output_item) + if message_text: + summary.llm_response += message_text - Args: - client: The Llama Stack client instance - query_request: The user's query request - token: Authentication token for MCP tools - config: Configuration object containing MCP server settings - mcp_headers: Per-request headers for MCP servers - skip_rag_tools: If True, skip adding RAG tools (used when doing direct vector querying) + tool_call, tool_result = build_tool_call_summary( + output_item, summary.rag_chunks + ) + if tool_call: + summary.tool_calls.append(tool_call) + if tool_result: + summary.tool_results.append(tool_result) - Returns: - Optional[list[dict[str, Any]]]: List of tool configurations for the - Responses API, or None if no_tools is True or no tools are available - """ - if query_request.no_tools: - return None - - toolgroups = [] - - # Add RAG tools if not skipped - if not skip_rag_tools: - # Get vector stores for RAG tools - use specified ones or fetch all - if query_request.vector_store_ids: - vector_store_ids = query_request.vector_store_ids - logger.info("Using specified vector_store_ids: %s", vector_store_ids) - else: - vector_store_ids = [ - vector_store.id - for vector_store in (await client.vector_stores.list()).data - ] - logger.info("Using all available vector_store_ids: %s", vector_store_ids) - - # Add RAG tools if vector stores are available - if vector_store_ids: - rag_tools = get_rag_tools(vector_store_ids) - if rag_tools: - logger.info("rag_tool are: %s", rag_tools) - toolgroups.extend(rag_tools) - else: - logger.info("No RAG tools configured") - else: - logger.info("No vector stores available for RAG tools") - else: - logger.info("Skipping RAG tools - using direct vector querying instead") + logger.info( + "Response processing complete - Tool calls: %d, Response length: %d chars", + len(summary.tool_calls), + len(summary.llm_response), + ) - # Add MCP server tools - mcp_tools = get_mcp_tools(config.mcp_servers, token, mcp_headers) - if mcp_tools: - toolgroups.extend(mcp_tools) - logger.debug( - "Configured %d MCP tools: %s", - len(mcp_tools), - [tool.get("server_label", "unknown") for tool in mcp_tools], - ) - # Convert empty list to None for consistency with existing behavior - if not toolgroups: - return None + # Extract referenced documents and token usage from Responses API response + summary.referenced_documents = parse_referenced_documents(response) + summary.token_usage = extract_token_usage(response, responses_params.model) - return toolgroups + return summary diff --git a/src/app/endpoints/query_old.py b/src/app/endpoints/query_old.py deleted file mode 100644 index 251b346d7..000000000 --- a/src/app/endpoints/query_old.py +++ /dev/null @@ -1,579 +0,0 @@ -"""Handler for REST API call to provide answer to query.""" - -import logging -from datetime import UTC, datetime -from typing import Annotated, Any, Optional - -from fastapi import APIRouter, Depends, HTTPException, Request -from llama_stack_api.shields import Shield -from llama_stack_client import ( - APIConnectionError, - APIStatusError, - RateLimitError, # type: ignore -) -from llama_stack_client.types.model_list_response import ModelListResponse -from sqlalchemy.exc import SQLAlchemyError - -import constants -import metrics -from app.database import get_session -from authentication import get_auth_dependency -from authentication.interface import AuthTuple -from 
authorization.azure_token_manager import AzureEntraIDManager -from client import AsyncLlamaStackClientHolder -from configuration import configuration -from models.cache_entry import CacheEntry -from models.config import Action -from models.database.conversations import UserConversation -from models.requests import Attachment, QueryRequest -from models.responses import ( - ForbiddenResponse, - InternalServerErrorResponse, - NotFoundResponse, - PromptTooLongResponse, - QueryResponse, - QuotaExceededResponse, - ServiceUnavailableResponse, - UnauthorizedResponse, - UnprocessableEntityResponse, -) -from utils.endpoints import ( - check_configuration_loaded, - store_conversation_into_cache, - validate_conversation_ownership, - validate_model_provider_override, -) -from utils.quota import ( - check_tokens_available, - consume_tokens, - get_available_quotas, -) -from utils.suid import normalize_conversation_id -from utils.transcripts import store_transcript - -logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["query"]) - - -query_response: dict[int | str, dict[str, Any]] = { - 200: QueryResponse.openapi_response(), - 401: UnauthorizedResponse.openapi_response( - examples=["missing header", "missing token"] - ), - 403: ForbiddenResponse.openapi_response( - examples=["endpoint", "conversation read", "model override"] - ), - 404: NotFoundResponse.openapi_response( - examples=["model", "conversation", "provider"] - ), - 413: PromptTooLongResponse.openapi_response(), - 422: UnprocessableEntityResponse.openapi_response(), - 429: QuotaExceededResponse.openapi_response(), - 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), - 503: ServiceUnavailableResponse.openapi_response(), -} - - -def is_transcripts_enabled() -> bool: - """Check if transcripts is enabled. - - Returns: - bool: True if transcripts is enabled, False otherwise. 
- """ - return configuration.user_data_collection_configuration.transcripts_enabled - - -def persist_user_conversation_details( - user_id: str, - conversation_id: str, - model: str, - provider_id: str, - topic_summary: Optional[str], -) -> None: - """Associate conversation to user in the database.""" - # Normalize the conversation ID (strip 'conv_' prefix if present) - normalized_id = normalize_conversation_id(conversation_id) - logger.debug( - "persist_user_conversation_details - original conv_id: %s, normalized: %s, user: %s", - conversation_id, - normalized_id, - user_id, - ) - - with get_session() as session: - existing_conversation = ( - session.query(UserConversation).filter_by(id=normalized_id).first() - ) - - if not existing_conversation: - conversation = UserConversation( - id=normalized_id, - user_id=user_id, - last_used_model=model, - last_used_provider=provider_id, - topic_summary=topic_summary, - message_count=1, - ) - session.add(conversation) - logger.debug( - "Associated conversation %s to user %s", normalized_id, user_id - ) - else: - existing_conversation.last_used_model = model - existing_conversation.last_used_provider = provider_id - existing_conversation.last_message_at = datetime.now(UTC) - existing_conversation.message_count += 1 - logger.debug( - "Updating existing conversation in DB - ID: %s, User: %s, Messages: %d", - normalized_id, - user_id, - existing_conversation.message_count, - ) - - session.commit() - logger.debug( - "Successfully committed conversation %s to database", normalized_id - ) - - -def evaluate_model_hints( - user_conversation: Optional[UserConversation], - query_request: QueryRequest, -) -> tuple[Optional[str], Optional[str]]: - """Evaluate model hints from user conversation.""" - model_id: Optional[str] = query_request.model - provider_id: Optional[str] = query_request.provider - - if user_conversation is not None: - if query_request.model is not None: - if query_request.model != user_conversation.last_used_model: - logger.debug( - "Model specified in request: %s, preferring it over user conversation model %s", - query_request.model, - user_conversation.last_used_model, - ) - else: - logger.debug( - "No model specified in request, using latest model from user conversation: %s", - user_conversation.last_used_model, - ) - model_id = user_conversation.last_used_model - - if query_request.provider is not None: - if query_request.provider != user_conversation.last_used_provider: - logger.debug( - "Provider specified in request: %s, " - "preferring it over user conversation provider %s", - query_request.provider, - user_conversation.last_used_provider, - ) - else: - logger.debug( - "No provider specified in request, " - "using latest provider from user conversation: %s", - user_conversation.last_used_provider, - ) - provider_id = user_conversation.last_used_provider - - return model_id, provider_id - - -async def query_endpoint_handler_base( # pylint: disable=R0914 - request: Request, - query_request: QueryRequest, - auth: Annotated[AuthTuple, Depends(get_auth_dependency())], - mcp_headers: dict[str, dict[str, str]], - retrieve_response_func: Any, - get_topic_summary_func: Any, -) -> QueryResponse: - """ - Handle query endpoints (shared by Agent API and Responses API). - - Processes a POST request to a query endpoint, forwarding the - user's query to a selected Llama Stack LLM and returning the generated response. 
- - Validates configuration and authentication, selects the appropriate model - and provider, retrieves the LLM response, updates metrics, and optionally - stores a transcript of the interaction. Handles connection errors to the - Llama Stack service by returning an HTTP 500 error. - - Args: - request: The FastAPI request object - query_request: The query request containing the user's question - auth: Authentication tuple from dependency - mcp_headers: MCP headers from dependency - retrieve_response_func: The retrieve_response function to use (Agent or Responses API) - get_topic_summary_func: The get_topic_summary function to use (Agent or Responses API) - - Returns: - QueryResponse: Contains the conversation ID and the LLM-generated response. - """ - check_configuration_loaded(configuration) - - # Enforce RBAC: optionally disallow overriding model/provider in requests - validate_model_provider_override(query_request, request.state.authorized_actions) - - # log Llama Stack configuration - logger.info("Llama stack config: %s", configuration.llama_stack_configuration) - - user_id, _, _skip_userid_check, token = auth - - started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - user_conversation: Optional[UserConversation] = None - if query_request.conversation_id: - logger.debug( - "Conversation ID specified in query: %s", query_request.conversation_id - ) - # Normalize the conversation ID for database lookup (strip conv_ prefix if present) - normalized_conv_id_for_lookup = normalize_conversation_id( - query_request.conversation_id - ) - user_conversation = validate_conversation_ownership( - user_id=user_id, - conversation_id=normalized_conv_id_for_lookup, - others_allowed=( - Action.QUERY_OTHERS_CONVERSATIONS in request.state.authorized_actions - ), - ) - - if user_conversation is None: - logger.warning( - "Conversation %s not found for user %s", - query_request.conversation_id, - user_id, - ) - response = NotFoundResponse( - resource="conversation", resource_id=query_request.conversation_id - ) - raise HTTPException(**response.model_dump()) - - else: - logger.debug("Query does not contain conversation ID") - - try: - check_tokens_available(configuration.quota_limiters, user_id) - # try to get Llama Stack client - client = AsyncLlamaStackClientHolder().get_client() - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - await client.models.list(), - *evaluate_model_hints( - user_conversation=user_conversation, query_request=query_request - ), - ) - - if ( - provider_id == "azure" - and AzureEntraIDManager().is_entra_id_configured - and AzureEntraIDManager().is_token_expired - and AzureEntraIDManager().refresh_token() - ): - if AsyncLlamaStackClientHolder().is_library_client: - client = await AsyncLlamaStackClientHolder().reload_library_client() - else: - azure_config = next( - p.config - for p in await client.providers.list() - if p.provider_type == "remote::azure" - ) - client = AsyncLlamaStackClientHolder().update_provider_data( - { - "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(), - "azure_api_base": str(azure_config.get("api_base")), - } - ) - - summary, conversation_id, referenced_documents, token_usage = ( - await retrieve_response_func( - client, - llama_stack_model_id, - query_request, - token, - mcp_headers=mcp_headers, - provider_id=provider_id, - ) - ) - - # Get the initial topic summary for the conversation - topic_summary = None - with get_session() as session: - existing_conversation = ( - 
session.query(UserConversation).filter_by(id=conversation_id).first() - ) - if not existing_conversation: - # Check if topic summary should be generated (default: True) - should_generate = query_request.generate_topic_summary - - if should_generate: - logger.debug("Generating topic summary for new conversation") - topic_summary = await get_topic_summary_func( - query_request.query, client, llama_stack_model_id - ) - else: - logger.debug( - "Topic summary generation disabled by request parameter" - ) - topic_summary = None - # Convert RAG chunks to dictionary format once for reuse - logger.info("Processing RAG chunks...") - rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] - - if not is_transcripts_enabled(): - logger.debug("Transcript collection is disabled in the configuration") - else: - store_transcript( - user_id=user_id, - conversation_id=conversation_id, - model_id=model_id, - provider_id=provider_id, - query_is_valid=True, # TODO(lucasagomes): implement as part of query validation - query=query_request.query, - query_request=query_request, - summary=summary, - rag_chunks=rag_chunks_dict, - truncated=False, # TODO(lucasagomes): implement truncation as part of quota work - attachments=query_request.attachments or [], - ) - - logger.info("Persisting conversation details...") - persist_user_conversation_details( - user_id=user_id, - conversation_id=conversation_id, - model=model_id, - provider_id=provider_id, - topic_summary=topic_summary, - ) - - completed_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - cache_entry = CacheEntry( - query=query_request.query, - response=summary.llm_response, - provider=provider_id, - model=model_id, - started_at=started_at, - completed_at=completed_at, - referenced_documents=referenced_documents if referenced_documents else None, - tool_calls=summary.tool_calls if summary.tool_calls else None, - tool_results=summary.tool_results if summary.tool_results else None, - ) - - consume_tokens( - configuration.quota_limiters, - configuration.token_usage_history, - user_id, - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - model_id=model_id, - provider_id=provider_id, - ) - - store_conversation_into_cache( - configuration, - user_id, - conversation_id, - cache_entry, - _skip_userid_check, - topic_summary, - ) - - # Convert tool calls to response format - logger.info("Processing tool calls...") - - logger.info("Using referenced documents from response...") - - available_quotas = get_available_quotas(configuration.quota_limiters, user_id) - - logger.info("Building final response...") - response = QueryResponse( - conversation_id=conversation_id, - response=summary.llm_response, - rag_chunks=rag_chunks_dict, - tool_calls=summary.tool_calls if summary.tool_calls else [], - tool_results=summary.tool_results if summary.tool_results else [], - referenced_documents=referenced_documents, - truncated=False, # TODO: implement truncation detection - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - available_quotas=available_quotas, - ) - logger.info("Query processing completed successfully!") - return response - - # connection to Llama Stack server - except APIConnectionError as e: - # Update metrics for the LLM call failure - metrics.llm_calls_failures_total.inc() - logger.error("Unable to connect to Llama Stack: %s", e) - response = ServiceUnavailableResponse( - backend_name="Llama Stack", - cause=str(e), - ) - raise HTTPException(**response.model_dump()) from e - except 
SQLAlchemyError as e: - logger.exception("Error persisting conversation details.") - response = InternalServerErrorResponse.database_error() - raise HTTPException(**response.model_dump()) from e - except RateLimitError as e: - used_model = getattr(e, "model", "") - if used_model: - response = QuotaExceededResponse.model(used_model) - else: - response = QuotaExceededResponse( - response="The quota has been exceeded", cause=str(e) - ) - raise HTTPException(**response.model_dump()) from e - except APIStatusError as e: - logger.exception("Error in query endpoint handler: %s", e) - response = InternalServerErrorResponse.generic() - raise HTTPException(**response.model_dump()) from e - - -def select_model_and_provider_id( - models: ModelListResponse, model_id: Optional[str], provider_id: Optional[str] -) -> tuple[str, str, str]: - """ - Select the model ID and provider ID based on the request or available models. - - Determine and return the appropriate model and provider IDs for - a query request. - - If the request specifies both model and provider IDs, those are used. - Otherwise, defaults from configuration are applied. If neither is - available, selects the first available LLM model from the provided model - list. Validates that the selected model exists among the available models. - - Returns: - A tuple containing the combined model ID (in the format - "provider/model"), and its separated parts: the model label and the provider ID. - - Raises: - HTTPException: If no suitable LLM model is found or the selected model is not available. - """ - # If model_id and provider_id are provided in the request, use them - - # If model_id is not provided in the request, check the configuration - if not model_id or not provider_id: - logger.debug( - "No model ID or provider ID specified in request, checking configuration" - ) - model_id = configuration.inference.default_model # type: ignore[reportAttributeAccessIssue] - provider_id = ( - configuration.inference.default_provider # type: ignore[reportAttributeAccessIssue] - ) - - # If no model is specified in the request or configuration, use the first available LLM - if not model_id or not provider_id: - logger.debug( - "No model ID or provider ID specified in request or configuration, " - "using the first available LLM" - ) - try: - model = next( - m - for m in models - if m.custom_metadata and m.custom_metadata.get("model_type") == "llm" - ) - model_id = model.id - # Extract provider_id from custom_metadata - provider_id = ( - str(model.custom_metadata.get("provider_id", "")) - if model.custom_metadata - else "" - ) - logger.info("Selected model: %s", model) - model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id - return model_id, model_label, provider_id - except (StopIteration, AttributeError) as e: - message = "No LLM model found in available models" - logger.error(message) - response = NotFoundResponse(resource="model", resource_id=model_id or "") - raise HTTPException(**response.model_dump()) from e - - llama_stack_model_id = f"{provider_id}/{model_id}" - # Validate that the model_id and provider_id are in the available models - logger.debug("Searching for model: %s, provider: %s", model_id, provider_id) - # TODO: Create sepparate validation of provider - if not any( - m.id in (llama_stack_model_id, model_id) - and ( - m.custom_metadata - and str(m.custom_metadata.get("provider_id", "")) == provider_id - ) - for m in models - ): - message = f"Model {model_id} from provider {provider_id} not found in available models" - 
logger.error(message) - response = NotFoundResponse(resource="model", resource_id=model_id) - raise HTTPException(**response.model_dump()) - return llama_stack_model_id, model_id, provider_id - - -def _is_inout_shield(shield: Shield) -> bool: - """ - Determine if the shield identifier indicates an input/output shield. - - Parameters: - shield (Shield): The shield to check. - - Returns: - bool: True if the shield identifier starts with "inout_", otherwise False. - """ - return shield.identifier.startswith("inout_") - - -def is_output_shield(shield: Shield) -> bool: - """ - Determine if the shield is for monitoring output. - - Return True if the given shield is classified as an output or - inout shield. - - A shield is considered an output shield if its identifier - starts with "output_" or "inout_". - """ - return _is_inout_shield(shield) or shield.identifier.startswith("output_") - - -def is_input_shield(shield: Shield) -> bool: - """ - Determine if the shield is for monitoring input. - - Return True if the shield is classified as an input or inout - shield. - - Parameters: - shield (Shield): The shield identifier to classify. - - Returns: - bool: True if the shield is for input or both input/output monitoring; False otherwise. - """ - return _is_inout_shield(shield) or not is_output_shield(shield) - - -def validate_attachments_metadata(attachments: list[Attachment]) -> None: - """Validate the attachments metadata provided in the request. - - Raises: - HTTPException: If any attachment has an invalid type or content type, - an HTTP 422 error is raised. - """ - for attachment in attachments: - if attachment.attachment_type not in constants.ATTACHMENT_TYPES: - message = ( - f"Invalid attatchment type {attachment.attachment_type}: " - f"must be one of {constants.ATTACHMENT_TYPES}" - ) - logger.error(message) - response = UnprocessableEntityResponse( - response="Invalid attribute value", cause=message - ) - raise HTTPException(**response.model_dump()) - if attachment.content_type not in constants.ATTACHMENT_CONTENT_TYPES: - message = ( - f"Invalid attatchment content type {attachment.content_type}: " - f"must be one of {constants.ATTACHMENT_CONTENT_TYPES}" - ) - logger.error(message) - response = UnprocessableEntityResponse( - response="Invalid attribute value", cause=message - ) - raise HTTPException(**response.model_dump()) diff --git a/src/app/endpoints/rlsapi_v1.py b/src/app/endpoints/rlsapi_v1.py index a10e47703..6336aa4a3 100644 --- a/src/app/endpoints/rlsapi_v1.py +++ b/src/app/endpoints/rlsapi_v1.py @@ -32,7 +32,7 @@ from models.rlsapi.requests import RlsapiV1InferRequest, RlsapiV1SystemInfo from models.rlsapi.responses import RlsapiV1InferData, RlsapiV1InferResponse from observability import InferenceEventData, build_inference_event, send_splunk_event -from utils.responses import extract_text_from_response_output_item +from utils.responses import extract_text_from_response_output_item, get_mcp_tools from utils.suid import get_suid logger = logging.getLogger(__name__) @@ -92,7 +92,13 @@ def _build_instructions(systeminfo: RlsapiV1SystemInfo) -> str: Returns: Instructions string for the LLM, with system context if available. 
""" - base_prompt = constants.DEFAULT_SYSTEM_PROMPT + if ( + configuration.customization is not None + and configuration.customization.system_prompt is not None + ): + base_prompt = configuration.customization.system_prompt + else: + base_prompt = constants.DEFAULT_SYSTEM_PROMPT context_parts = [] if systeminfo.os: @@ -142,15 +148,18 @@ def _get_default_model_id() -> str: ) -async def retrieve_simple_response(question: str, instructions: str) -> str: +async def retrieve_simple_response( + question: str, instructions: str, tools: list | None = None +) -> str: """Retrieve a simple response from the LLM for a stateless query. Uses the Responses API for simple stateless inference, consistent with - other endpoints (query_v2, streaming_query_v2). + other endpoints (query, streaming_query). Args: question: The combined user input (question + context). instructions: System instructions for the LLM. + tools: Optional list of MCP tool definitions for the LLM. Returns: The LLM-generated response text. @@ -168,6 +177,7 @@ async def retrieve_simple_response(question: str, instructions: str) -> str: input=question, model=model_id, instructions=instructions, + tools=tools or [], stream=False, store=False, ) @@ -255,13 +265,16 @@ async def infer_endpoint( input_source = infer_request.get_input_source() instructions = _build_instructions(infer_request.context.systeminfo) + mcp_tools = get_mcp_tools(configuration.mcp_servers) logger.debug( "Request %s: Combined input source length: %d", request_id, len(input_source) ) start_time = time.monotonic() try: - response_text = await retrieve_simple_response(input_source, instructions) + response_text = await retrieve_simple_response( + input_source, instructions, tools=mcp_tools + ) inference_time = time.monotonic() - start_time except APIConnectionError as e: inference_time = time.monotonic() - start_time diff --git a/src/app/endpoints/streaming_query.py b/src/app/endpoints/streaming_query.py index 789ee4c18..694147b17 100644 --- a/src/app/endpoints/streaming_query.py +++ b/src/app/endpoints/streaming_query.py @@ -1,86 +1,95 @@ -"""Streaming query handler using Responses API (v2).""" +"""Streaming query handler using Responses API.""" +import json import logging +from datetime import UTC, datetime from typing import Annotated, Any, AsyncIterator, Optional, cast -from fastapi import APIRouter, Depends, Request +from fastapi import APIRouter, Depends, HTTPException, Request from fastapi.responses import StreamingResponse from llama_stack_api.openai_responses import ( OpenAIResponseObject, OpenAIResponseObjectStream, - OpenAIResponseObjectStreamResponseCompleted, - OpenAIResponseObjectStreamResponseFailed, - OpenAIResponseObjectStreamResponseOutputItemDone, - OpenAIResponseObjectStreamResponseOutputTextDelta, - OpenAIResponseObjectStreamResponseOutputTextDone, + OpenAIResponseObjectStreamResponseMcpCallArgumentsDone as MCPArgsDoneChunk, + OpenAIResponseObjectStreamResponseOutputItemAdded as OutputItemAddedChunk, + OpenAIResponseObjectStreamResponseOutputItemDone as OutputItemDoneChunk, + OpenAIResponseObjectStreamResponseOutputTextDelta as TextDeltaChunk, + OpenAIResponseObjectStreamResponseOutputTextDone as TextDoneChunk, + OpenAIResponseOutputMessageMCPCall as MCPCall, ) -from llama_stack_client import AsyncLlamaStackClient - -from app.endpoints.query_old import ( - is_transcripts_enabled, - persist_user_conversation_details, - validate_attachments_metadata, -) -from app.endpoints.query import ( - _build_tool_call_summary, - 
extract_token_usage_from_responses_api, - get_topic_summary, - parse_referenced_documents_from_responses_api, - prepare_tools_for_responses_api, -) -from app.endpoints.streaming_query_old import ( - LLM_TOKEN_EVENT, - LLM_TOOL_CALL_EVENT, - LLM_TOOL_RESULT_EVENT, - format_stream_data, - stream_end_event, - stream_event, - stream_start_event, - streaming_query_endpoint_handler_base, +from llama_stack_client import ( + APIConnectionError, + APIStatusError as LLSApiStatusError, ) +from openai._exceptions import APIStatusError as OpenAIAPIStatusError +import metrics from authentication import get_auth_dependency from authentication.interface import AuthTuple +from authorization.azure_token_manager import AzureEntraIDManager from authorization.middleware import authorize +from client import AsyncLlamaStackClientHolder from configuration import configuration from constants import ( + LLM_TOKEN_EVENT, + LLM_TOOL_CALL_EVENT, + LLM_TOOL_RESULT_EVENT, + LLM_TURN_COMPLETE_EVENT, MEDIA_TYPE_JSON, + MEDIA_TYPE_TEXT, ) from models.config import Action from models.context import ResponseGeneratorContext from models.requests import QueryRequest from models.responses import ( + AbstractErrorResponse, ForbiddenResponse, InternalServerErrorResponse, NotFoundResponse, + PromptTooLongResponse, QuotaExceededResponse, - ReferencedDocument, ServiceUnavailableResponse, StreamingQueryResponse, UnauthorizedResponse, UnprocessableEntityResponse, ) +from utils.types import RAGChunk, ReferencedDocument from utils.endpoints import ( - cleanup_after_streaming, - get_system_prompt, + check_configuration_loaded, + validate_and_retrieve_conversation, +) +from utils.mcp_headers import mcp_headers_dependency, McpHeaders +from utils.query import ( + consume_query_tokens, + extract_provider_and_model_from_model_id, + handle_known_apistatus_errors, + store_query_results, + update_azure_token, + validate_attachments_metadata, + validate_model_provider_override, +) +from utils.quota import check_tokens_available, get_available_quotas +from utils.responses import ( + build_mcp_tool_call_from_arguments_done, + build_tool_call_summary, + build_tool_result_from_mcp_output_item_done, + extract_token_usage, + get_topic_summary, + parse_referenced_documents, + prepare_responses_params, ) -from utils.query import create_violation_stream -from utils.quota import consume_tokens, get_available_quotas -from utils.suid import normalize_conversation_id, to_llama_stack_conversation_id -from utils.mcp_headers import mcp_headers_dependency from utils.shields import ( append_turn_to_conversation, run_shield_moderation, ) +from utils.suid import normalize_conversation_id from utils.token_counter import TokenCounter -from utils.transcripts import store_transcript -from utils.types import RAGChunk, TurnSummary -from utils.vector_search import perform_vector_search, format_rag_context_for_injection +from utils.types import ResponsesApiParams, TurnSummary +from utils.vector_search import format_rag_context_for_injection, perform_vector_search -logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["streaming_query_v1"]) -auth_dependency = get_auth_dependency() +logger = logging.getLogger(__name__) +router = APIRouter(tags=["streaming_query"]) -streaming_query_v2_responses: dict[int | str, dict[str, Any]] = { +streaming_query_responses: dict[int | str, dict[str, Any]] = { 200: StreamingQueryResponse.openapi_response(), 401: UnauthorizedResponse.openapi_response( examples=["missing header", "missing token"] @@ -99,410 +108,662 @@ } 
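+# Illustrative sketch of the SSE wire format this module emits (example values
+# are assumptions; see format_stream_data() and the stream_*_event() helpers
+# below for the authoritative shapes, JSON media type):
+#
+#   data: {"event": "start", "data": {"conversation_id": "1234"}}
+#   data: {"event": "token", "data": {"id": 0, "token": "Hello"}}
+#   data: {"event": "end", "data": {"referenced_documents": [], "truncated": null,
+#         "input_tokens": 10, "output_tokens": 5}, "available_quotas": {}}
+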
-def create_responses_response_generator( # pylint: disable=too-many-locals,too-many-statements +@router.post( + "/streaming_query", + response_class=StreamingResponse, + responses=streaming_query_responses, + summary="Streaming Query Endpoint Handler", +) +@authorize(Action.STREAMING_QUERY) +async def streaming_query_endpoint_handler( # pylint: disable=too-many-locals + request: Request, + query_request: QueryRequest, + auth: Annotated[AuthTuple, Depends(get_auth_dependency())], + mcp_headers: McpHeaders = Depends(mcp_headers_dependency), +) -> StreamingResponse: + """ + Handle request to the /streaming_query endpoint using Responses API. + + Returns a streaming response using Server-Sent Events (SSE) format with + content type text/event-stream. + + Returns: + SSE-formatted events for the query lifecycle. + + Raises: + HTTPException: + - 401: Unauthorized - Missing or invalid credentials + - 403: Forbidden - Insufficient permissions or model override not allowed + - 404: Not Found - Conversation, model, or provider not found + - 413: Prompt too long - Prompt exceeded model's context window size + - 422: Unprocessable Entity - Request validation failed + - 429: Quota limit exceeded - The token quota for model or user has been exceeded + - 500: Internal Server Error - Configuration not loaded or other server errors + - 503: Service Unavailable - Unable to connect to Llama Stack backend + """ + check_configuration_loaded(configuration) + + user_id, _user_name, _skip_userid_check, token = auth + started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + + # Check token availability + check_tokens_available(configuration.quota_limiters, user_id) + + # Enforce RBAC: optionally disallow overriding model/provider in requests + validate_model_provider_override(query_request, request.state.authorized_actions) + + # Validate attachments if provided + if query_request.attachments: + validate_attachments_metadata(query_request.attachments) + + # Retrieve conversation if conversation_id is provided + user_conversation = None + if query_request.conversation_id: + logger.debug( + "Conversation ID specified in query: %s", query_request.conversation_id + ) + normalized_conv_id = normalize_conversation_id(query_request.conversation_id) + user_conversation = validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=Action.READ_OTHERS_CONVERSATIONS + in request.state.authorized_actions, + ) + + client = AsyncLlamaStackClientHolder().get_client() + + pre_rag_chunks: list[RAGChunk] = [] + doc_ids_from_chunks: list[ReferencedDocument] = [] + + _, _, doc_ids_from_chunks, pre_rag_chunks = await perform_vector_search( + client, query_request, configuration + ) + rag_context = format_rag_context_for_injection(pre_rag_chunks) + if rag_context: + query_request = query_request.model_copy(deep=True) + query_request.query = query_request.query + rag_context + + # Prepare API request parameters + responses_params = await prepare_responses_params( + client=client, + query_request=query_request, + user_conversation=user_conversation, + token=token, + mcp_headers=mcp_headers, + stream=True, + store=True, + ) + + # Handle Azure token refresh if needed + if ( + responses_params.model.startswith("azure") + and AzureEntraIDManager().is_entra_id_configured + and AzureEntraIDManager().is_token_expired + and AzureEntraIDManager().refresh_token() + ): + client = await update_azure_token(client) + + # Create context + context = ResponseGeneratorContext( + 
conversation_id=normalize_conversation_id(responses_params.conversation),
+        model_id=responses_params.model,
+        user_id=user_id,
+        skip_userid_check=_skip_userid_check,
+        query_request=query_request,
+        started_at=started_at,
+        client=client,
+    )
+
+    # Update metrics for the LLM call
+    provider_id, model_id = extract_provider_and_model_from_model_id(
+        responses_params.model
+    )
+    metrics.llm_calls_total.labels(provider_id, model_id).inc()
+
+    generator, turn_summary = await retrieve_response_generator(
+        responses_params=responses_params,
+        context=context,
+        doc_ids_from_chunks=doc_ids_from_chunks,
+    )
+
+    return StreamingResponse(
+        generate_response(
+            generator=generator,
+            context=context,
+            responses_params=responses_params,
+            turn_summary=turn_summary,
+        ),
+        media_type=query_request.media_type or MEDIA_TYPE_TEXT,
+    )
+
+
+async def retrieve_response_generator(
+    responses_params: ResponsesApiParams,
     context: ResponseGeneratorContext,
-    doc_ids_from_chunks: Optional[list[ReferencedDocument]] = None,
-) -> Any:
+    doc_ids_from_chunks: list[ReferencedDocument],
+) -> tuple[AsyncIterator[str], TurnSummary]:
     """
-    Create a response generator function for Responses API streaming.
+    Retrieve the appropriate response generator.
 
-    This factory function returns an async generator that processes streaming
-    responses from the Responses API and yields Server-Sent Events (SSE).
+    Runs the shield moderation check, then creates the response stream.
+    Returns the generator (shield violation or response generator) and the
+    turn_summary whose token usage, referenced documents, and tool call
+    attributes are filled in while the stream is consumed.
 
     Args:
-        context: Context object containing all necessary parameters for response generation
-        doc_ids_from_chunks: Referenced documents extracted from vector DB chunks
+        responses_params: The Responses API parameters
+        context: The response generator context
+        doc_ids_from_chunks: Referenced documents extracted from vector DB chunks
 
     Returns:
-        An async generator function that yields SSE-formatted strings
-    """
+        tuple[AsyncIterator[str], TurnSummary]: The response generator and turn summary
 
-    async def response_generator(  # pylint: disable=too-many-branches,too-many-statements
-        turn_response: AsyncIterator[OpenAIResponseObjectStream],
-    ) -> AsyncIterator[str]:
-        """
-        Generate SSE formatted streaming response.
-
-        Asynchronously generates a stream of Server-Sent Events
-        (SSE) representing incremental responses from a
-        language model turn.
-
-        Yields start, token, tool call, turn completion, and
-        end events as SSE-formatted strings. Collects the
-        complete response for transcript storage if enabled.
-        """
-        chunk_id = 0
-        summary = TurnSummary(
-            llm_response="", tool_calls=[], tool_results=[], rag_chunks=[]
+    """
+    turn_summary = TurnSummary()
+    try:
+        moderation_result = await run_shield_moderation(
+            context.client, responses_params.input
+        )
+        if moderation_result.blocked:
+            violation_message = moderation_result.message or ""
+            turn_summary.llm_response = violation_message
+            await append_turn_to_conversation(
+                context.client,
+                responses_params.conversation,
+                responses_params.input,
+                violation_message,
+            )
+            media_type = context.query_request.media_type or MEDIA_TYPE_JSON
+            return (
+                shield_violation_generator(violation_message, media_type),
+                turn_summary,
+            )
+        # Retrieve response stream (may raise exceptions)
+        response = await context.client.responses.create(
+            **responses_params.model_dump()
+        )
+        # Store pre-RAG documents for later merging
+        turn_summary.pre_rag_documents = doc_ids_from_chunks
+        return response_generator(response, context, turn_summary), turn_summary
+
+    # Handle known LLS client errors only at stream creation time and shield execution
+    except RuntimeError as e:  # library mode wraps 413 into a RuntimeError
+        if "context_length" in str(e).lower():
+            error_response = PromptTooLongResponse(model=responses_params.model)
+            raise HTTPException(**error_response.model_dump()) from e
+        raise
+    except APIConnectionError as e:
+        error_response = ServiceUnavailableResponse(
+            backend_name="Llama Stack",
+            cause=str(e),
         )
+        raise HTTPException(**error_response.model_dump()) from e
 
-        # Determine media type for response formatting
-        media_type = context.query_request.media_type or MEDIA_TYPE_JSON
+    except (LLSApiStatusError, OpenAIAPIStatusError) as e:
+        error_response = handle_known_apistatus_errors(e, responses_params.model)
+        raise HTTPException(**error_response.model_dump()) from e
 
-        # Accumulators for Responses API
-        text_parts: list[str] = []
-        emitted_turn_complete = False
-        # Use the conversation_id from context (either provided or newly created)
-        conv_id = context.conversation_id
+
+async def generate_response(
+    generator: AsyncIterator[str],
+    context: ResponseGeneratorContext,
+    responses_params: ResponsesApiParams,
+    turn_summary: TurnSummary,
+) -> AsyncIterator[str]:
+    """Wrap a generator with cleanup logic.
 
-        # Track the latest response object from response.completed event
-        latest_response_object: Optional[Any] = None
+    Re-yields events from the generator, handles errors, and ensures
+    persistence and token consumption after completion.
- # RAG chunks - rag_chunks: list[RAGChunk] = [] + Args: + generator: The base generator to wrap + context: The response generator context + responses_params: The Responses API parameters + turn_summary: TurnSummary populated during streaming - logger.debug("Starting streaming response (Responses API) processing") + Yields: + SSE-formatted strings from the wrapped generator + """ + yield stream_start_event(context.conversation_id) + + # Re-yield all events from the generator + try: + async for event in generator: + yield event + + # Handle known LLS client errors during response generation time + except RuntimeError as e: # library mode wraps 413 into runtime error + error_response = ( + PromptTooLongResponse(model=responses_params.model) + if "context_length" in str(e).lower() + else InternalServerErrorResponse.generic() + ) + yield stream_http_error_event(error_response, context.query_request.media_type) + return + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + yield stream_http_error_event(error_response, context.query_request.media_type) + return + except (LLSApiStatusError, OpenAIAPIStatusError) as e: + error_response = handle_known_apistatus_errors(e, responses_params.model) + yield stream_http_error_event(error_response, context.query_request.media_type) + return + + # Get topic summary for new conversations if needed + topic_summary = None + if not context.query_request.conversation_id: + should_generate = context.query_request.generate_topic_summary + if should_generate: + logger.debug("Generating topic summary for new conversation") + topic_summary = await get_topic_summary( + context.query_request.query, + context.client, + responses_params.model, + ) - async for chunk in turn_response: - event_type = getattr(chunk, "type", None) - logger.debug("Processing chunk %d, type: %s", chunk_id, event_type) + # Consume tokens + logger.info("Consuming tokens") + consume_query_tokens( + user_id=context.user_id, + model_id=responses_params.model, + token_usage=turn_summary.token_usage, + configuration=configuration, + ) + # Get available quotas + logger.info("Getting available quotas") + available_quotas = get_available_quotas( + quota_limiters=configuration.quota_limiters, user_id=context.user_id + ) - # Emit start event when response is created - if event_type == "response.created": - yield stream_start_event(conv_id) + yield stream_end_event( + turn_summary.token_usage, + available_quotas, + turn_summary.referenced_documents, + context.query_request.media_type or MEDIA_TYPE_JSON, + ) + completed_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + + # Store query results (transcript, conversation details, cache) + logger.info("Storing query results") + store_query_results( + user_id=context.user_id, + conversation_id=context.conversation_id, + model=responses_params.model, + completed_at=completed_at, + started_at=context.started_at, + summary=turn_summary, + query_request=context.query_request, + configuration=configuration, + skip_userid_check=context.skip_userid_check, + topic_summary=topic_summary, + ) - # Text streaming - if event_type == "response.output_text.delta": - delta_chunk = cast( - OpenAIResponseObjectStreamResponseOutputTextDelta, chunk - ) - if delta_chunk.delta: - text_parts.append(delta_chunk.delta) - yield stream_event( - { - "id": chunk_id, - "token": delta_chunk.delta, - }, - LLM_TOKEN_EVENT, - media_type, - ) - chunk_id += 1 - # Final text of the output (capture, but emit at 
response.completed) - elif event_type == "response.output_text.done": - text_done_chunk = cast( - OpenAIResponseObjectStreamResponseOutputTextDone, chunk +async def response_generator( # pylint: disable=too-many-branches,too-many-statements,too-many-locals + turn_response: AsyncIterator[OpenAIResponseObjectStream], + context: ResponseGeneratorContext, + turn_summary: TurnSummary, +) -> AsyncIterator[str]: + """Generate SSE formatted streaming response. + + Processes streaming chunks from Llama Stack and converts them to + Server-Sent Events (SSE) format. Uses handler functions to process + different event types and populate turn_summary during streaming. + + Args: + turn_response: The streaming response from Llama Stack + context: The response generator context + turn_summary: TurnSummary to populate during streaming + + Yields: + SSE-formatted strings for tokens, tool calls, tool results, + turn completion, and error events. + """ + chunk_id = 0 + media_type = context.query_request.media_type or MEDIA_TYPE_JSON + text_parts: list[str] = [] + mcp_calls: dict[int, tuple[str, str]] = ( + {} + ) # output_index -> (mcp_call_id, mcp_call_name) + latest_response_object: Optional[OpenAIResponseObject] = None + + logger.debug("Starting streaming response (Responses API) processing") + + async for chunk in turn_response: + event_type = getattr(chunk, "type", None) + logger.debug("Processing chunk %d, type: %s", chunk_id, event_type) + + # Content part started - emit an empty token to kick off UI streaming + if event_type == "response.content_part.added": + yield stream_event( + { + "id": chunk_id, + "token": "", + }, + LLM_TOKEN_EVENT, + media_type, + ) + chunk_id += 1 + + # Store MCP call item info for later lookup when arguments.done event occurs + elif event_type == "response.output_item.added": + item_added_chunk = cast(OutputItemAddedChunk, chunk) + if item_added_chunk.item.type == "mcp_call": + mcp_call_item = cast(MCPCall, item_added_chunk.item) + mcp_calls[item_added_chunk.output_index] = ( + mcp_call_item.id, + mcp_call_item.name, ) - if text_done_chunk.text: - summary.llm_response = text_done_chunk.text - # Content part started - emit an empty token to kick off UI streaming - elif event_type == "response.content_part.added": + # Text streaming - emit token delta + elif event_type == "response.output_text.delta": + delta_chunk = cast(TextDeltaChunk, chunk) + text_parts.append(delta_chunk.delta) + yield stream_event( + { + "id": chunk_id, + "token": delta_chunk.delta, + }, + LLM_TOKEN_EVENT, + media_type, + ) + chunk_id += 1 + + # Final text of the output (capture, but emit at response.completed) + elif event_type == "response.output_text.done": + text_done_chunk = cast(TextDoneChunk, chunk) + turn_summary.llm_response = text_done_chunk.text + + # Emit tool call when MCP call arguments are done + elif event_type == "response.mcp_call.arguments.done": + mcp_arguments_done_chunk = cast(MCPArgsDoneChunk, chunk) + tool_call = build_mcp_tool_call_from_arguments_done( + mcp_arguments_done_chunk.output_index, + mcp_arguments_done_chunk.arguments, + mcp_calls, + ) + if tool_call: + turn_summary.tool_calls.append(tool_call) yield stream_event( - { - "id": chunk_id, - "token": "", - }, - LLM_TOKEN_EVENT, + tool_call.model_dump(), + LLM_TOOL_CALL_EVENT, media_type, ) - chunk_id += 1 - # Process tool calls and results are emitted together when output items are done - # TODO(asimurka): support emitting tool calls and results separately when ready - elif event_type == "response.output_item.done": - 
output_item_done_chunk = cast( - OpenAIResponseObjectStreamResponseOutputItemDone, chunk + # Process tool calls and results when output items are done + # For mcp_call, only emit result (call was already emitted when arguments.done) + # For other types, emit both call and result + elif event_type == "response.output_item.done": + output_item_done_chunk = cast(OutputItemDoneChunk, chunk) + item_type = output_item_done_chunk.item.type + # Skip message items as they are parsed separately + if item_type == "message": + continue + + output_index = output_item_done_chunk.output_index + + # For mcp_call, only emit result if call was already emitted when arguments.done + # (indicated by output_index not being in mcp_calls dict) + # If output_index is in dict, process in else branch (emit both call and result) + if item_type == "mcp_call" and output_index not in mcp_calls: + # Call was already emitted during arguments.done, only emit result + mcp_call_item = cast(MCPCall, output_item_done_chunk.item) + tool_result = build_tool_result_from_mcp_output_item_done(mcp_call_item) + turn_summary.tool_results.append(tool_result) + yield stream_event( + tool_result.model_dump(), + LLM_TOOL_RESULT_EVENT, + media_type, ) - if output_item_done_chunk.item.type == "message": - continue - tool_call, tool_result = _build_tool_call_summary( - output_item_done_chunk.item, rag_chunks + else: + # For all other types (and mcp_call when arguments.done didn't happen), + # emit both call and result together + tool_call, tool_result = build_tool_call_summary( + output_item_done_chunk.item, turn_summary.rag_chunks ) if tool_call: - summary.tool_calls.append(tool_call) + turn_summary.tool_calls.append(tool_call) yield stream_event( tool_call.model_dump(), LLM_TOOL_CALL_EVENT, media_type, ) if tool_result: - summary.tool_results.append(tool_result) + turn_summary.tool_results.append(tool_result) yield stream_event( tool_result.model_dump(), LLM_TOOL_RESULT_EVENT, media_type, ) - # Completed response - capture final text and response object - elif event_type == "response.completed": - # Capture the response object for token usage extraction - completed_chunk = cast( - OpenAIResponseObjectStreamResponseCompleted, chunk - ) - latest_response_object = completed_chunk.response + # Completed response - capture final text and response object + elif event_type == "response.completed": + latest_response_object = cast( + OpenAIResponseObject, getattr(chunk, "response") + ) + turn_summary.llm_response = turn_summary.llm_response or "".join(text_parts) + yield stream_event( + { + "id": chunk_id, + "token": turn_summary.llm_response, + }, + LLM_TURN_COMPLETE_EVENT, + media_type, + ) + chunk_id += 1 - if not emitted_turn_complete: - final_message = summary.llm_response or "".join(text_parts) - if not final_message: - final_message = "No response from the model" - summary.llm_response = final_message - yield stream_event( - { - "id": chunk_id, - "token": final_message, - }, - "turn_complete", - media_type, - ) - chunk_id += 1 - emitted_turn_complete = True - - # Incomplete response - emit error because LLS does not - # support incomplete responses "incomplete_detail" attribute yet - elif event_type == "response.incomplete": - error_response = InternalServerErrorResponse.query_failed( - "An unexpected error occurred while processing the request." 
- ) - logger.error("Error while obtaining answer for user question") - yield format_stream_data( - {"event": "error", "data": {**error_response.detail.model_dump()}} - ) - return - - # Failed response - emit error with custom cause from error message - elif event_type == "response.failed": - failed_chunk = cast(OpenAIResponseObjectStreamResponseFailed, chunk) - latest_response_object = failed_chunk.response - error_message = ( - failed_chunk.response.error.message - if failed_chunk.response.error - else "An unexpected error occurred while processing the request." - ) - error_response = InternalServerErrorResponse.query_failed(error_message) - logger.error("Error while obtaining answer for user question") - yield format_stream_data( - {"event": "error", "data": {**error_response.detail.model_dump()}} - ) - return + # Incomplete or failed response - emit error + elif event_type in ("response.incomplete", "response.failed"): + latest_response_object = cast( + OpenAIResponseObject, getattr(chunk, "response") + ) + error_message = ( + latest_response_object.error.message + if latest_response_object.error + else "An unexpected error occurred while processing the request." + ) + error_response = ( + PromptTooLongResponse(model=context.model_id) + if "context_length" in error_message.lower() + else InternalServerErrorResponse.query_failed(error_message) + ) + yield stream_http_error_event(error_response, media_type) - logger.debug( - "Streaming complete - Tool calls: %d, Response chars: %d", - len(summary.tool_calls), - len(summary.llm_response), - ) + logger.debug( + "Streaming complete - Tool calls: %d, Response chars: %d", + len(turn_summary.tool_calls), + len(turn_summary.llm_response), + ) - # Extract token usage from the response object - token_usage = ( - extract_token_usage_from_responses_api( - latest_response_object, context.model_id, context.provider_id - ) - if latest_response_object is not None - else TokenCounter() - ) - consume_tokens( - configuration.quota_limiters, - configuration.token_usage_history, - context.user_id, - input_tokens=token_usage.input_tokens, - output_tokens=token_usage.output_tokens, - model_id=context.model_id, - provider_id=context.provider_id, - ) - response_referenced_documents = parse_referenced_documents_from_responses_api( - cast(OpenAIResponseObject, latest_response_object) - ) - # Combine doc_ids_from_chunks with response_referenced_documents - all_referenced_documents = ( - doc_ids_from_chunks or [] - ) + response_referenced_documents - available_quotas = get_available_quotas( - configuration.quota_limiters, context.user_id - ) - yield stream_end_event( - context.metadata_map, - token_usage, - available_quotas, - all_referenced_documents, - media_type, - ) + # Extract token usage and referenced documents from the final response object + turn_summary.token_usage = extract_token_usage( + latest_response_object, context.model_id + ) + tool_based_documents = parse_referenced_documents(latest_response_object) + + # Merge pre-RAG documents with tool-based documents (similar to query.py) + if turn_summary.pre_rag_documents: + all_documents = turn_summary.pre_rag_documents + tool_based_documents + seen = set() + deduplicated_documents = [] + for doc in all_documents: + key = (doc.doc_url, doc.doc_title) + if key not in seen: + seen.add(key) + deduplicated_documents.append(doc) + turn_summary.referenced_documents = deduplicated_documents + else: + turn_summary.referenced_documents = tool_based_documents - # Perform cleanup tasks (database and cache operations)) 
-        await cleanup_after_streaming(
-            user_id=context.user_id,
-            conversation_id=conv_id,
-            model_id=context.model_id,
-            provider_id=context.provider_id,
-            llama_stack_model_id=context.llama_stack_model_id,
-            query_request=context.query_request,
-            summary=summary,
-            metadata_map=context.metadata_map,
-            started_at=context.started_at,
-            client=context.client,
-            config=configuration,
-            skip_userid_check=context.skip_userid_check,
-            get_topic_summary_func=get_topic_summary,
-            is_transcripts_enabled_func=is_transcripts_enabled,
-            store_transcript_func=store_transcript,
-            persist_user_conversation_details_func=persist_user_conversation_details,
-            rag_chunks=[rag_chunk.model_dump() for rag_chunk in rag_chunks],
-        )
-
-    return response_generator
+def stream_http_error_event(
+    error: AbstractErrorResponse, media_type: str | None = MEDIA_TYPE_JSON
+) -> str:
+    """
+    Create an SSE-formatted error response for generic LLM or API errors.
+
+    Args:
+        error: An AbstractErrorResponse instance representing the error.
+        media_type: The media type for the response format. Defaults to MEDIA_TYPE_JSON if None.
+
+    Returns:
+        str: A Server-Sent Events (SSE) formatted error message containing
+        the serialized error details.
+    """
+    logger.error("Error while obtaining answer for user question")
+    media_type = media_type or MEDIA_TYPE_JSON
+    if media_type == MEDIA_TYPE_TEXT:
+        return f"Status: {error.status_code} - {error.detail.response} - {error.detail.cause}"
+
+    return format_stream_data(
+        {
+            "event": "error",
+            "data": {
+                "status_code": error.status_code,
+                "response": error.detail.response,
+                "cause": error.detail.cause,
+            },
+        }
+    )
 
-@router.post(
-    "/streaming_query",
-    response_class=StreamingResponse,
-    responses=streaming_query_v2_responses,
-    summary="Streaming Query Endpoint Handler V1",
-)
-@authorize(Action.STREAMING_QUERY)
-async def streaming_query_endpoint_handler_v2(  # pylint: disable=too-many-locals
-    request: Request,
-    query_request: QueryRequest,
-    auth: Annotated[AuthTuple, Depends(auth_dependency)],
-    mcp_headers: dict[str, dict[str, str]] = Depends(mcp_headers_dependency),
-) -> StreamingResponse:
+
+def format_stream_data(d: dict) -> str:
     """
-    Handle request to the /streaming_query endpoint using Responses API.
+    Format a dictionary as a Server-Sent Events (SSE) data string.
 
-    Returns a streaming response using Server-Sent Events (SSE) format with
-    content type text/event-stream.
+    Parameters:
+        d (dict): The data to be formatted as an SSE event.
 
     Returns:
-        StreamingResponse: An HTTP streaming response yielding
-        SSE-formatted events for the query lifecycle with content type
-        text/event-stream.
+ """ + data = json.dumps(d) + return f"data: {data}\n\n" - Raises: - HTTPException: - - 401: Unauthorized - Missing or invalid credentials - - 403: Forbidden - Insufficient permissions or model override not allowed - - 404: Not Found - Conversation, model, or provider not found - - 422: Unprocessable Entity - Request validation failed - - 429: Too Many Requests - Quota limit exceeded - - 500: Internal Server Error - Configuration not loaded or other server errors - - 503: Service Unavailable - Unable to connect to Llama Stack backend + +def stream_start_event(conversation_id: str) -> str: """ - return await streaming_query_endpoint_handler_base( - request=request, - query_request=query_request, - auth=auth, - mcp_headers=mcp_headers, - retrieve_response_func=retrieve_response, - create_response_generator_func=create_responses_response_generator, - ) + Yield the start of the data stream. + Format a Server-Sent Events (SSE) start event containing the + conversation ID. -async def retrieve_response( # pylint: disable=too-many-locals - client: AsyncLlamaStackClient, - model_id: str, - query_request: QueryRequest, - token: str, - mcp_headers: Optional[dict[str, dict[str, str]]] = None, -) -> tuple[AsyncIterator[OpenAIResponseObjectStream], str, list[ReferencedDocument]]: + Parameters: + conversation_id (str): Unique identifier for the + conversation. + + Returns: + str: SSE-formatted string representing the start event. """ - Retrieve response from LLMs and agents. + return format_stream_data( + { + "event": "start", + "data": { + "conversation_id": conversation_id, + }, + } + ) - Asynchronously retrieves a streaming response and conversation - ID from the Llama Stack agent for a given user query. - This function configures shields, system prompt, and tool usage - based on the request and environment. It prepares the agent with - appropriate headers and toolgroups, validates attachments if - present, and initiates a streaming turn with the user's query - and any provided documents. +def stream_end_event( + token_usage: TokenCounter, + available_quotas: dict[str, int], + referenced_documents: list[ReferencedDocument], + media_type: str = MEDIA_TYPE_JSON, +) -> str: + """ + Yield the end of the data stream. + + Format and return the end event for a streaming response, + including referenced document metadata and token usage information. Parameters: - model_id (str): Identifier of the model to use for the query. - query_request (QueryRequest): The user's query and associated metadata. - token (str): Authentication token for downstream services. - mcp_headers (dict[str, dict[str, str]], optional): - Multi-cluster proxy headers for tool integrations. + token_usage (TokenCounter): Token usage information. + available_quotas (dict[str, int]): Available quotas for the user. + referenced_documents (list[ReferencedDocument]): List of referenced documents. + media_type (str): The media type for the response format. Returns: - tuple: A tuple containing the streaming response object, - the conversation ID, and the list of referenced documents from vector DB chunks. + str: A Server-Sent Events (SSE) formatted string + representing the end of the data stream. 
""" - # use system prompt from request or default one - system_prompt = get_system_prompt(query_request, configuration) - logger.debug("Using system prompt: %s", system_prompt) + if media_type == MEDIA_TYPE_TEXT: + ref_docs_string = "\n".join( + f"{doc.doc_title}: {doc.doc_url}" + for doc in referenced_documents + if doc.doc_url and doc.doc_title + ) + return f"\n\n---\n\n{ref_docs_string}" if ref_docs_string else "" + + referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents] + + return format_stream_data( + { + "event": "end", + "data": { + "referenced_documents": referenced_docs_dict, + "truncated": None, + "input_tokens": token_usage.input_tokens, + "output_tokens": token_usage.output_tokens, + }, + "available_quotas": available_quotas, + } + ) - # TODO(lucasagomes): redact attachments content before sending to LLM - # if attachments are provided, validate them - if query_request.attachments: - validate_attachments_metadata(query_request.attachments) - # Prepare tools for responses API - skip RAG tools since we're doing direct vector query - toolgroups = await prepare_tools_for_responses_api( - client, - query_request, - token, - configuration, - mcp_headers=mcp_headers, - skip_rag_tools=True, - ) +def stream_event(data: dict, event_type: str, media_type: str) -> str: + """Build an item to yield based on media type. - # Extract RAG chunks from vector DB query response BEFORE calling responses API - _, _, doc_ids_from_chunks, rag_chunks = await perform_vector_search( - client, query_request, configuration - ) + Args: + data: Dictionary containing the event data + event_type: Type of event (token, tool call, etc.) + media_type: The media type for the response format - # Format RAG context for injection into user message - rag_context = format_rag_context_for_injection(rag_chunks) + Returns: + SSE-formatted string representing the event + """ + if media_type == MEDIA_TYPE_TEXT: + if event_type == LLM_TOKEN_EVENT: + return data.get("token", "") + if event_type == LLM_TOOL_CALL_EVENT: + return f"[Tool Call: {data.get('function_name', 'unknown')}]\n" + if event_type == LLM_TOOL_RESULT_EVENT: + return "[Tool Result]\n" + if event_type == LLM_TURN_COMPLETE_EVENT: + return "" + return "" + + return format_stream_data( + { + "event": event_type, + "data": data, + } + ) - # Prepare input for Responses API - # Convert attachments to text and concatenate with query - input_text = query_request.query - if query_request.attachments: - for attachment in query_request.attachments: - input_text += ( - f"\n\n[Attachment: {attachment.attachment_type}]\n" - f"{attachment.content}" - ) - # Add RAG context to input text - input_text += rag_context +async def shield_violation_generator( + violation_message: str, + media_type: str = MEDIA_TYPE_TEXT, +) -> AsyncIterator[str]: + """ + Create an SSE stream for shield violation responses. 
-    # Handle conversation ID for Responses API
-    # Create conversation upfront if not provided
-    conversation_id = query_request.conversation_id
-    if conversation_id:
-        # Conversation ID was provided - convert to llama-stack format
-        logger.debug("Using existing conversation ID: %s", conversation_id)
-        llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id)
-    else:
-        # No conversation_id provided - create a new conversation first
-        logger.debug("No conversation_id provided, creating new conversation")
-        conversation = await client.conversations.create(metadata={})
-        llama_stack_conv_id = conversation.id
-        # Store the normalized version for later use
-        conversation_id = normalize_conversation_id(llama_stack_conv_id)
-        logger.info(
-            "Created new conversation with ID: %s (normalized: %s)",
-            llama_stack_conv_id,
-            conversation_id,
-        )
+    Yields a single token event carrying the violation message; the surrounding
+    generate_response() wrapper supplies the start and end events.
+    This function creates a minimal streaming response without going through
+    the Llama Stack response format.
 
-    # Run shield moderation before calling LLM
-    moderation_result = await run_shield_moderation(client, input_text)
-    if moderation_result.blocked:
-        violation_message = moderation_result.message or ""
-        await append_turn_to_conversation(
-            client, llama_stack_conv_id, input_text, violation_message
-        )
-        return (
-            create_violation_stream(violation_message, moderation_result.shield_model),
-            normalize_conversation_id(conversation_id),
-        )
+    Args:
+        violation_message: The violation message to display.
+        media_type: The media type for the response format.
 
-    create_params: dict[str, Any] = {
-        "input": input_text,
-        "model": model_id,
-        "instructions": system_prompt,
-        "stream": True,
-        "store": True,
-        "tools": toolgroups,
-        "conversation": llama_stack_conv_id,
-    }
-
-    response = await client.responses.create(**create_params)
-    response_stream = cast(AsyncIterator[OpenAIResponseObjectStream], response)
-
-    return (
-        response_stream,
-        normalize_conversation_id(conversation_id),
-        doc_ids_from_chunks,
+    Yields:
+        str: SSE-formatted token event containing the violation message.
+ """ + yield stream_event( + { + "id": 0, + "token": violation_message, + }, + LLM_TOKEN_EVENT, + media_type, ) diff --git a/src/app/endpoints/streaming_query_old.py b/src/app/endpoints/streaming_query_old.py deleted file mode 100644 index 005949ea1..000000000 --- a/src/app/endpoints/streaming_query_old.py +++ /dev/null @@ -1,726 +0,0 @@ -"""Handler for REST API call to provide answer to streaming query.""" # pylint: disable=too-many-lines,too-many-locals,W0511 - -import ast -import json -import logging -import re -import uuid -from collections.abc import Callable -from datetime import UTC, datetime -from typing import ( - Any, - Iterator, - Optional, -) - -from fastapi import APIRouter, Request -from fastapi.responses import StreamingResponse -from llama_stack_client import ( - APIConnectionError, - RateLimitError, # type: ignore -) -from llama_stack_client.types.shared.interleaved_content_item import TextContentItem -from openai._exceptions import APIStatusError - -import metrics -from app.endpoints.query_old import ( - evaluate_model_hints, - select_model_and_provider_id, - validate_conversation_ownership, -) -from authentication.interface import AuthTuple -from authorization.azure_token_manager import AzureEntraIDManager -from client import AsyncLlamaStackClientHolder -from configuration import configuration -from constants import DEFAULT_RAG_TOOL, MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT -from models.context import ResponseGeneratorContext -from models.database.conversations import UserConversation -from models.requests import QueryRequest -from models.responses import ( - AbstractErrorResponse, - ForbiddenResponse, - InternalServerErrorResponse, - NotFoundResponse, - PromptTooLongResponse, - QuotaExceededResponse, - ServiceUnavailableResponse, - StreamingQueryResponse, - UnauthorizedResponse, - UnprocessableEntityResponse, -) -from utils.endpoints import ( - ReferencedDocument, - check_configuration_loaded, - validate_model_provider_override, -) -from utils.token_counter import TokenCounter -from utils.types import content_to_str - -logger = logging.getLogger("app.endpoints.handlers") -router = APIRouter(tags=["streaming_query"]) - -streaming_query_responses: dict[int | str, dict[str, Any]] = { - 200: StreamingQueryResponse.openapi_response(), - 401: UnauthorizedResponse.openapi_response( - examples=["missing header", "missing token"] - ), - 403: ForbiddenResponse.openapi_response( - examples=["conversation read", "endpoint", "model override"] - ), - 404: NotFoundResponse.openapi_response( - examples=["conversation", "model", "provider"] - ), - 413: PromptTooLongResponse.openapi_response(), - 422: UnprocessableEntityResponse.openapi_response(), - 429: QuotaExceededResponse.openapi_response(), - 500: InternalServerErrorResponse.openapi_response(examples=["configuration"]), - 503: ServiceUnavailableResponse.openapi_response(), -} - - -METADATA_PATTERN = re.compile(r"\nMetadata: (\{.+})\n") - -# OLS-compatible event types -LLM_TOKEN_EVENT = "token" -LLM_TOOL_CALL_EVENT = "tool_call" -LLM_TOOL_RESULT_EVENT = "tool_result" -LLM_VALIDATION_EVENT = "validation" - - -def format_stream_data(d: dict) -> str: - """ - Format a dictionary as a Server-Sent Events (SSE) data string. - - Parameters: - d (dict): The data to be formatted as an SSE event. - - Returns: - str: The formatted SSE data string. - """ - data = json.dumps(d) - return f"data: {data}\n\n" - - -def stream_start_event(conversation_id: str) -> str: - """ - Yield the start of the data stream. 
- - Format a Server-Sent Events (SSE) start event containing the - conversation ID. - - Parameters: - conversation_id (str): Unique identifier for the - conversation. - - Returns: - str: SSE-formatted string representing the start event. - """ - return format_stream_data( - { - "event": "start", - "data": { - "conversation_id": conversation_id, - }, - } - ) - - -def stream_end_event( - metadata_map: dict, - token_usage: TokenCounter, - available_quotas: dict[str, int], - referenced_documents: list[ReferencedDocument], - media_type: str = MEDIA_TYPE_JSON, -) -> str: - """ - Yield the end of the data stream. - - Format and return the end event for a streaming response, - including referenced document metadata and token usage information. - - Parameters: - metadata_map (dict): A mapping containing metadata about - referenced documents. - summary (TurnSummary): Summary of the conversation turn. - token_usage (TokenCounter): Token usage information. - media_type (str): The media type for the response format. - - Returns: - str: A Server-Sent Events (SSE) formatted string - representing the end of the data stream. - """ - if media_type == MEDIA_TYPE_TEXT: - ref_docs_string = "\n".join( - f'{v["title"]}: {v["docs_url"]}' - for v in filter( - lambda v: ("docs_url" in v) and ("title" in v), - metadata_map.values(), - ) - ) - return f"\n\n---\n\n{ref_docs_string}" if ref_docs_string else "" - - # Convert ReferencedDocument objects to dicts for JSON serialization - # Use mode="json" to ensure AnyUrl is serialized to string (not just model_dump()) - referenced_docs_dict = [doc.model_dump(mode="json") for doc in referenced_documents] - - return format_stream_data( - { - "event": "end", - "data": { - "referenced_documents": referenced_docs_dict, - "truncated": None, # TODO(jboos): implement truncated - "input_tokens": token_usage.input_tokens, - "output_tokens": token_usage.output_tokens, - }, - "available_quotas": available_quotas, - } - ) - - -def stream_event(data: dict, event_type: str, media_type: str) -> str: - """Build an item to yield based on media type. - - Args: - data: The data to yield. - event_type: The type of event (e.g. token, tool request, tool execution). - media_type: Media type of the response (e.g. text or JSON). - - Returns: - str: The formatted string or JSON to yield. - """ - if media_type == MEDIA_TYPE_TEXT: - if event_type == LLM_TOKEN_EVENT: - return data["token"] - if event_type == LLM_TOOL_CALL_EVENT: - return f"\nTool call: {json.dumps(data)}\n" - if event_type == LLM_TOOL_RESULT_EVENT: - return f"\nTool result: {json.dumps(data)}\n" - logger.error("Unknown event type: %s", event_type) - return "" - return format_stream_data( - { - "event": event_type, - "data": data, - } - ) - - -# ----------------------------------- -# Error handling -# ----------------------------------- -def _handle_error_event( - chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield error event. - - Yield a formatted Server-Sent Events (SSE) error event - containing the error message from a streaming chunk. - - Parameters: - chunk_id (int): The unique identifier for the current - streaming chunk. - media_type (str): The media type for the response format. 
- """ - if media_type == MEDIA_TYPE_TEXT: - yield f"Error: {chunk.error['message']}" - else: - yield format_stream_data( - { - "event": "error", - "data": { - "id": chunk_id, - "token": chunk.error["message"], - }, - } - ) - - -def prompt_too_long_error(error: Exception, media_type: str) -> str: - """Return error representation for long prompts. - - Args: - error: The exception raised for long prompts. - media_type: Media type of the response (e.g. text or JSON). - - Returns: - str: The error message formatted for the media type. - """ - logger.error("Prompt is too long: %s", error) - if media_type == MEDIA_TYPE_TEXT: - return f"Prompt is too long: {error}" - return format_stream_data( - { - "event": "error", - "data": { - "status_code": 413, - "response": "Prompt is too long", - "cause": str(error), - }, - } - ) - - -def generic_llm_error(error: Exception, media_type: str) -> str: - """Return error representation for generic LLM errors. - - Args: - error: The exception raised during processing. - media_type: Media type of the response (e.g. text or JSON). - - Returns: - str: The error message formatted for the media type. - """ - logger.error("Error while obtaining answer for user question") - logger.exception(error) - - if media_type == MEDIA_TYPE_TEXT: - return f"Error: {str(error)}" - return format_stream_data( - { - "event": "error", - "data": { - "response": "Internal server error", - "cause": str(error), - }, - } - ) - - -def stream_http_error(error: AbstractErrorResponse) -> Iterator[str]: - """ - Yield an SSE-formatted error response for generic LLM or API errors. - - Args: - error: An AbstractErrorResponse instance representing the error. - - Yields: - str: A Server-Sent Events (SSE) formatted error message containing - the serialized error details. - """ - logger.error("Error while obtaining answer for user question") - logger.exception(error) - - yield format_stream_data({"event": "error", "data": {**error.detail.model_dump()}}) - - -# ----------------------------------- -# Turn handling -# ----------------------------------- -def _handle_turn_start_event( - _chunk_id: int, - media_type: str = MEDIA_TYPE_JSON, - conversation_id: Optional[str] = None, -) -> Iterator[str]: - """ - Yield turn start event. - - Yield a Server-Sent Event (SSE) start event indicating the - start of a new conversation turn. - - Parameters: - chunk_id (int): The unique identifier for the current - chunk. - - Yields: - str: SSE-formatted start event with conversation_id. - """ - # Use provided conversation_id or generate one if not available - if conversation_id is None: - conversation_id = str(uuid.uuid4()) - - if media_type == MEDIA_TYPE_TEXT: - yield ( - f"data: {json.dumps({'event': 'start', 'data': {'conversation_id': conversation_id}})}\n\n" # pylint: disable=line-too-long - ) - else: - yield format_stream_data( - { - "event": "start", - "data": {"conversation_id": conversation_id}, - } - ) - - -def _handle_turn_complete_event( - chunk: Any, _chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield turn complete event. - - Yields a Server-Sent Event (SSE) indicating the completion of a - conversation turn, including the full output message content. - - Parameters: - chunk_id (int): The unique identifier for the current - chunk. - - Yields: - str: SSE-formatted string containing the turn completion - event and output message content. 
- """ - full_response = content_to_str(chunk.event.payload.turn.output_message.content) - - if media_type == MEDIA_TYPE_TEXT: - yield ( - f"data: {json.dumps({'event': 'turn_complete', 'data': {'token': full_response}})}\n\n" - ) - else: - yield format_stream_data( - { - "event": "turn_complete", - "data": {"token": full_response}, - } - ) - - -# ----------------------------------- -# Shield handling -# ----------------------------------- -def _handle_shield_event( - chunk: Any, chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield shield event. - - Processes a shield event chunk and yields a formatted SSE token - event indicating shield validation results. - - Yields a "No Violation" token if no violation is detected, or a - violation message if a shield violation occurs. Increments - validation error metrics when violations are present. - """ - if chunk.event.payload.event_type == "step_complete": - violation = chunk.event.payload.step_details.violation - if not violation: - yield stream_event( - data={ - "id": chunk_id, - "token": "No Violation", - }, - event_type=LLM_VALIDATION_EVENT, - media_type=media_type, - ) - else: - # Metric for LLM validation errors - metrics.llm_calls_validation_errors_total.inc() - violation = ( - f"Violation: {violation.user_message} (Metadata: {violation.metadata})" - ) - yield stream_event( - data={ - "id": chunk_id, - "token": violation, - }, - event_type=LLM_VALIDATION_EVENT, - media_type=media_type, - ) - - -# ----------------------------------- -# Tool Execution handling -# ----------------------------------- -# pylint: disable=R1702,R0912 -def _handle_tool_execution_event( - chunk: Any, chunk_id: int, metadata_map: dict, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield tool call event. - - Processes tool execution events from a streaming chunk and - yields formatted Server-Sent Events (SSE) strings. - - Handles both tool call initiation and completion, including - tool call arguments, responses, and summaries. Extracts and - updates document metadata from knowledge search tool responses - when present. - - Parameters: - chunk_id (int): Unique identifier for the current streaming - chunk. metadata_map (dict): Dictionary to be updated with - document metadata extracted from tool responses. - - Yields: - str: SSE-formatted event strings representing tool call - events and responses. 
- """ - if chunk.event.payload.event_type == "step_start": - yield stream_event( - data={ - "id": chunk_id, - "token": "", - }, - event_type=LLM_TOOL_CALL_EVENT, - media_type=media_type, - ) - - elif chunk.event.payload.event_type == "step_complete": - for t in chunk.event.payload.step_details.tool_calls: - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": t.tool_name, - "arguments": t.arguments, - }, - }, - event_type=LLM_TOOL_CALL_EVENT, - media_type=media_type, - ) - - for r in chunk.event.payload.step_details.tool_responses: - if r.tool_name == "query_from_memory": - inserted_context = content_to_str(r.content) - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "response": f"Fetched {len(inserted_context)} bytes from memory", - }, - }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, - ) - - elif r.tool_name == DEFAULT_RAG_TOOL and r.content: - summary = "" - for i, text_content_item in enumerate(r.content): - if isinstance(text_content_item, TextContentItem): - if i == 0: - summary = text_content_item.text - newline_pos = summary.find("\n") - if newline_pos > 0: - summary = summary[:newline_pos] - for match in METADATA_PATTERN.findall(text_content_item.text): - try: - meta = ast.literal_eval(match) - if "document_id" in meta: - metadata_map[meta["document_id"]] = meta - except Exception: # pylint: disable=broad-except - logger.debug( - "An exception was thrown in processing %s", - match, - ) - - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "summary": summary, - }, - }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, - ) - - else: - yield stream_event( - data={ - "id": chunk_id, - "token": { - "tool_name": r.tool_name, - "response": content_to_str(r.content), - }, - }, - event_type=LLM_TOOL_RESULT_EVENT, - media_type=media_type, - ) - - -# ----------------------------------- -# Catch-all for everything else -# ----------------------------------- -def _handle_heartbeat_event( - chunk_id: int, media_type: str = MEDIA_TYPE_JSON -) -> Iterator[str]: - """ - Yield a heartbeat event. - - Yield a heartbeat event as a Server-Sent Event (SSE) for the - given chunk ID. - - Parameters: - chunk_id (int): The identifier for the current streaming - chunk. - - Yields: - str: SSE-formatted heartbeat event string. - """ - yield stream_event( - data={ - "id": chunk_id, - "token": "heartbeat", - }, - event_type=LLM_TOKEN_EVENT, - media_type=media_type, - ) - - -async def streaming_query_endpoint_handler_base( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-positional-arguments - request: Request, - query_request: QueryRequest, - auth: AuthTuple, - mcp_headers: dict[str, dict[str, str]], - retrieve_response_func: Callable[..., Any], - create_response_generator_func: Callable[..., Any], -) -> StreamingResponse: - """ - Handle streaming query endpoints with common logic. - - This base handler contains all the common logic for streaming query endpoints - and accepts functions for API-specific behavior (Agent API vs Responses API). 
- - Args: - request: The FastAPI request object - query_request: The query request from the user - auth: Authentication tuple (user_id, username, skip_check, token) - mcp_headers: MCP headers for tool integrations - retrieve_response_func: Function to retrieve the streaming response - create_response_generator_func: Function factory that creates the response generator - - Returns: - StreamingResponse: An HTTP streaming response yielding SSE-formatted events - - Raises: - HTTPException: Returns HTTP 500 if unable to connect to Llama Stack - """ - # Nothing interesting in the request - _ = request - - check_configuration_loaded(configuration) - started_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - - # Enforce RBAC: optionally disallow overriding model/provider in requests - validate_model_provider_override(query_request, request.state.authorized_actions) - - # log Llama Stack configuration - logger.info("Llama stack config: %s", configuration.llama_stack_configuration) - - user_id, _user_name, _skip_userid_check, token = auth - - user_conversation: Optional[UserConversation] = None - if query_request.conversation_id: - user_conversation = validate_conversation_ownership( - user_id=user_id, conversation_id=query_request.conversation_id - ) - - if user_conversation is None: - logger.warning( - "User %s attempted to query conversation %s they don't own", - user_id, - query_request.conversation_id, - ) - forbidden_error = ForbiddenResponse.conversation( - action="read", - resource_id=query_request.conversation_id, - user_id=user_id, - ) - return StreamingResponse( - stream_http_error(forbidden_error), - media_type="text/event-stream", - status_code=forbidden_error.status_code, - ) - - try: - # try to get Llama Stack client - client = AsyncLlamaStackClientHolder().get_client() - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - await client.models.list(), - *evaluate_model_hints( - user_conversation=user_conversation, query_request=query_request - ), - ) - - if ( - provider_id == "azure" - and AzureEntraIDManager().is_entra_id_configured - and AzureEntraIDManager().is_token_expired - and AzureEntraIDManager().refresh_token() - ): - if AsyncLlamaStackClientHolder().is_library_client: - client = await AsyncLlamaStackClientHolder().reload_library_client() - else: - azure_config = next( - p.config - for p in await client.providers.list() - if p.provider_type == "remote::azure" - ) - client = AsyncLlamaStackClientHolder().update_provider_data( - { - "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(), - "azure_api_base": str(azure_config.get("api_base")), - } - ) - - response, conversation_id = await retrieve_response_func( - client, - llama_stack_model_id, - query_request, - token, - mcp_headers=mcp_headers, - ) - - metadata_map: dict[str, dict[str, Any]] = {} - - # Create context object for response generator - context = ResponseGeneratorContext( - conversation_id=conversation_id, - user_id=user_id, - skip_userid_check=_skip_userid_check, - model_id=model_id, - provider_id=provider_id, - llama_stack_model_id=llama_stack_model_id, - query_request=query_request, - started_at=started_at, - client=client, - metadata_map=metadata_map, - ) - - # Create the response generator using the provided factory function - response_generator = create_response_generator_func(context) - - # Update metrics for the LLM call - metrics.llm_calls_total.labels(provider_id, model_id).inc() - - # Determine media type for response - # Note: The HTTP Content-Type header 
is always text/event-stream for SSE, - # but the media_type parameter controls how the content is formatted - return StreamingResponse( - response_generator(response), media_type="text/event-stream" - ) - except APIConnectionError as e: - metrics.llm_calls_failures_total.inc() - logger.error("Unable to connect to Llama Stack: %s", e) - error_response = ServiceUnavailableResponse( - backend_name="Llama Stack", - cause=str(e), - ) - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type="text/event-stream", - ) - except RateLimitError as e: - used_model = getattr(e, "model", "") - if used_model: - error_response = QuotaExceededResponse.model(used_model) - else: - error_response = QuotaExceededResponse( - response="The quota has been exceeded", cause=str(e) - ) - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type="text/event-stream", - ) - except APIStatusError as e: - metrics.llm_calls_failures_total.inc() - logger.error("API status error: %s", e) - error_response = InternalServerErrorResponse.generic() - return StreamingResponse( - stream_http_error(error_response), - status_code=error_response.status_code, - media_type=query_request.media_type or MEDIA_TYPE_JSON, - ) diff --git a/src/app/main.py b/src/app/main.py index f011ee22a..c18d0c5b3 100644 --- a/src/app/main.py +++ b/src/app/main.py @@ -8,6 +8,7 @@ from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse from starlette.routing import Mount, Route, WebSocketRoute +from llama_stack_client import APIConnectionError from authorization.azure_token_manager import AzureEntraIDManager import metrics @@ -55,10 +56,23 @@ async def lifespan(_app: FastAPI) -> AsyncIterator[None]: "Token refresh will be retried on next Azure request." ) - await AsyncLlamaStackClientHolder().load(configuration.configuration.llama_stack) + llama_stack_config = configuration.configuration.llama_stack + await AsyncLlamaStackClientHolder().load(llama_stack_config) client = AsyncLlamaStackClientHolder().get_client() # check if the Llama Stack version is supported by the service - await check_llama_stack_version(client) + try: + await check_llama_stack_version(client) + except APIConnectionError as e: + llama_stack_url = llama_stack_config.url + logger.error( + "Failed to connect to Llama Stack at '%s'. " + "Please verify that the 'llama_stack.url' configuration is correct " + "and that the Llama Stack service is running and accessible. 
" + "Original error: %s", + llama_stack_url, + e, + ) + raise # try: # await client.vector_stores.delete(vector_store_id="portal-rag") diff --git a/src/app/routers.py b/src/app/routers.py index 44e7ff5d3..95c91552e 100644 --- a/src/app/routers.py +++ b/src/app/routers.py @@ -5,7 +5,6 @@ from app.endpoints import ( info, models, - query, shields, providers, rags, @@ -16,11 +15,13 @@ streaming_query, authorized, conversations_v2, - conversations_v3, + conversations_v1, metrics, tools, mcp_auth, - # V2 endpoints for Response API support + # Query endpoints for Response API support + query, + # RHEL Lightspeed rlsapi v1 compatibility rlsapi_v1, # A2A (Agent-to-Agent) protocol support a2a, @@ -48,18 +49,14 @@ def include_routers(app: FastAPI) -> None: app.include_router(shields.router, prefix="/v1") app.include_router(providers.router, prefix="/v1") app.include_router(rags.router, prefix="/v1") - # V1 endpoints now use V2 implementations (query and streaming_query are deprecated) + # Query endpoints app.include_router(query.router, prefix="/v1") app.include_router(streaming_query.router, prefix="/v1") app.include_router(config.router, prefix="/v1") app.include_router(feedback.router, prefix="/v1") - # V1 conversations endpoint now uses V3 implementation (conversations is deprecated) - app.include_router(conversations_v3.router, prefix="/v1") + app.include_router(conversations_v1.router, prefix="/v1") app.include_router(conversations_v2.router, prefix="/v2") - # Note: query_v2, streaming_query_v2, and conversations_v3 are now exposed at /v1 above - # The old query, streaming_query, and conversations modules are deprecated - # RHEL Lightspeed rlsapi v1 compatibility - stateless CLA (Command Line Assistant) endpoint app.include_router(rlsapi_v1.router, prefix="/v1") diff --git a/src/authentication/noop.py b/src/authentication/noop.py index 8d0e9bd84..259dd1168 100644 --- a/src/authentication/noop.py +++ b/src/authentication/noop.py @@ -1,6 +1,6 @@ """Manage authentication flow for FastAPI endpoints with no-op auth.""" -from fastapi import Request +from fastapi import HTTPException, Request from constants import ( DEFAULT_USER_NAME, @@ -50,5 +50,7 @@ async def __call__(self, request: Request) -> tuple[str, str, bool, str]: ) # try to extract user ID from request user_id = request.query_params.get("user_id", DEFAULT_USER_UID) + if not user_id: + raise HTTPException(status_code=400, detail="user_id cannot be empty") logger.debug("Retrieved user ID: %s", user_id) return user_id, DEFAULT_USER_NAME, self.skip_userid_check, NO_USER_TOKEN diff --git a/src/authentication/noop_with_token.py b/src/authentication/noop_with_token.py index 3d1b0ed98..6d7777044 100644 --- a/src/authentication/noop_with_token.py +++ b/src/authentication/noop_with_token.py @@ -9,7 +9,7 @@ - Returns a tuple: (user_id, DEFAULT_USER_NAME, user_token). 
""" -from fastapi import Request +from fastapi import HTTPException, Request from constants import ( DEFAULT_USER_NAME, @@ -63,5 +63,7 @@ async def __call__(self, request: Request) -> tuple[str, str, bool, str]: user_token = extract_user_token(request.headers) # try to extract user ID from request user_id = request.query_params.get("user_id", DEFAULT_USER_UID) + if not user_id: + raise HTTPException(status_code=400, detail="user_id cannot be empty") logger.debug("Retrieved user ID: %s", user_id) return user_id, DEFAULT_USER_NAME, self.skip_userid_check, user_token diff --git a/src/cache/postgres_cache.py b/src/cache/postgres_cache.py index cff3d4409..45ff3dce3 100644 --- a/src/cache/postgres_cache.py +++ b/src/cache/postgres_cache.py @@ -8,9 +8,9 @@ from cache.cache_error import CacheError from models.cache_entry import CacheEntry from models.config import PostgreSQLDatabaseConfiguration -from models.responses import ConversationData, ReferencedDocument +from models.responses import ConversationData from utils.connection_decorator import connection -from utils.types import ToolCallSummary, ToolResultSummary +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary from log import get_logger logger = get_logger("cache.postgres_cache") diff --git a/src/cache/sqlite_cache.py b/src/cache/sqlite_cache.py index 0018f7efc..5cf765229 100644 --- a/src/cache/sqlite_cache.py +++ b/src/cache/sqlite_cache.py @@ -9,9 +9,9 @@ from cache.cache_error import CacheError from models.cache_entry import CacheEntry from models.config import SQLiteDatabaseConfiguration -from models.responses import ConversationData, ReferencedDocument +from models.responses import ConversationData from utils.connection_decorator import connection -from utils.types import ToolCallSummary, ToolResultSummary +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary from log import get_logger logger = get_logger("cache.sqlite_cache") diff --git a/src/client.py b/src/client.py index bd48acf39..94bf0349e 100644 --- a/src/client.py +++ b/src/client.py @@ -7,12 +7,14 @@ from typing import Optional import yaml +from fastapi import HTTPException from llama_stack.core.library_client import AsyncLlamaStackAsLibraryClient -from llama_stack_client import AsyncLlamaStackClient # type: ignore +from llama_stack_client import APIConnectionError, AsyncLlamaStackClient # type: ignore from configuration import configuration from llama_stack_configuration import enrich_byok_rag, YamlDumper from models.config import LlamaStackConfiguration +from models.responses import ServiceUnavailableResponse from utils.types import Singleton logger = logging.getLogger(__name__) @@ -70,8 +72,10 @@ def _load_service_client(self, config: LlamaStackConfiguration) -> None: "Using timeout of %d seconds for Llama Stack requests", config.timeout ) api_key = config.api_key.get_secret_value() if config.api_key else None + # Convert AnyHttpUrl to string for the client + base_url = str(config.url) if config.url else None self._lsc = AsyncLlamaStackClient( - base_url=config.url, api_key=api_key, timeout=config.timeout + base_url=base_url, api_key=api_key, timeout=config.timeout ) def _enrich_library_config( @@ -129,9 +133,15 @@ async def reload_library_client(self) -> AsyncLlamaStackClient: """ if not self._config_path: raise RuntimeError("Cannot reload: config path not set") - - client = AsyncLlamaStackAsLibraryClient(self._config_path) - await client.initialize() + try: + client = AsyncLlamaStackAsLibraryClient(self._config_path) + 
await client.initialize() + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**error_response.model_dump()) from e self._lsc = client return client @@ -165,5 +175,6 @@ def update_provider_data(self, updates: dict[str, str]) -> AsyncLlamaStackClient **current_headers, "X-LlamaStack-Provider-Data": json.dumps(provider_data), } + self._lsc = self._lsc.copy(set_default_headers=updated_headers) # type: ignore return self._lsc diff --git a/src/constants.py b/src/constants.py index e4f5de25e..6b43a2ec2 100644 --- a/src/constants.py +++ b/src/constants.py @@ -132,6 +132,13 @@ # Media type constants for streaming responses MEDIA_TYPE_JSON = "application/json" MEDIA_TYPE_TEXT = "text/plain" +MEDIA_TYPE_EVENT_STREAM = "text/event-stream" + +# Streaming event type constants +LLM_TOKEN_EVENT = "token" +LLM_TOOL_CALL_EVENT = "tool_call" +LLM_TOOL_RESULT_EVENT = "tool_result" +LLM_TURN_COMPLETE_EVENT = "turn_complete" # PostgreSQL connection constants # See: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT-SSLMODE diff --git a/src/models/cache_entry.py b/src/models/cache_entry.py index e00069ce4..af1a5fc86 100644 --- a/src/models/cache_entry.py +++ b/src/models/cache_entry.py @@ -2,8 +2,7 @@ from typing import Optional from pydantic import BaseModel -from models.responses import ReferencedDocument -from utils.types import ToolCallSummary, ToolResultSummary +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary class CacheEntry(BaseModel): diff --git a/src/models/config.py b/src/models/config.py index 26672ccd5..f6a649a20 100644 --- a/src/models/config.py +++ b/src/models/config.py @@ -16,6 +16,7 @@ BaseModel, ConfigDict, Field, + field_validator, model_validator, FilePath, AnyHttpUrl, @@ -406,6 +407,36 @@ class ServiceConfiguration(ConfigurationBase): description="Transport Layer Security configuration for HTTPS support", ) + root_path: str = Field( + "", + title="Root path", + description="ASGI root path for serving behind a reverse proxy on a subpath", + ) + + @field_validator("root_path") + @classmethod + def validate_root_path(cls, value: str) -> str: + """Validate root_path format. + + Ensures the root path is either empty or starts with a leading + slash and does not end with a trailing slash. + + Parameters: + value: The root path value to validate. + + Returns: + The validated root path value. + + Raises: + ValueError: If root_path is missing a leading slash or has + a trailing slash. + """ + if value and not value.startswith("/"): + raise ValueError("root_path must start with '/'") + if value.endswith("/"): + raise ValueError("root_path must not end with '/'") + return value + cors: CORSConfiguration = Field( default_factory=lambda: CORSConfiguration( allow_origins=["*"], @@ -533,10 +564,11 @@ class LlamaStackConfiguration(ConfigurationBase): - [Build AI Applications with Llama Stack](https://llamastack.github.io/) """ - url: Optional[str] = Field( + url: Optional[AnyHttpUrl] = Field( None, title="Llama Stack URL", - description="URL to Llama Stack service; used when library mode is disabled", + description="URL to Llama Stack service; used when library mode is disabled. 
" + "Must be a valid HTTP or HTTPS URL.", ) api_key: Optional[SecretStr] = Field( diff --git a/src/models/context.py b/src/models/context.py index a67851679..1d5d56949 100644 --- a/src/models/context.py +++ b/src/models/context.py @@ -1,7 +1,6 @@ """Context objects for internal operations.""" from dataclasses import dataclass -from typing import Any from llama_stack_client import AsyncLlamaStackClient @@ -9,7 +8,7 @@ @dataclass -class ResponseGeneratorContext: # pylint: disable=too-many-instance-attributes +class ResponseGeneratorContext: """ Context object for response generator creation. @@ -21,12 +20,9 @@ class ResponseGeneratorContext: # pylint: disable=too-many-instance-attributes user_id: The user identifier skip_userid_check: Whether to skip user ID validation model_id: The model identifier - provider_id: The provider identifier - llama_stack_model_id: The full llama stack model ID query_request: The query request object started_at: Timestamp when the request started (ISO 8601 format) client: The Llama Stack client for API interactions - metadata_map: Dictionary for storing metadata from tool responses """ # Conversation & User context @@ -34,10 +30,8 @@ class ResponseGeneratorContext: # pylint: disable=too-many-instance-attributes user_id: str skip_userid_check: bool - # Model & Provider info + # Model info model_id: str - provider_id: str - llama_stack_model_id: str # Request & Timing query_request: QueryRequest @@ -45,4 +39,3 @@ class ResponseGeneratorContext: # pylint: disable=too-many-instance-attributes # Dependencies & State client: AsyncLlamaStackClient - metadata_map: dict[str, dict[str, Any]] diff --git a/src/models/database/conversations.py b/src/models/database/conversations.py index fd720b418..b34c9eb53 100644 --- a/src/models/database/conversations.py +++ b/src/models/database/conversations.py @@ -2,8 +2,8 @@ from datetime import datetime +from sqlalchemy import DateTime, ForeignKey, func from sqlalchemy.orm import Mapped, mapped_column -from sqlalchemy import DateTime, func from models.database.base import Base @@ -36,3 +36,33 @@ class UserConversation(Base): # pylint: disable=too-few-public-methods message_count: Mapped[int] = mapped_column(default=0) topic_summary: Mapped[str] = mapped_column(default="") + + +class UserTurn(Base): # pylint: disable=too-few-public-methods + """Model for storing turn-level metadata.""" + + __tablename__ = "user_turn" + + # Foreign key to user_conversation (part of composite primary key) + conversation_id: Mapped[str] = mapped_column( + ForeignKey("user_conversation.id", ondelete="CASCADE"), + primary_key=True, + ) + + # Turn number (1-indexed, first turn is 1) for ordering within a conversation + # Part of composite primary key with conversation_id + turn_number: Mapped[int] = mapped_column(primary_key=True) + + # Timestamps for the turn + started_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + nullable=False, + ) + completed_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + nullable=False, + ) + + provider: Mapped[str] = mapped_column(nullable=False) + + model: Mapped[str] = mapped_column(nullable=False) diff --git a/src/models/requests.py b/src/models/requests.py index 3ac4ede66..4448940af 100644 --- a/src/models/requests.py +++ b/src/models/requests.py @@ -528,3 +528,18 @@ class ConversationUpdateRequest(BaseModel): # Reject unknown fields model_config = {"extra": "forbid"} + + +class ModelFilter(BaseModel): + """Model representing a query parameter to select models by its type. 
+
+    Attributes:
+        model_type: Optional model type filter, such as 'llm' or 'embeddings'.
+    """
+
+    model_config = {"extra": "forbid"}
+    model_type: Optional[str] = Field(
+        None,
+        description="Optional filter to return only models matching this type",
+        examples=["llm", "embeddings"],
+    )
diff --git a/src/models/responses.py b/src/models/responses.py
index 214bb47dc..c412256c9 100644
--- a/src/models/responses.py
+++ b/src/models/responses.py
@@ -2,15 +2,15 @@
 
 """Models for REST API responses."""
 
-from typing import Any, ClassVar, Optional, Union
+from typing import Any, ClassVar, Literal, Optional, Union
 
 from fastapi import status
-from pydantic import AnyUrl, BaseModel, Field
+from pydantic import BaseModel, Field
 from pydantic_core import SchemaError
 
 from models.config import Action, Configuration
 from quota.quota_exceed_error import QuotaExceedError
-from utils.types import RAGChunk, ToolCallSummary, ToolResultSummary
+from utils.types import RAGChunk, ReferencedDocument, ToolCallSummary, ToolResultSummary
 
 SUCCESSFUL_RESPONSE_DESCRIPTION = "Successful response"
 BAD_REQUEST_DESCRIPTION = "Invalid request format"
@@ -25,21 +25,6 @@
 INTERNAL_SERVER_ERROR_DESCRIPTION = "Internal server error"
 
 
-# class ToolCall(BaseModel):
-#     """Model representing a tool call made during response generation."""
-
-#     tool_name: str = Field(description="Name of the tool called")
-#     arguments: dict[str, Any] = Field(description="Arguments passed to the tool")
-#     result: Optional[dict[str, Any]] = Field(None, description="Result from the tool")
-
-
-# class ToolResult(BaseModel):
-#     """Model representing a tool result."""
-
-#     tool_name: str = Field(description="Name of the tool")
-#     result: dict[str, Any] = Field(description="Result from the tool")
-
-
 class AbstractSuccessfulResponse(BaseModel):
     """Base class for all successful response models."""
 
@@ -363,23 +348,6 @@ class ConversationData(BaseModel):
     last_message_timestamp: float
 
 
-class ReferencedDocument(BaseModel):
-    """Model representing a document referenced in generating a response.
-
-    Attributes:
-        doc_url: Url to the referenced doc.
-        doc_title: Title of the referenced doc.
-    """
-
-    doc_url: Optional[AnyUrl] = Field(
-        None, description="URL of the referenced document"
-    )
-
-    doc_title: Optional[str] = Field(
-        None, description="Title of the referenced document"
-    )
-
-
 class QueryResponse(AbstractSuccessfulResponse):
     """Model representing LLM response to a query.
 
@@ -431,7 +399,7 @@ class QueryResponse(AbstractSuccessfulResponse):
 
     truncated: bool = Field(
         False,
-        description="Whether conversation history was truncated",
+        description="Deprecated: Whether conversation history was truncated",
         examples=[False, True],
     )
 
@@ -835,29 +803,79 @@ class AuthorizedResponse(AbstractSuccessfulResponse):
 }
 
 
+class Message(BaseModel):
+    """Model representing a message in a conversation turn.
+
+    Attributes:
+        content: The message content.
+        type: The type of message.
+    """
+
+    content: str = Field(
+        ...,
+        description="The message content",
+        examples=["Hello, how can I help you?"],
+    )
+    type: Literal["user", "assistant", "system", "developer"] = Field(
+        ...,
+        description="The type of message",
+        examples=["user", "assistant", "system", "developer"],
+    )
+
+
+class ConversationTurn(BaseModel):
+    """Model representing a single conversation turn.
+
+    Attributes:
+        messages: List of messages in this turn.
+        tool_calls: List of tool calls made in this turn.
+        tool_results: List of tool results from this turn.
+        provider: Provider identifier used for this turn.
+ model: Model identifier used for this turn. + started_at: ISO 8601 timestamp when the turn started. + completed_at: ISO 8601 timestamp when the turn completed. + """ + + messages: list[Message] = Field( + default_factory=list, + description="List of messages in this turn", + ) + tool_calls: list[ToolCallSummary] = Field( + default_factory=list, + description="List of tool calls made in this turn", + ) + tool_results: list[ToolResultSummary] = Field( + default_factory=list, + description="List of tool results from this turn", + ) + provider: str = Field( + ..., + description="Provider identifier used for this turn", + examples=["openai"], + ) + model: str = Field( + ..., + description="Model identifier used for this turn", + examples=["gpt-4o-mini"], + ) + started_at: str = Field( + ..., + description="ISO 8601 timestamp when the turn started", + examples=["2024-01-01T00:01:00Z"], + ) + completed_at: str = Field( + ..., + description="ISO 8601 timestamp when the turn completed", + examples=["2024-01-01T00:01:05Z"], + ) + + class ConversationResponse(AbstractSuccessfulResponse): """Model representing a response for retrieving a conversation. Attributes: conversation_id: The conversation ID (UUID). - chat_history: The simplified chat history as a list of conversation turns. - - Example: - ```python - conversation_response = ConversationResponse( - conversation_id="123e4567-e89b-12d3-a456-426614174000", - chat_history=[ - { - "messages": [ - {"content": "Hello", "type": "user"}, - {"content": "Hi there!", "type": "assistant"} - ], - "started_at": "2024-01-01T00:01:00Z", - "completed_at": "2024-01-01T00:01:05Z" - } - ] - ) - ``` + chat_history: The chat history as a list of conversation turns. """ conversation_id: str = Field( @@ -866,7 +884,7 @@ class ConversationResponse(AbstractSuccessfulResponse): examples=["c5260aec-4d82-4370-9fdf-05cf908b3f16"], ) - chat_history: list[dict[str, Any]] = Field( + chat_history: list[ConversationTurn] = Field( ..., description="The simplified chat history as a list of conversation turns", examples=[ @@ -875,6 +893,10 @@ class ConversationResponse(AbstractSuccessfulResponse): {"content": "Hello", "type": "user"}, {"content": "Hi there!", "type": "assistant"}, ], + "tool_calls": [], + "tool_results": [], + "provider": "openai", + "model": "gpt-4o-mini", "started_at": "2024-01-01T00:01:00Z", "completed_at": "2024-01-01T00:01:05Z", } @@ -893,6 +915,10 @@ class ConversationResponse(AbstractSuccessfulResponse): {"content": "Hello", "type": "user"}, {"content": "Hi there!", "type": "assistant"}, ], + "tool_calls": [], + "tool_results": [], + "provider": "openai", + "model": "gpt-4o-mini", "started_at": "2024-01-01T00:01:00Z", "completed_at": "2024-01-01T00:01:05Z", } @@ -1759,13 +1785,27 @@ class PromptTooLongResponse(AbstractErrorResponse): } } - def __init__(self, *, response: str = "Prompt is too long", cause: str): + def __init__( + self, + *, + response: str = "Prompt is too long", + cause: str | None = None, + model: str | None = None, + ) -> None: """Initialize a PromptTooLongResponse. Args: response: Short summary of the error. Defaults to "Prompt is too long". - cause: Detailed explanation of what caused the error. + cause: Detailed explanation of what caused the error. If not provided, + will be generated to include model information if model is provided. + model: The model identifier for which the prompt is too long. """ + if cause is None: + if model: + cause = f"The input exceeds the context window size of model '{model}'." 
+ else: + cause = "The prompt exceeds the maximum allowed length." + super().__init__( response=response, cause=cause, @@ -1798,7 +1838,7 @@ class UnprocessableEntityResponse(AbstractErrorResponse): "label": "invalid value", "detail": { "response": "Invalid attribute value", - "cause": "Invalid attatchment type: must be one of ['text/plain', " + "cause": "Invalid attachment type: must be one of ['text/plain', " "'application/json', 'application/yaml', 'application/xml']", }, }, diff --git a/src/observability/README.md b/src/observability/README.md index 29cb90ffa..6153e11f9 100644 --- a/src/observability/README.md +++ b/src/observability/README.md @@ -30,7 +30,7 @@ event_data = InferenceEventData( org_id="12345678", system_id="abc-def-123", request_id="req_xyz789", - cla_version="CLA/0.4.0", + cla_version="CLA/0.4.1", system_os="RHEL", system_version="9.3", system_arch="x86_64", diff --git a/src/runners/uvicorn.py b/src/runners/uvicorn.py index d99b640ae..906129d8d 100644 --- a/src/runners/uvicorn.py +++ b/src/runners/uvicorn.py @@ -15,7 +15,7 @@ def start_uvicorn(configuration: ServiceConfiguration) -> None: Parameters: configuration (ServiceConfiguration): Configuration providing host, - port, workers, and `tls_config` (including `tls_key_path`, + port, workers, `root_path`, and `tls_config` (including `tls_key_path`, `tls_certificate_path`, and `tls_key_password`). TLS fields may be None and will be forwarded to uvicorn.run as provided. """ @@ -30,6 +30,7 @@ def start_uvicorn(configuration: ServiceConfiguration) -> None: host=configuration.host, port=configuration.port, workers=configuration.workers, + root_path=configuration.root_path, log_level=log_level, ssl_keyfile=configuration.tls_config.tls_key_path, ssl_certfile=configuration.tls_config.tls_certificate_path, diff --git a/src/utils/README.md b/src/utils/README.md index d5adb8546..f9266a2ff 100644 --- a/src/utils/README.md +++ b/src/utils/README.md @@ -12,6 +12,9 @@ Common utilities for the project. ## [connection_decorator.py](connection_decorator.py) Decorator that makes sure the object is 'connected' according to it's connected predicate. +## [conversations.py](conversations.py) +Utilities for conversations. + ## [endpoints.py](endpoints.py) Utility functions for endpoint handlers. @@ -24,6 +27,9 @@ Utilities for resolving MCP server authorization headers. ## [mcp_headers.py](mcp_headers.py) MCP headers handling. +## [prompts.py](prompts.py) +Utility functions for system prompts. + ## [query.py](query.py) Utility functions for working with queries. 
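The `utils/conversations.py` module added below rebuilds turn-level chat history from Conversations API items together with the stored `UserTurn` rows. A minimal usage sketch follows, assuming the caller has already fetched the items (oldest first) and the conversation's `UserTurn` rows; `rebuild_history` and its arguments are illustrative and not part of the patch:

```python
from datetime import UTC, datetime

from models.database.conversations import UserTurn
from utils.conversations import build_conversation_turns_from_items


def rebuild_history(items: list, turns_metadata: list[UserTurn]):
    """Illustrative caller: turn raw conversation items into ConversationTurn objects."""
    # Legacy conversations may have more turns than stored UserTurn rows; the
    # helper backfills the older turns with "N/A" provider/model metadata and
    # this timestamp (normally the conversation's created_at would be used).
    created_at = datetime.now(UTC)
    return build_conversation_turns_from_items(items, turns_metadata, created_at)
```
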
diff --git a/src/utils/conversations.py b/src/utils/conversations.py
new file mode 100644
index 000000000..577c3fce7
--- /dev/null
+++ b/src/utils/conversations.py
@@ -0,0 +1,425 @@
+"""Utilities for conversations."""
+
+import json
+from datetime import UTC, datetime
+from typing import Any, Optional, Union, cast
+
+from llama_stack_api.openai_responses import (
+    OpenAIResponseOutputMessageFileSearchToolCall as FileSearchCall,
+    OpenAIResponseOutputMessageFunctionToolCall as FunctionCall,
+    OpenAIResponseOutputMessageMCPCall as MCPCall,
+    OpenAIResponseOutputMessageMCPListTools as MCPListTools,
+    OpenAIResponseOutputMessageWebSearchToolCall as WebSearchCall,
+)
+from llama_stack_client.types.conversations.item_list_response import (
+    ItemListResponse,
+    OpenAIResponseInputFunctionToolCallOutput as FunctionToolCallOutput,
+    OpenAIResponseMcpApprovalRequest as MCPApprovalRequest,
+    OpenAIResponseMcpApprovalResponse as MCPApprovalResponse,
+    OpenAIResponseMessageOutput as MessageOutput,
+)
+
+from constants import DEFAULT_RAG_TOOL
+from models.database.conversations import UserTurn
+from models.responses import ConversationTurn, Message
+from utils.responses import parse_arguments_string
+from utils.types import ToolCallSummary, ToolResultSummary
+
+
+def _extract_text_from_content(content: Union[str, list[Any]]) -> str:
+    """Extract text content from message content.
+
+    Args:
+        content: The content field from a message (can be str or list)
+
+    Returns:
+        Extracted text content as a string
+    """
+    if isinstance(content, str):
+        return content
+
+    text_fragments: list[str] = []
+    if isinstance(content, list):
+        for part in content:
+            if isinstance(part, str):
+                text_fragments.append(part)
+                continue
+            text_value = getattr(part, "text", None)
+            if text_value:
+                text_fragments.append(text_value)
+                continue
+            refusal = getattr(part, "refusal", None)
+            if refusal:
+                text_fragments.append(refusal)
+                continue
+            if isinstance(part, dict):
+                dict_text = part.get("text") or part.get("refusal")
+                if dict_text:
+                    text_fragments.append(str(dict_text))
+
+    return "".join(text_fragments)
+
+
+def _parse_message_item(item: MessageOutput) -> Message:
+    """Parse a message item into a Message object.
+
+    Args:
+        item: The message item from Conversations API
+
+    Returns:
+        Message object with extracted content and role type (e.g. user or assistant)
+    """
+    content_text = _extract_text_from_content(item.content)
+    message_type = item.role
+    return Message(content=content_text, type=message_type)
+
+
+def _build_tool_call_summary_from_item(  # pylint: disable=too-many-return-statements
+    item: ItemListResponse,
+) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]:
+    """Translate Conversations API tool items into ToolCallSummary and ToolResultSummary records.
+
+    Args:
+        item: A tool item from the Conversations API items list
+
+    Returns:
+        A tuple of (ToolCallSummary, ToolResultSummary), one of them possibly None
+        if the item type doesn't provide both call and result information.
+ """ + item_type = getattr(item, "type", None) + + if item_type == "function_call": + function_call_item = cast(FunctionCall, item) + return ( + ToolCallSummary( + id=function_call_item.call_id, + name=function_call_item.name, + args=parse_arguments_string(function_call_item.arguments), + type="function_call", + ), + None, # Function call results come as separate function_call_output items + ) + + if item_type == "file_search_call": + file_search_item = cast(FileSearchCall, item) + response_payload: Optional[dict[str, Any]] = None + if file_search_item.results is not None: + response_payload = { + "results": [result.model_dump() for result in file_search_item.results] + } + return ( + ToolCallSummary( + id=file_search_item.id, + name=DEFAULT_RAG_TOOL, + args={"queries": file_search_item.queries}, + type="file_search_call", + ), + ToolResultSummary( + id=file_search_item.id, + status=file_search_item.status, + content=json.dumps(response_payload) if response_payload else "", + type="file_search_call", + round=1, + ), + ) + + if item_type == "web_search_call": + web_search_item = cast(WebSearchCall, item) + return ( + ToolCallSummary( + id=web_search_item.id, + name="web_search", + args={}, + type="web_search_call", + ), + ToolResultSummary( + id=web_search_item.id, + status=web_search_item.status, + content="", + type="web_search_call", + round=1, + ), + ) + + if item_type == "mcp_call": + mcp_call_item = cast(MCPCall, item) + args = parse_arguments_string(mcp_call_item.arguments) + if mcp_call_item.server_label: + args["server_label"] = mcp_call_item.server_label + content = ( + mcp_call_item.error + if mcp_call_item.error + else (mcp_call_item.output if mcp_call_item.output else "") + ) + + return ( + ToolCallSummary( + id=mcp_call_item.id, + name=mcp_call_item.name, + args=args, + type="mcp_call", + ), + ToolResultSummary( + id=mcp_call_item.id, + status="success" if mcp_call_item.error is None else "failure", + content=content, + type="mcp_call", + round=1, + ), + ) + + if item_type == "mcp_list_tools": + mcp_list_tools_item = cast(MCPListTools, item) + tools_info = [ + { + "name": tool.name, + "description": tool.description, + "input_schema": tool.input_schema, + } + for tool in mcp_list_tools_item.tools + ] + content_dict = { + "server_label": mcp_list_tools_item.server_label, + "tools": tools_info, + } + return ( + ToolCallSummary( + id=mcp_list_tools_item.id, + name="mcp_list_tools", + args={"server_label": mcp_list_tools_item.server_label}, + type="mcp_list_tools", + ), + ToolResultSummary( + id=mcp_list_tools_item.id, + status="success", + content=json.dumps(content_dict), + type="mcp_list_tools", + round=1, + ), + ) + + if item_type == "mcp_approval_request": + approval_request_item = cast(MCPApprovalRequest, item) + args = parse_arguments_string(approval_request_item.arguments) + return ( + ToolCallSummary( + id=approval_request_item.id, + name=approval_request_item.name, + args=args, + type="tool_call", + ), + None, + ) + + if item_type == "mcp_approval_response": + approval_response_item = cast(MCPApprovalResponse, item) + content_dict = {} + if approval_response_item.reason: + content_dict["reason"] = approval_response_item.reason + return ( + None, + ToolResultSummary( + id=approval_response_item.approval_request_id, + status="success" if approval_response_item.approve else "denied", + content=json.dumps(content_dict), + type="mcp_approval_response", + round=1, + ), + ) + + if item_type == "function_call_output": + function_output = cast(FunctionToolCallOutput, item) + 
return ( + None, + ToolResultSummary( + id=function_output.call_id, + status=function_output.status or "success", + content=function_output.output, + type="function_call_output", + round=1, + ), + ) + + return None, None + + +def _create_dummy_turn_metadata(started_at: datetime) -> UserTurn: + """Create a dummy UserTurn instance for legacy conversations without metadata. + + Args: + started_at: Timestamp to use for started_at and completed_at (conversation created_at) + + Returns: + UserTurn instance with default values (N/A for provider/model, provided timestamp) + for legacy conversations that don't have stored turn metadata. + """ + # Create a UserTurn instance with default values for legacy conversations + # Note: conversation_id and turn_number are not used, so we use placeholder values + return UserTurn( + conversation_id="", + turn_number=0, + started_at=started_at, + completed_at=started_at, + provider="N/A", + model="N/A", + ) + + +def _create_turn_from_db_metadata( + turn_metadata: UserTurn, + messages: list[Message], + tool_calls: list[ToolCallSummary], + tool_results: list[ToolResultSummary], +) -> ConversationTurn: + """Create a ConversationTurn from database metadata and accumulated items. + + Args: + turn_metadata: Database UserTurn object with metadata + messages: List of messages for this turn + tool_calls: List of tool calls for this turn + tool_results: List of tool results for this turn + + Returns: + ConversationTurn object with all metadata populated + """ + started_at = turn_metadata.started_at.astimezone(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + completed_at = turn_metadata.completed_at.astimezone(UTC).strftime( + "%Y-%m-%dT%H:%M:%SZ" + ) + return ConversationTurn( + messages=messages, + tool_calls=tool_calls, + tool_results=tool_results, + provider=turn_metadata.provider, + model=turn_metadata.model, + started_at=started_at, + completed_at=completed_at, + ) + + +def _group_items_into_turns( + items: list[ItemListResponse], +) -> list[list[ItemListResponse]]: + """Group conversation items into turns. + + Each turn starts with a user message. All subsequent messages and tool items + belong to that turn until the next user message. + + Args: + items: Conversation items list from Conversations API, oldest first + + Returns: + List of turns, where each turn is a list of items belonging to that turn + """ + turns: list[list[ItemListResponse]] = [] + current_turn_items: list[ItemListResponse] = [] + + for item in items: + item_type = getattr(item, "type", None) + + # User message marks the beginning of a new turn + if item_type == "message": + message_item = cast(MessageOutput, item) + if message_item.role == "user": + # If we have accumulated items, finish the previous turn + if current_turn_items: + turns.append(current_turn_items) + current_turn_items = [] + + # Start new turn with this user message + current_turn_items = [item] + else: + # Add non-user message to current turn + current_turn_items.append(item) + else: + # Add tool-related items to current turn + current_turn_items.append(item) + + # Add final turn if there are items + if current_turn_items: + turns.append(current_turn_items) + + return turns + + +def _process_turn_items( + turn_items: list[ItemListResponse], +) -> tuple[list[Message], list[ToolCallSummary], list[ToolResultSummary]]: + """Process items from a single turn into messages, tool calls, and tool results. 
+ + Args: + turn_items: List of items belonging to a single turn + + Returns: + Tuple of (messages, tool_calls, tool_results) + """ + messages: list[Message] = [] + tool_calls: list[ToolCallSummary] = [] + tool_results: list[ToolResultSummary] = [] + + for item in turn_items: + item_type = getattr(item, "type", None) + + if item_type == "message": + message_item = cast(MessageOutput, item) + message = _parse_message_item(message_item) + messages.append(message) + else: + tool_call, tool_result = _build_tool_call_summary_from_item(item) + if tool_call is not None: + tool_calls.append(tool_call) + if tool_result is not None: + tool_results.append(tool_result) + + return messages, tool_calls, tool_results + + +def build_conversation_turns_from_items( + items: list[ItemListResponse], + turns_metadata: list[UserTurn], + conversation_start_time: datetime, +) -> list[ConversationTurn]: + """Build conversation turns from Conversations API items and turns metadata. + + Args: + items: Conversation items list from Conversations API, oldest first + turns_metadata: List of UserTurn database objects ordered by turn_number. + Can be empty for legacy conversations without stored metadata. + For extended legacy conversations, only the newer turns have metadata. + conversation_start_time: Timestamp to use for dummy metadata in legacy conversations. + Typically the conversation's created_at timestamp. + + Returns: + List of ConversationTurn objects, oldest first + """ + # Group items into turns first + turn_items_list = _group_items_into_turns(items) + + # Calculate how many legacy turns don't have metadata + total_turns = len(turn_items_list) + legacy_turns_count = total_turns - len(turns_metadata) + + # Process each turn with its corresponding metadata + chat_history: list[ConversationTurn] = [] + for turn_index, turn_items in enumerate(turn_items_list): + # Process items into messages, tool calls, and tool results + messages, tool_calls, tool_results = _process_turn_items(turn_items) + + # Select appropriate metadata for this turn + if turn_index < legacy_turns_count: + turn_metadata = _create_dummy_turn_metadata(conversation_start_time) + else: + metadata_index = turn_index - legacy_turns_count + turn_metadata = turns_metadata[metadata_index] + + # Create ConversationTurn from metadata and processed items + chat_history.append( + _create_turn_from_db_metadata( + turn_metadata, + messages, + tool_calls, + tool_results, + ) + ) + + return chat_history diff --git a/src/utils/endpoints.py b/src/utils/endpoints.py index 016cf95fc..332002eeb 100644 --- a/src/utils/endpoints.py +++ b/src/utils/endpoints.py @@ -1,31 +1,22 @@ """Utility functions for endpoint handlers.""" -from contextlib import suppress -from datetime import UTC, datetime from typing import Any, Optional from fastapi import HTTPException -from llama_stack_client._client import AsyncLlamaStackClient -from llama_stack_client.lib.agents.agent import AsyncAgent from pydantic import AnyUrl, ValidationError +from sqlalchemy.exc import SQLAlchemyError import constants from app.database import get_session from configuration import AppConfig, LogicError from log import get_logger -from models.cache_entry import CacheEntry -from models.config import Action from models.database.conversations import UserConversation -from models.requests import QueryRequest from models.responses import ( ForbiddenResponse, InternalServerErrorResponse, NotFoundResponse, - ReferencedDocument, - UnprocessableEntityResponse, ) -from utils.suid import get_suid -from utils.types 
import GraniteToolParser, TurnSummary +from utils.types import ReferencedDocument, TurnSummary logger = get_logger(__name__) @@ -121,322 +112,88 @@ def can_access_conversation( return owner_user_id == user_id -def check_configuration_loaded(config: AppConfig) -> None: - """ - Raise an error if the configuration is not loaded. - - Args: - config (AppConfig): The application configuration. - - Raises: - HTTPException: If configuration is missing. - """ - try: - _ = config.configuration - except LogicError as e: - response = InternalServerErrorResponse.configuration_not_loaded() - raise HTTPException(**response.model_dump()) from e - - -def get_system_prompt(query_request: QueryRequest, config: AppConfig) -> str: - """ - Resolve which system prompt to use for a query. - - Precedence: - 1. If the request includes `system_prompt`, that value is returned (highest - precedence). - 2. Else if the application configuration provides a customization - `system_prompt`, that value is returned. - 3. Otherwise the module default `constants.DEFAULT_SYSTEM_PROMPT` is - returned (lowest precedence). - - If configuration disables per-request system prompts - (config.customization.disable_query_system_prompt) and the incoming - `query_request` contains a `system_prompt`, an HTTP 422 Unprocessable - Entity is raised instructing the client to remove the field. - - Parameters: - query_request (QueryRequest): The incoming query payload; may contain a - per-request `system_prompt`. - config (AppConfig): Application configuration which may include - customization flags and a default `system_prompt`. - - Returns: - str: The resolved system prompt to apply to the request. +def validate_and_retrieve_conversation( + normalized_conv_id: str, + user_id: str, + others_allowed: bool, +) -> UserConversation: """ - system_prompt_disabled = ( - config.customization is not None - and config.customization.disable_query_system_prompt - ) - if system_prompt_disabled and query_request.system_prompt: - response = UnprocessableEntityResponse( - response="System prompt customization is disabled", - cause=( - "This instance does not support customizing the system prompt in the " - "query request (disable_query_system_prompt is set). Please remove the " - "system_prompt field from your request." - ), - ) - raise HTTPException(**response.model_dump()) - - if query_request.system_prompt: - # Query taking precedence over configuration is the only behavior that - # makes sense here - if the configuration wants precedence, it can - # disable query system prompt altogether with disable_system_prompt. - return query_request.system_prompt - - # profile takes precedence for setting prompt - if ( - config.customization is not None - and config.customization.custom_profile is not None - ): - prompt = config.customization.custom_profile.get_prompts().get("default") - if prompt: - return prompt + Validate access and retrieve a conversation from the database. - if ( - config.customization is not None - and config.customization.system_prompt is not None - ): - return config.customization.system_prompt - - # default system prompt has the lowest precedence - return constants.DEFAULT_SYSTEM_PROMPT - - -def get_topic_summary_system_prompt(config: AppConfig) -> str: - """ - Get the topic summary system prompt. + This function performs access validation, retrieves the conversation, + and handles all error cases (forbidden access, not found, database errors). 
- Parameters: - config (AppConfig): Application configuration from which to read - customization/profile settings. + Args: + normalized_conv_id: The normalized conversation ID to retrieve. + user_id: The ID of the user requesting access. + others_allowed: Whether the user can access conversations owned by others. Returns: - str: The topic summary system prompt from the active custom profile if - set, otherwise the default prompt. - """ - # profile takes precedence for setting prompt - if ( - config.customization is not None - and config.customization.custom_profile is not None - ): - prompt = config.customization.custom_profile.get_prompts().get("topic_summary") - if prompt: - return prompt - - return constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT - - -def validate_model_provider_override( - query_request: QueryRequest, authorized_actions: set[Action] | frozenset[Action] -) -> None: - """Validate whether model/provider overrides are allowed by RBAC. + UserConversation: The conversation object if found and accessible. Raises: - HTTPException: HTTP 403 if the request includes model or provider and - the caller lacks Action.MODEL_OVERRIDE permission. + HTTPException: + - 403 Forbidden: If user doesn't have access to the conversation. + - 404 Not Found: If conversation doesn't exist in database. + - 500 Internal Server Error: If database error occurs. """ - if (query_request.model is not None or query_request.provider is not None) and ( - Action.MODEL_OVERRIDE not in authorized_actions + if not can_access_conversation( + normalized_conv_id, + user_id, + others_allowed=others_allowed, ): - response = ForbiddenResponse.model_override() + logger.warning( + "User %s attempted to read conversation %s they don't have access to", + user_id, + normalized_conv_id, + ) + response = ForbiddenResponse.conversation( + action="read", + resource_id=normalized_conv_id, + user_id=user_id, + ) raise HTTPException(**response.model_dump()) - -# # pylint: disable=R0913,R0917 -def store_conversation_into_cache( - config: AppConfig, - user_id: str, - conversation_id: str, - cache_entry: CacheEntry, - _skip_userid_check: bool, - topic_summary: Optional[str], -) -> None: - """ - Store one part of conversation into conversation history cache. - - If a conversation cache type is configured but the cache instance is not - initialized, the function logs a warning and returns without persisting - anything. - - Parameters: - config (AppConfig): Application configuration that may contain - conversation cache settings and instance. - user_id (str): Owner identifier used as the cache key. - conversation_id (str): Conversation identifier used as the cache key. - cache_entry (CacheEntry): Entry to insert or append to the conversation history. - _skip_userid_check (bool): When true, bypasses enforcing that the cache - operation must match the user id. - topic_summary (Optional[str]): Optional topic summary to store alongside - the conversation; ignored if None or empty. 
- """ - if config.conversation_cache_configuration.type is not None: - cache = config.conversation_cache - if cache is None: - logger.warning("Conversation cache configured but not initialized") - return - cache.insert_or_append( - user_id, conversation_id, cache_entry, _skip_userid_check - ) - if topic_summary and len(topic_summary) > 0: - cache.set_topic_summary( - user_id, conversation_id, topic_summary, _skip_userid_check + # If reached this, user is authorized to retrieve this conversation + try: + user_conversation = retrieve_conversation(normalized_conv_id) + if user_conversation is None: + logger.error( + "Conversation %s not found in database.", + normalized_conv_id, ) - - -# # pylint: disable=R0913,R0917,unused-argument -async def get_agent( - client: AsyncLlamaStackClient, - model_id: str, - system_prompt: str, - available_input_shields: list[str], - available_output_shields: list[str], - conversation_id: Optional[str], - no_tools: bool = False, -) -> tuple[AsyncAgent, str, str]: - """ - Create or reuse an AsyncAgent with session persistence. - - Return the agent, conversation and session IDs. - - If a conversation_id is provided, the function attempts to retrieve the - existing agent and, on success, rebinds a newly created agent instance to - that conversation (deleting the temporary/orphan agent) and returns the - first existing session_id for the conversation. If no conversation_id is - provided or the existing agent cannot be retrieved, a new agent and session - are created. - - Parameters: - model_id (str): Identifier of the model to instantiate the agent with. - system_prompt (str): Instructions/system prompt to initialize the agent with. - - available_input_shields (list[str]): Input shields to apply to the - agent; empty list used if None/empty. - - available_output_shields (list[str]): Output shields to apply to the - agent; empty list used if None/empty. - - conversation_id (Optional[str]): If provided, attempt to reuse the agent - for this conversation; otherwise a new conversation_id is created. - - no_tools (bool): When True, disables tool parsing for the agent (uses no tool parser). - - Returns: - tuple[AsyncAgent, str, str]: A tuple of (agent, conversation_id, session_id). - - Raises: - HTTPException: Raises HTTP 404 Not Found if an attempt to reuse a - conversation succeeds in retrieving the agent but no sessions are found - for that conversation. - - Side effects: - - May delete an orphan agent when rebinding a newly created agent to an - existing conversation_id. - - Initializes the agent and may create a new session. - """ - existing_agent_id = None - if conversation_id: - with suppress(ValueError): - # agent_response = await client.agents.retrieve(agent_id=conversation_id) - # existing_agent_id = agent_response.agent_id - ... 
- - logger.debug("Creating new agent") - # pylint: disable=unexpected-keyword-arg,no-member - agent = AsyncAgent( - client, # type: ignore[arg-type] - model=model_id, - instructions=system_prompt, - # type: ignore[call-arg] - # input_shields=available_input_shields if available_input_shields else [], - # type: ignore[call-arg] - # output_shields=available_output_shields if available_output_shields else [], - tool_parser=None if no_tools else GraniteToolParser.get_parser(model_id), - enable_session_persistence=True, # type: ignore[call-arg] - ) - await agent.initialize() # type: ignore[attr-defined] - - if existing_agent_id and conversation_id: - logger.debug("Existing conversation ID: %s", conversation_id) - logger.debug("Existing agent ID: %s", existing_agent_id) - # orphan_agent_id = agent.agent_id - agent._agent_id = conversation_id # type: ignore[assignment] # pylint: disable=protected-access - # await client.agents.delete(agent_id=orphan_agent_id) - # sessions_response = await client.agents.session.list(agent_id=conversation_id) - # logger.info("session response: %s", sessions_response) - try: - # session_id = str(sessions_response.data[0]["session_id"]) - ... - except IndexError as e: - logger.error("No sessions found for conversation %s", conversation_id) response = NotFoundResponse( - resource="conversation", resource_id=conversation_id + resource="conversation", resource_id=normalized_conv_id ) - raise HTTPException(**response.model_dump()) from e - else: - # conversation_id = agent.agent_id - # pylint: enable=unexpected-keyword-arg,no-member - logger.debug("New conversation ID: %s", conversation_id) - session_id = await agent.create_session(get_suid()) - logger.debug("New session ID: %s", session_id) - - return agent, conversation_id, session_id # type: ignore[return-value] - - -async def get_temp_agent( - client: AsyncLlamaStackClient, - model_id: str, - system_prompt: str, -) -> tuple[AsyncAgent, str, str]: - """Create a temporary agent with new agent_id and session_id. - - This function creates a new agent without persistence, shields, or tools. - Useful for temporary operations or one-off queries, such as validating a - question or generating a summary. + raise HTTPException(**response.model_dump()) - Parameters: - client: The AsyncLlamaStackClient to use for the request. - model_id: The ID of the model to use. - system_prompt: The system prompt/instructions for the agent. - - Returns: - tuple[AsyncAgent, str]: A tuple containing the agent and session_id. 
- """ - logger.debug("Creating temporary agent") - # pylint: disable=unexpected-keyword-arg,no-member - agent = AsyncAgent( - client, # type: ignore[arg-type] - model=model_id, - instructions=system_prompt, - # type: ignore[call-arg] # Temporary agent doesn't need persistence - # enable_session_persistence=False, - ) - await agent.initialize() # type: ignore[attr-defined] - - # Generate new IDs for the temporary agent - # conversation_id = agent.agent_id - conversation_id = None - # pylint: enable=unexpected-keyword-arg,no-member - session_id = await agent.create_session(get_suid()) + except SQLAlchemyError as e: + logger.error( + "Database error occurred while retrieving conversation %s: %s", + normalized_conv_id, + str(e), + ) + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e - return agent, session_id, conversation_id # type: ignore[return-value] + return user_conversation -def create_rag_chunks_dict(summary: TurnSummary) -> list[dict[str, Any]]: +def check_configuration_loaded(config: AppConfig) -> None: """ - Create dictionary representation of RAG chunks for streaming response. + Raise an error if the configuration is not loaded. Args: - summary: TurnSummary containing RAG chunks + config (AppConfig): The application configuration. - Returns: - List of dictionaries with content, source, and score + Raises: + HTTPException: If configuration is missing. """ - return [ - {"content": chunk.content, "source": chunk.source, "score": chunk.score} - for chunk in summary.rag_chunks - ] + try: + _ = config.configuration + except LogicError as e: + response = InternalServerErrorResponse.configuration_not_loaded() + raise HTTPException(**response.model_dump()) from e def _process_http_source( @@ -701,128 +458,3 @@ def create_referenced_documents_from_chunks( ReferencedDocument(doc_url=doc_url, doc_title=doc_title) for doc_url, doc_title in document_entries ] - - -# pylint: disable=R0913,R0917,too-many-locals -async def cleanup_after_streaming( - user_id: str, - conversation_id: str, - model_id: str, - provider_id: str, - llama_stack_model_id: str, - query_request: QueryRequest, - summary: TurnSummary, - metadata_map: dict[str, Any], - started_at: str, - client: AsyncLlamaStackClient, - config: AppConfig, - skip_userid_check: bool, - get_topic_summary_func: Any, - is_transcripts_enabled_func: Any, - store_transcript_func: Any, - persist_user_conversation_details_func: Any, - rag_chunks: Optional[list[dict[str, Any]]] = None, -) -> None: - """ - Perform cleanup tasks after streaming is complete. - - This function handles all database and cache operations after the streaming - response has been sent to the client. It is shared between Agent API and - Responses API streaming implementations. 
- - Args: - user_id: ID of the user making the request - conversation_id: ID of the conversation - model_id: ID of the model used - provider_id: ID of the provider used - llama_stack_model_id: Full Llama Stack model ID (provider/model format) - query_request: The original query request - summary: Summary of the turn including LLM response and tool calls - metadata_map: Metadata about referenced documents - started_at: Timestamp when the request started - client: AsyncLlamaStackClient instance - config: Application configuration - skip_userid_check: Whether to skip user ID checks - get_topic_summary_func: Function to get topic summary (API-specific) - is_transcripts_enabled_func: Function to check if transcripts are enabled - store_transcript_func: Function to store transcript - persist_user_conversation_details_func: Function to persist conversation details - rag_chunks: Optional RAG chunks dict - """ - # Store transcript if enabled - if not is_transcripts_enabled_func(): - logger.debug("Transcript collection is disabled in the configuration") - else: - # Prepare attachments - attachments = query_request.attachments or [] - - # Determine rag_chunks: use provided value or empty list - transcript_rag_chunks = rag_chunks if rag_chunks is not None else [] - - store_transcript_func( - user_id=user_id, - conversation_id=conversation_id, - model_id=model_id, - provider_id=provider_id, - query_is_valid=True, - query=query_request.query, - query_request=query_request, - summary=summary, - rag_chunks=transcript_rag_chunks, - truncated=False, - attachments=attachments, - ) - - # Get the initial topic summary for the conversation - topic_summary = None - with get_session() as session: - existing_conversation = ( - session.query(UserConversation).filter_by(id=conversation_id).first() - ) - if not existing_conversation: - # Check if topic summary should be generated (default: True) - should_generate = query_request.generate_topic_summary - - if should_generate: - logger.debug("Generating topic summary for new conversation") - topic_summary = await get_topic_summary_func( - query_request.query, client, llama_stack_model_id - ) - else: - logger.debug("Topic summary generation disabled by request parameter") - topic_summary = None - - completed_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - - referenced_documents = create_referenced_documents_with_metadata( - summary, metadata_map - ) - - cache_entry = CacheEntry( - query=query_request.query, - response=summary.llm_response, - provider=provider_id, - model=model_id, - started_at=started_at, - completed_at=completed_at, - referenced_documents=referenced_documents if referenced_documents else None, - tool_calls=summary.tool_calls if summary.tool_calls else None, - tool_results=summary.tool_results if summary.tool_results else None, - ) - - store_conversation_into_cache( - config, - user_id, - conversation_id, - cache_entry, - skip_userid_check, - topic_summary, - ) - - persist_user_conversation_details_func( - user_id=user_id, - conversation_id=conversation_id, - model=model_id, - provider_id=provider_id, - topic_summary=topic_summary, - ) diff --git a/src/utils/mcp_headers.py b/src/utils/mcp_headers.py index c0f8d9d51..af6623ec5 100644 --- a/src/utils/mcp_headers.py +++ b/src/utils/mcp_headers.py @@ -10,8 +10,10 @@ logger = logging.getLogger("app.endpoints.dependencies") +type McpHeaders = dict[str, dict[str, str]] -async def mcp_headers_dependency(request: Request) -> dict[str, dict[str, str]]: + +async def mcp_headers_dependency(request: Request) 
-> McpHeaders: """Get the MCP headers dependency to passed to mcp servers. mcp headers is a json dictionary or mcp url paths and their respective headers @@ -25,7 +27,7 @@ async def mcp_headers_dependency(request: Request) -> dict[str, dict[str, str]]: return extract_mcp_headers(request) -def extract_mcp_headers(request: Request) -> dict[str, dict[str, str]]: +def extract_mcp_headers(request: Request) -> McpHeaders: """Extract mcp headers from MCP-HEADERS header. If the header is missing, contains invalid JSON, or the decoded @@ -56,8 +58,8 @@ def extract_mcp_headers(request: Request) -> dict[str, dict[str, str]]: def handle_mcp_headers_with_toolgroups( - mcp_headers: dict[str, dict[str, str]], config: AppConfig -) -> dict[str, dict[str, str]]: + mcp_headers: McpHeaders, config: AppConfig +) -> McpHeaders: """Process MCP headers by converting toolgroup names to URLs. This function takes MCP headers where keys can be either valid URLs or diff --git a/src/utils/prompts.py b/src/utils/prompts.py new file mode 100644 index 000000000..0b6410b75 --- /dev/null +++ b/src/utils/prompts.py @@ -0,0 +1,97 @@ +"""Utility functions for system prompts.""" + +from fastapi import HTTPException + +import constants +from configuration import AppConfig +from models.requests import QueryRequest +from models.responses import UnprocessableEntityResponse + + +def get_system_prompt(query_request: QueryRequest, config: AppConfig) -> str: + """ + Resolve which system prompt to use for a query. + + Precedence (highest to lowest): + 1. Per-request `system_prompt` from `query_request.system_prompt`. + 2. The `custom_profile`'s "default" prompt (when present), accessed via + `config.customization.custom_profile.get_prompts().get("default")`. + 3. `config.customization.system_prompt` from application configuration. + 4. The module default `constants.DEFAULT_SYSTEM_PROMPT` (lowest precedence). + + If configuration disables per-request system prompts + (config.customization.disable_query_system_prompt) and the incoming + `query_request` contains a `system_prompt`, an HTTP 422 Unprocessable + Entity is raised instructing the client to remove the field. + + Parameters: + query_request (QueryRequest): The incoming query payload; may contain a + per-request `system_prompt`. + config (AppConfig): Application configuration which may include + customization flags, a custom profile, and a default `system_prompt`. + + Returns: + str: The resolved system prompt to apply to the request. + """ + system_prompt_disabled = ( + config.customization is not None + and config.customization.disable_query_system_prompt + ) + if system_prompt_disabled and query_request.system_prompt: + response = UnprocessableEntityResponse( + response="System prompt customization is disabled", + cause=( + "This instance does not support customizing the system prompt in the " + "query request (disable_query_system_prompt is set). Please remove the " + "system_prompt field from your request." + ), + ) + raise HTTPException(**response.model_dump()) + + if query_request.system_prompt: + # Query taking precedence over configuration is the only behavior that + # makes sense here - if the configuration wants precedence, it can + # disable query system prompt altogether with disable_query_system_prompt. 
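+        # Illustrative (hypothetical values): a request carrying
+        # system_prompt="You are terse." resolves to "You are terse." even
+        # when config.customization.system_prompt is also set.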
+ return query_request.system_prompt + + # profile takes precedence for setting prompt + if ( + config.customization is not None + and config.customization.custom_profile is not None + ): + prompt = config.customization.custom_profile.get_prompts().get("default") + if prompt: + return prompt + + if ( + config.customization is not None + and config.customization.system_prompt is not None + ): + return config.customization.system_prompt + + # default system prompt has the lowest precedence + return constants.DEFAULT_SYSTEM_PROMPT + + +def get_topic_summary_system_prompt(config: AppConfig) -> str: + """ + Get the topic summary system prompt. + + Parameters: + config (AppConfig): Application configuration from which to read + customization/profile settings. + + Returns: + str: The topic summary system prompt from the active custom profile if + set, otherwise the default prompt. + """ + # profile takes precedence for setting prompt + if ( + config.customization is not None + and config.customization.custom_profile is not None + ): + prompt = config.customization.custom_profile.get_prompts().get("topic_summary") + if prompt: + return prompt + + return constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT diff --git a/src/utils/query.py b/src/utils/query.py index 6f2644988..39e39a152 100644 --- a/src/utils/query.py +++ b/src/utils/query.py @@ -1,121 +1,640 @@ """Utility functions for working with queries.""" -import json -from typing import Any, AsyncIterator, Optional - -from llama_stack_api.openai_responses import ( - OpenAIResponseContentPartOutputText, - OpenAIResponseMessage, - OpenAIResponseObject, - OpenAIResponseObjectStream, - OpenAIResponseObjectStreamResponseCompleted, - OpenAIResponseObjectStreamResponseContentPartAdded, - OpenAIResponseObjectStreamResponseCreated, - OpenAIResponseObjectStreamResponseOutputTextDelta, - OpenAIResponseObjectStreamResponseOutputTextDone, - OpenAIResponseOutputMessageContentOutputText, +import logging +from datetime import UTC, datetime +from typing import Optional + +from llama_stack_client import ( + APIConnectionError, + APIStatusError as LLSApiStatusError, + AsyncLlamaStackClient, +) +from openai._exceptions import APIStatusError as OpenAIAPIStatusError +from llama_stack_client.types import ModelListResponse, Shield + +from fastapi import HTTPException +from sqlalchemy import func +from configuration import AppConfig, configuration +from models.cache_entry import CacheEntry +from models.config import Action +from models.database.conversations import UserConversation, UserTurn +import constants +from models.requests import Attachment, QueryRequest +from models.responses import ( + AbstractErrorResponse, + ForbiddenResponse, + InternalServerErrorResponse, + NotFoundResponse, + PromptTooLongResponse, + QuotaExceededResponse, + ServiceUnavailableResponse, + UnprocessableEntityResponse, ) +from authorization.azure_token_manager import AzureEntraIDManager +from cache.cache_error import CacheError +import psycopg2 +import sqlite3 +from sqlalchemy.exc import SQLAlchemyError +from app.database import get_session +from client import AsyncLlamaStackClientHolder +from utils.transcripts import store_transcript +from utils.quota import consume_tokens +from utils.suid import normalize_conversation_id +from utils.token_counter import TokenCounter +from utils.types import TurnSummary + +logger = logging.getLogger(__name__) + + +def store_conversation_into_cache( + config: AppConfig, + user_id: str, + conversation_id: str, + cache_entry: CacheEntry, + _skip_userid_check: bool, 
+ topic_summary: Optional[str], +) -> None: + """ + Store one part of conversation into conversation history cache. + + If a conversation cache type is configured but the cache instance is not + initialized, the function logs a warning and returns without persisting + anything. + + Parameters: + config (AppConfig): Application configuration that may contain + conversation cache settings and instance. + user_id (str): Owner identifier used as the cache key. + conversation_id (str): Conversation identifier used as the cache key. + cache_entry (CacheEntry): Entry to insert or append to the conversation history. + _skip_userid_check (bool): When true, bypasses enforcing that the cache + operation must match the user id. + topic_summary (Optional[str]): Optional topic summary to store alongside + the conversation; ignored if None or empty. + """ + if config.conversation_cache_configuration.type is not None: + cache = config.conversation_cache + if cache is None: + logger.warning("Conversation cache configured but not initialized") + return + cache.insert_or_append( + user_id, conversation_id, cache_entry, _skip_userid_check + ) + if topic_summary: + cache.set_topic_summary( + user_id, conversation_id, topic_summary, _skip_userid_check + ) + + +def select_model_and_provider_id( + models: ModelListResponse, model_id: Optional[str], provider_id: Optional[str] +) -> tuple[str, str, str]: + """ + Select the model ID and provider ID based on the request or available models. + + Determine and return the appropriate model and provider IDs for + a query request. + + If the request specifies both model and provider IDs, those are used. + Otherwise, defaults from configuration are applied. If neither is + available, selects the first available LLM model from the provided model + list. Validates that the selected model exists among the available models. + + Returns: + A tuple containing the combined model ID (in the format + "provider/model"), and its separated parts: the model label and the provider ID. + + Raises: + HTTPException: If no suitable LLM model is found or the selected model is not available. 
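+
+    Example (illustrative; the model and provider names are assumptions):
+
+        >>> select_model_and_provider_id(models, "gpt-4o-mini", "openai")
+        ('openai/gpt-4o-mini', 'gpt-4o-mini', 'openai')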
+ """ + # If model_id and provider_id are provided in the request, use them + + # If model_id is not provided in the request, check the configuration + if not model_id or not provider_id: + logger.debug( + "No model ID or provider ID specified in request, checking configuration" + ) + model_id = configuration.inference.default_model # type: ignore[reportAttributeAccessIssue] + provider_id = ( + configuration.inference.default_provider # type: ignore[reportAttributeAccessIssue] + ) + + # If no model is specified in the request or configuration, use the first available LLM + if not model_id or not provider_id: + logger.debug( + "No model ID or provider ID specified in request or configuration, " + "using the first available LLM" + ) + try: + model = next( + m + for m in models + if m.custom_metadata and m.custom_metadata.get("model_type") == "llm" + ) + model_id = model.id + # Extract provider_id from custom_metadata + provider_id = ( + str(model.custom_metadata.get("provider_id", "")) + if model.custom_metadata + else "" + ) + logger.info("Selected model: %s", model) + model_label = model_id.split("/", 1)[1] if "/" in model_id else model_id + return model_id, model_label, provider_id + except (StopIteration, AttributeError) as e: + message = "No LLM model found in available models" + logger.error(message) + response = NotFoundResponse(resource="model", resource_id=model_id or "") + raise HTTPException(**response.model_dump()) from e + + llama_stack_model_id = f"{provider_id}/{model_id}" + # Validate that the model_id and provider_id are in the available models + logger.debug("Searching for model: %s, provider: %s", model_id, provider_id) + # TODO: Create separate validation of provider + if not any( + m.id in (llama_stack_model_id, model_id) + and ( + m.custom_metadata + and str(m.custom_metadata.get("provider_id", "")) == provider_id + ) + for m in models + ): + message = f"Model {model_id} from provider {provider_id} not found in available models" + logger.error(message) + response = NotFoundResponse(resource="model", resource_id=model_id) + raise HTTPException(**response.model_dump()) + return llama_stack_model_id, model_id, provider_id + + +def validate_model_provider_override( + query_request: QueryRequest, authorized_actions: set[Action] | frozenset[Action] +) -> None: + """Validate whether model/provider overrides are allowed by RBAC. + + Raises: + HTTPException: HTTP 403 if the request includes model or provider and + the caller lacks Action.MODEL_OVERRIDE permission. + """ + if (query_request.model is not None or query_request.provider is not None) and ( + Action.MODEL_OVERRIDE not in authorized_actions + ): + response = ForbiddenResponse.model_override() + raise HTTPException(**response.model_dump()) + +def _is_inout_shield(shield: Shield) -> bool: + """ + Determine if the shield identifier indicates an input/output shield. + + Parameters: + shield (Shield): The shield to check. + + Returns: + bool: True if the shield identifier starts with "inout_", otherwise False. + """ + return shield.identifier.startswith("inout_") + + +def is_output_shield(shield: Shield) -> bool: + """ + Determine if the shield is for monitoring output. + + Return True if the given shield is classified as an output or + inout shield. -def parse_arguments_string(arguments_str: str) -> dict[str, Any]: + A shield is considered an output shield if its identifier + starts with "output_" or "inout_". 
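+
+    Example (hypothetical identifiers; Shield is shown with only the
+    relevant field populated):
+
+        >>> is_output_shield(Shield(identifier="output_llama-guard"))
+        True
+        >>> is_output_shield(Shield(identifier="input_llama-guard"))
+        False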
+ """ + return _is_inout_shield(shield) or shield.identifier.startswith("output_") + + +def is_input_shield(shield: Shield) -> bool: + """ + Determine if the shield is for monitoring input. + + Return True if the shield is classified as an input or inout + shield. + + Parameters: + shield (Shield): The shield identifier to classify. + + Returns: + bool: True if the shield is for input or both input/output monitoring; False otherwise. """ - Try to parse an arguments string into a dictionary. + return _is_inout_shield(shield) or not is_output_shield(shield) - Attempts multiple parsing strategies: - 1. Try parsing the string as-is as JSON (if it's already valid JSON) - 2. Try wrapping the string in {} if it doesn't start with { - 3. Return {"args": arguments_str} if all attempts fail + +def evaluate_model_hints( + user_conversation: Optional[UserConversation], + query_request: QueryRequest, +) -> tuple[Optional[str], Optional[str]]: + """Evaluate model hints from user conversation.""" + model_id: Optional[str] = query_request.model + provider_id: Optional[str] = query_request.provider + + if user_conversation is not None: + if query_request.model is not None: + if query_request.model != user_conversation.last_used_model: + logger.debug( + "Model specified in request: %s, preferring it over user conversation model %s", + query_request.model, + user_conversation.last_used_model, + ) + else: + logger.debug( + "No model specified in request, using latest model from user conversation: %s", + user_conversation.last_used_model, + ) + model_id = user_conversation.last_used_model + + if query_request.provider is not None: + if query_request.provider != user_conversation.last_used_provider: + logger.debug( + "Provider specified in request: %s, " + "preferring it over user conversation provider %s", + query_request.provider, + user_conversation.last_used_provider, + ) + else: + logger.debug( + "No provider specified in request, " + "using latest provider from user conversation: %s", + user_conversation.last_used_provider, + ) + provider_id = user_conversation.last_used_provider + + return model_id, provider_id + + +async def update_azure_token( + client: AsyncLlamaStackClient, +) -> AsyncLlamaStackClient: + """ + Update the client with a fresh Azure token. + + Updates the client with the fresh Azure token. Should be called after + verifying that token refresh is needed and successful. 
Args: - arguments_str: The arguments string to parse + client: The current AsyncLlamaStackClient instance Returns: - Parsed dictionary if successful, otherwise {"args": arguments_str} + AsyncLlamaStackClient: The client instance (reloaded or updated with fresh token) """ - # Try parsing as-is first (most common case) + if AsyncLlamaStackClientHolder().is_library_client: + return await AsyncLlamaStackClientHolder().reload_library_client() try: - parsed = json.loads(arguments_str) - if isinstance(parsed, dict): - return parsed - except (json.JSONDecodeError, ValueError): - pass - - # Try wrapping in {} if string doesn't start with { - # This handles cases where the string is just the content without braces - stripped = arguments_str.strip() - if stripped and not stripped.startswith("{"): - try: - wrapped = "{" + stripped + "}" - parsed = json.loads(wrapped) - if isinstance(parsed, dict): - return parsed - except (json.JSONDecodeError, ValueError): - pass - - # Fallback: return wrapped in arguments key - return {"args": arguments_str} - - -async def create_violation_stream( - message: str, - shield_model: Optional[str] = None, -) -> AsyncIterator[OpenAIResponseObjectStream]: - """Generate a minimal streaming response for cases where input is blocked by a shield. - - This yields only the essential streaming events to indicate that the input was rejected. - Dummy item identifiers are used solely for protocol compliance and are not used later. - """ - response_id = "resp_shield_violation" - - # Create the response object with empty output at the beginning - response_obj = OpenAIResponseObject( - id=response_id, - created_at=0, # not used - model=shield_model or "shield", - output=[], - status="in_progress", - ) - yield OpenAIResponseObjectStreamResponseCreated(response=response_obj) - - # Triggers empty initial token - yield OpenAIResponseObjectStreamResponseContentPartAdded( - content_index=0, - response_id=response_id, - item_id="msg_shield_violation_1", - output_index=0, - part=OpenAIResponseContentPartOutputText(text=""), - sequence_number=0, - ) + providers = await client.providers.list() + azure_config = next( + p.config for p in providers if p.provider_type == "remote::azure" + ) + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**error_response.model_dump()) from e + except LLSApiStatusError as e: + error_response = InternalServerErrorResponse.generic() + raise HTTPException(**error_response.model_dump()) from e - # Text delta - yield OpenAIResponseObjectStreamResponseOutputTextDelta( - content_index=1, - delta=message, - item_id="msg_shield_violation_2", - output_index=1, - sequence_number=1, + return AsyncLlamaStackClientHolder().update_provider_data( + { + "azure_api_key": AzureEntraIDManager().access_token.get_secret_value(), + "azure_api_base": str(azure_config.get("api_base")), + } ) - # Output text done - yield OpenAIResponseObjectStreamResponseOutputTextDone( - content_index=2, - text=message, - item_id="msg_shield_violation_3", - output_index=2, - sequence_number=2, + +def prepare_input(query_request: QueryRequest) -> str: + """ + Prepare input text for Responses API by appending attachments. + + Takes the query text and appends any attachment content with type labels. 
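+
+    Example (illustrative sketch; the attachment values are hypothetical):
+
+        >>> request = QueryRequest(
+        ...     query="Why is the pod failing?",
+        ...     attachments=[Attachment(attachment_type="log",
+        ...                             content_type="text/plain",
+        ...                             content="crash loop detected")],
+        ... )
+        >>> prepare_input(request)
+        'Why is the pod failing?\n\n[Attachment: log]\ncrash loop detected'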
+ + Args: + query_request: The query request containing the query and optional attachments + + Returns: + str: The input text with attachments appended (if any) + """ + input_text = query_request.query + if query_request.attachments: + for attachment in query_request.attachments: + # Append attachment content with type label + input_text += ( + f"\n\n[Attachment: {attachment.attachment_type}]\n{attachment.content}" + ) + return input_text + + +def store_query_results( # pylint: disable=too-many-arguments,too-many-locals + user_id: str, + conversation_id: str, + model: str, + started_at: str, + completed_at: str, + summary: TurnSummary, + query_request: QueryRequest, + configuration: AppConfig, + skip_userid_check: bool, + topic_summary: Optional[str], +) -> None: + """ + Store query results: transcript, conversation details, and cache. + + This function handles post-query storage operations including: + - Storing transcripts (if enabled) + - Persisting conversation details to database + - Storing conversation in cache + + Args: + user_id: The authenticated user ID + conversation_id: The conversation ID + model: The model identifier + started_at: ISO formatted timestamp when the request started + completed_at: ISO formatted timestamp when the request completed + summary: Summary of the turn including LLM response and tool calls + query_request: The original query request + configuration: Application configuration + skip_userid_check: Whether to skip user ID validation + topic_summary: Optional topic summary for the conversation + + Raises: + HTTPException: On any database, cache, or IO errors during processing + """ + provider_id, model_id = extract_provider_and_model_from_model_id(model) + # Store transcript if enabled + if is_transcripts_enabled(): + try: + # Convert RAG chunks to dictionary format once for reuse + logger.info("Storing transcript") + rag_chunks_dict = [chunk.model_dump() for chunk in summary.rag_chunks] + store_transcript( + user_id=user_id, + conversation_id=conversation_id, + model_id=model_id, + provider_id=provider_id, + query_is_valid=True, # TODO(lucasagomes): implement as part of query validation + query=query_request.query, + query_request=query_request, + summary=summary, + rag_chunks=rag_chunks_dict, + truncated=False, # TODO(lucasagomes): implement truncation as part of quota work + attachments=query_request.attachments or [], + ) + except (IOError, OSError) as e: + logger.exception("Error storing transcript: %s", e) + response = InternalServerErrorResponse.generic() + raise HTTPException(**response.model_dump()) from e + else: + logger.debug("Transcript collection is disabled in the configuration") + + # Persist conversation details + try: + logger.info("Persisting conversation details") + # Extract provider_id from model_id (format: "provider/model") + persist_user_conversation_details( + user_id=user_id, + conversation_id=conversation_id, + started_at=started_at, + completed_at=completed_at, + model_id=model_id, + provider_id=provider_id, + topic_summary=topic_summary, + ) + except SQLAlchemyError as e: + logger.exception("Error persisting conversation details.") + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e + + # Store conversation in cache + try: + cache_entry = CacheEntry( + query=query_request.query, + response=summary.llm_response, + provider=provider_id, + model=model_id, + started_at=started_at, + completed_at=completed_at, + referenced_documents=summary.referenced_documents, + 
tool_calls=summary.tool_calls, + tool_results=summary.tool_results, + ) + + logger.info("Storing conversation in cache") + store_conversation_into_cache( + config=configuration, + user_id=user_id, + conversation_id=conversation_id, + cache_entry=cache_entry, + _skip_userid_check=skip_userid_check, + topic_summary=topic_summary, + ) + except (CacheError, ValueError, psycopg2.Error, sqlite3.Error) as e: + logger.exception("Error storing conversation in cache: %s", e) + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e + + +def consume_query_tokens( + user_id: str, + model_id: str, + token_usage: TokenCounter, + configuration: AppConfig, +) -> None: + """Consume tokens from quota limiters for a query. + + This function handles token consumption with proper error handling. + It should be called after token usage has been determined but before + returning the response to the client (especially for streaming responses). + + Args: + user_id: The authenticated user ID + model_id: The full model identifier in "provider/model" format + token_usage: TokenCounter object with input and output token counts + configuration: Application configuration + + Raises: + HTTPException: On database errors during token consumption + """ + provider, model = extract_provider_and_model_from_model_id(model_id) + try: + logger.info("Consuming tokens") + consume_tokens( + quota_limiters=configuration.quota_limiters, + token_usage_history=configuration.token_usage_history, + user_id=user_id, + input_tokens=token_usage.input_tokens, + output_tokens=token_usage.output_tokens, + model_id=model, + provider_id=provider, + ) + except (psycopg2.Error, sqlite3.Error, ValueError) as e: + logger.exception("Error consuming tokens: %s", e) + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e + + +def is_transcripts_enabled() -> bool: + """Check if transcripts is enabled. + + Returns: + bool: True if transcripts is enabled, False otherwise. + """ + return configuration.user_data_collection_configuration.transcripts_enabled + + +def persist_user_conversation_details( + user_id: str, + conversation_id: str, + started_at: str, + completed_at: str, + model_id: str, + provider_id: str, + topic_summary: Optional[str], +) -> None: + """Associate conversation to user in the database. 
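+
+    Creates the conversation row on first use; on later turns it updates the
+    last-used model/provider and the message count. A UserTurn row is
+    recorded for every call.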
+ + Args: + user_id: The authenticated user ID + conversation_id: The conversation ID + started_at: The timestamp when the conversation started + completed_at: The timestamp when the conversation completed + model_id: The model identifier + provider_id: The provider identifier + topic_summary: Optional topic summary for the conversation + """ + # Normalize the conversation ID (strip 'conv_' prefix if present) + normalized_id = normalize_conversation_id(conversation_id) + logger.debug( + "persist_user_conversation_details - original conv_id: %s, normalized: %s, user: %s", + conversation_id, + normalized_id, + user_id, ) - # Fill the output when message is completed - response_obj.output = [ - OpenAIResponseMessage( - id="msg_shield_violation_4", - content=[OpenAIResponseOutputMessageContentOutputText(text=message)], - role="assistant", - status="completed", + with get_session() as session: + existing_conversation = ( + session.query(UserConversation).filter_by(id=normalized_id).first() ) - ] - # Update status to completed - response_obj.status = "completed" - # Completed response triggers turn complete event - yield OpenAIResponseObjectStreamResponseCompleted(response=response_obj) + if not existing_conversation: + conversation = UserConversation( + id=normalized_id, + user_id=user_id, + last_used_model=model_id, + last_used_provider=provider_id, + topic_summary=topic_summary or "", + message_count=1, + ) + session.add(conversation) + logger.debug( + "Associated conversation %s to user %s", normalized_id, user_id + ) + else: + existing_conversation.last_used_model = model_id + existing_conversation.last_used_provider = provider_id + existing_conversation.last_message_at = datetime.now(UTC) + existing_conversation.message_count += 1 + logger.debug( + "Updating existing conversation in DB - ID: %s, User: %s, Messages: %d", + normalized_id, + user_id, + existing_conversation.message_count, + ) + + max_turn_number = ( + session.query(func.max(UserTurn.turn_number)) + .filter_by(conversation_id=normalized_id) + .scalar() + ) + turn_number = (max_turn_number or 0) + 1 + turn = UserTurn( + conversation_id=normalized_id, + turn_number=turn_number, + started_at=datetime.fromisoformat(started_at), + completed_at=datetime.fromisoformat(completed_at), + provider=provider_id, + model=model_id, + ) + session.add(turn) + logger.debug( + "Created conversation turn - Conversation: %s, Turn: %d", + normalized_id, + turn_number, + ) + + session.commit() + logger.debug( + "Successfully committed conversation %s to database", normalized_id + ) + + +def validate_attachments_metadata(attachments: list[Attachment]) -> None: + """Validate the attachments metadata provided in the request. + + Raises: + HTTPException: If any attachment has an invalid type or content type, + an HTTP 422 error is raised. 
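+
+    Example (hypothetical values; assumes "log" and "text/plain" are among
+    the allowed constants):
+
+        >>> validate_attachments_metadata(
+        ...     [Attachment(attachment_type="log",
+        ...                 content_type="text/plain",
+        ...                 content="oc get pods output ...")]
+        ... )  # returns None; invalid types raise HTTPException (422)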
+    """
+    for attachment in attachments:
+        if attachment.attachment_type not in constants.ATTACHMENT_TYPES:
+            message = (
+                f"Invalid attachment type {attachment.attachment_type}: "
+                f"must be one of {constants.ATTACHMENT_TYPES}"
+            )
+            logger.error(message)
+            response = UnprocessableEntityResponse(
+                response="Invalid attribute value", cause=message
+            )
+            raise HTTPException(**response.model_dump())
+        if attachment.content_type not in constants.ATTACHMENT_CONTENT_TYPES:
+            message = (
+                f"Invalid attachment content type {attachment.content_type}: "
+                f"must be one of {constants.ATTACHMENT_CONTENT_TYPES}"
+            )
+            logger.error(message)
+            response = UnprocessableEntityResponse(
+                response="Invalid attribute value", cause=message
+            )
+            raise HTTPException(**response.model_dump())
+
+
+def extract_provider_and_model_from_model_id(model_id: str) -> tuple[str, str]:
+    """Extract the provider and model from a model ID.
+
+    Args:
+        model_id: The model ID to extract from, in "provider/model" format.
+
+    Returns:
+        tuple[str, str]: The provider and model, in that order. The provider
+        is an empty string when the model ID has no provider prefix.
+    """
+    split = model_id.split("/", 1)
+    if len(split) == 2:
+        return split[0], split[1]
+    return "", model_id
+
+
+def handle_known_apistatus_errors(
+    error: LLSApiStatusError | OpenAIAPIStatusError, model_id: str
+) -> AbstractErrorResponse:
+    """Handle known API status errors from both Llama Stack and OpenAI.
+
+    Args:
+        error: The API status error to handle (can be from Llama Stack or OpenAI).
+        model_id: The model ID for quota exceeded responses.
+
+    Returns:
+        AbstractErrorResponse: The error response model.
+    """
+    if error.status_code == 400:
+        error_message = getattr(error, "message", str(error))
+        if (
+            "context_length" in error_message.lower()
+            or "context length" in error_message.lower()
+        ):
+            return PromptTooLongResponse(model=model_id)
+    elif error.status_code == 429:
+        return QuotaExceededResponse.model(model_id)
+    return InternalServerErrorResponse.generic()
diff --git a/src/utils/quota.py b/src/utils/quota.py
index 524f4d33b..69a8ca761 100644
--- a/src/utils/quota.py
+++ b/src/utils/quota.py
@@ -1,5 +1,6 @@
 """Quota handling helper functions."""
 
+import sqlite3
 from typing import Optional
 
 import psycopg2
@@ -74,7 +75,7 @@ def check_tokens_available(quota_limiters: list[QuotaLimiter], user_id: str) ->
         # check available tokens using all configured quota limiters
         for quota_limiter in quota_limiters:
             quota_limiter.ensure_available_quota(subject_id=user_id)
-    except psycopg2.Error as pg_error:
+    except (psycopg2.Error, sqlite3.Error) as pg_error:
         message = "Error communicating with quota database backend"
         logger.error(message)
         response = InternalServerErrorResponse.database_error()
@@ -97,12 +98,20 @@ def get_available_quotas(
     Returns:
         Dictionary mapping quota limiter class names to available token counts.
+
+    Raises:
+        HTTPException: With status 500 if database communication fails.
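+
+    Example (illustrative; limiter class names and counts depend on the
+    deployment's quota configuration):
+
+        >>> get_available_quotas(quota_limiters, "user-1")
+        {'UserQuotaLimiter': 9500}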
""" available_quotas: dict[str, int] = {} # retrieve available tokens using all configured quota limiters for quota_limiter in quota_limiters: name = quota_limiter.__class__.__name__ - available_quota = quota_limiter.available_quota(user_id) - available_quotas[name] = available_quota + try: + available_quota = quota_limiter.available_quota(user_id) + available_quotas[name] = available_quota + except (psycopg2.Error, sqlite3.Error) as e: + logger.exception("Database error getting available quotas.") + response = InternalServerErrorResponse.database_error() + raise HTTPException(**response.model_dump()) from e return available_quotas diff --git a/src/utils/responses.py b/src/utils/responses.py index 88437af07..4055eea6d 100644 --- a/src/utils/responses.py +++ b/src/utils/responses.py @@ -1,29 +1,64 @@ """Utility functions for processing Responses API output.""" -from typing import Any +import json +import logging +from typing import Any, Optional, cast + +from fastapi import HTTPException +from llama_stack_api.openai_responses import ( + OpenAIResponseObject, + OpenAIResponseOutput, + OpenAIResponseOutputMessageFileSearchToolCall as FileSearchCall, + OpenAIResponseOutputMessageFunctionToolCall as FunctionCall, + OpenAIResponseOutputMessageMCPCall as MCPCall, + OpenAIResponseOutputMessageMCPListTools as MCPListTools, + OpenAIResponseOutputMessageWebSearchToolCall as WebSearchCall, + OpenAIResponseMCPApprovalRequest as MCPApprovalRequest, + OpenAIResponseMCPApprovalResponse as MCPApprovalResponse, +) +from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient + +import constants +import metrics +from configuration import AppConfig, configuration +from constants import DEFAULT_RAG_TOOL +from models.config import ModelContextProtocolServer +from models.database.conversations import UserConversation +from models.requests import QueryRequest +from models.responses import ( + InternalServerErrorResponse, + ServiceUnavailableResponse, +) +from utils.prompts import get_system_prompt, get_topic_summary_system_prompt +from utils.query import ( + evaluate_model_hints, + extract_provider_and_model_from_model_id, + handle_known_apistatus_errors, + prepare_input, + select_model_and_provider_id, +) +from utils.mcp_headers import McpHeaders +from utils.suid import to_llama_stack_conversation_id +from utils.token_counter import TokenCounter +from utils.types import ( + RAGChunk, + ReferencedDocument, + ResponsesApiParams, + ToolCallSummary, + ToolResultSummary, +) + +logger = logging.getLogger(__name__) def extract_text_from_response_output_item(output_item: Any) -> str: """Extract assistant message text from a Responses API output item. - This function parses output items from the OpenAI-compatible Responses API - and extracts text content from assistant messages. It handles multiple content - formats including string content, content arrays with text parts, and refusal - messages. - - Parameters: - output_item: A Responses API output item (typically from response.output array). - Expected to have attributes like type, role, and content. + Args: + output_item: A Responses API output item from response.output array. Returns: - str: The extracted text content from the assistant message. Returns an empty - string if the output_item is not an assistant message or contains no text. - - Example: - >>> for output_item in response.output: - ... text = extract_text_from_response_output_item(output_item) - ... if text: - ... 
print(text) + Extracted text content, or empty string if not an assistant message. """ if getattr(output_item, "type", None) != "message": return "" @@ -54,3 +89,702 @@ def extract_text_from_response_output_item(output_item: Any) -> str: text_fragments.append(str(dict_text)) return "".join(text_fragments) + + +async def get_topic_summary( # pylint: disable=too-many-nested-blocks + question: str, client: AsyncLlamaStackClient, model_id: str +) -> str: + """Get a topic summary for a question using Responses API. + + Args: + question: The question to generate a topic summary for + client: The AsyncLlamaStackClient to use for the request + model_id: The llama stack model ID (full format: provider/model) + + Returns: + The topic summary for the question + """ + topic_summary_system_prompt = get_topic_summary_system_prompt(configuration) + + # Use Responses API to generate topic summary + try: + response = cast( + OpenAIResponseObject, + await client.responses.create( + input=question, + model=model_id, + instructions=topic_summary_system_prompt, + stream=False, + store=False, # Don't store topic summary requests + ), + ) + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**error_response.model_dump()) from e + except APIStatusError as e: + error_response = handle_known_apistatus_errors(e, model_id) + raise HTTPException(**error_response.model_dump()) from e + + # Extract text from response output + summary_text = "".join( + extract_text_from_response_output_item(output_item) + for output_item in response.output + ) + + return summary_text.strip() if summary_text else "" + + +async def prepare_tools( + client: AsyncLlamaStackClient, + query_request: QueryRequest, + token: str, + config: AppConfig, + mcp_headers: Optional[McpHeaders] = None, +) -> Optional[list[dict[str, Any]]]: + """Prepare tools for Responses API including RAG and MCP tools. 
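+
+    Combines file_search tool definitions built from vector store IDs (see
+    get_rag_tools) with MCP tool definitions built from the configured
+    servers (see get_mcp_tools).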
+ + Args: + client: The Llama Stack client instance + query_request: The user's query request + token: Authentication token for MCP tools + config: Configuration object containing MCP server settings + mcp_headers: Per-request headers for MCP servers + + Returns: + List of tool configurations, or None if no_tools is True or no tools available + """ + if query_request.no_tools: + return None + + toolgroups = [] + # Get vector stores for RAG tools - use specified ones or fetch all + if query_request.vector_store_ids: + vector_store_ids = query_request.vector_store_ids + else: + try: + vector_stores = await client.vector_stores.list() + vector_store_ids = [vector_store.id for vector_store in vector_stores.data] + except APIConnectionError as e: + error_response = ServiceUnavailableResponse( + backend_name="Llama Stack", + cause=str(e), + ) + raise HTTPException(**error_response.model_dump()) from e + except APIStatusError as e: + error_response = InternalServerErrorResponse.generic() + raise HTTPException(**error_response.model_dump()) from e + + # Add RAG tools if vector stores are available + rag_tools = get_rag_tools(vector_store_ids) + if rag_tools: + toolgroups.extend(rag_tools) + + # Add MCP server tools + mcp_tools = get_mcp_tools(config.mcp_servers, token, mcp_headers) + if mcp_tools: + toolgroups.extend(mcp_tools) + logger.debug( + "Configured %d MCP tools: %s", + len(mcp_tools), + [tool.get("server_label", "unknown") for tool in mcp_tools], + ) + # Convert empty list to None for consistency with existing behavior + if not toolgroups: + return None + + return toolgroups + + +async def prepare_responses_params( # pylint: disable=too-many-arguments,too-many-locals,too-many-positional-arguments + client: AsyncLlamaStackClient, + query_request: QueryRequest, + user_conversation: Optional[UserConversation], + token: str, + mcp_headers: Optional[McpHeaders] = None, + stream: bool = False, + store: bool = True, +) -> ResponsesApiParams: + """Prepare API request parameters for Responses API. 
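+
+    Selects the model, resolves the system prompt, prepares tools and the
+    input text, and creates a new llama-stack conversation when no
+    conversation_id was supplied.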
+
+    Args:
+        client: The AsyncLlamaStackClient instance (must be initialized by caller)
+        query_request: The query request containing the user's question
+        user_conversation: The user conversation if conversation_id was provided, None otherwise
+        token: The authentication token for authorization
+        mcp_headers: Optional per-request headers for MCP (Model Context Protocol) servers
+        stream: Whether to stream the response
+        store: Whether to store the response
+
+    Returns:
+        ResponsesApiParams containing all prepared parameters for the API request
+    """
+    # Select model and provider
+    try:
+        models = await client.models.list()
+    except APIConnectionError as e:
+        error_response = ServiceUnavailableResponse(
+            backend_name="Llama Stack",
+            cause=str(e),
+        )
+        raise HTTPException(**error_response.model_dump()) from e
+    except APIStatusError as e:
+        error_response = InternalServerErrorResponse.generic()
+        raise HTTPException(**error_response.model_dump()) from e
+
+    llama_stack_model_id, _model_id, _provider_id = select_model_and_provider_id(
+        models,
+        *evaluate_model_hints(
+            user_conversation=user_conversation, query_request=query_request
+        ),
+    )
+
+    # Use system prompt from request or default one
+    system_prompt = get_system_prompt(query_request, configuration)
+    logger.debug("Using system prompt: %s", system_prompt)
+
+    # Prepare tools for responses API
+    tools = await prepare_tools(
+        client, query_request, token, configuration, mcp_headers
+    )
+
+    # Prepare input for Responses API
+    input_text = prepare_input(query_request)
+
+    # Handle conversation ID for Responses API
+    # Create conversation upfront if not provided
+    conversation_id = query_request.conversation_id
+    if conversation_id:
+        # Conversation ID was provided - convert to llama-stack format
+        logger.debug("Using existing conversation ID: %s", conversation_id)
+        llama_stack_conv_id = to_llama_stack_conversation_id(conversation_id)
+    else:
+        # No conversation_id provided - create a new conversation first
+        logger.debug("No conversation_id provided, creating new conversation")
+        try:
+            conversation = await client.conversations.create(metadata={})
+        except APIConnectionError as e:
+            error_response = ServiceUnavailableResponse(
+                backend_name="Llama Stack",
+                cause=str(e),
+            )
+            raise HTTPException(**error_response.model_dump()) from e
+        except APIStatusError as e:
+            error_response = InternalServerErrorResponse.generic()
+            raise HTTPException(**error_response.model_dump()) from e
+
+        llama_stack_conv_id = conversation.id
+        logger.info(
+            "Created new conversation with ID: %s",
+            llama_stack_conv_id,
+        )
+
+    return ResponsesApiParams(
+        input=input_text,
+        model=llama_stack_model_id,
+        instructions=system_prompt,
+        tools=tools,
+        conversation=llama_stack_conv_id,
+        stream=stream,
+        store=store,
+    )
+
+
+def get_rag_tools(vector_store_ids: list[str]) -> Optional[list[dict[str, Any]]]:
+    """Convert vector store IDs to tools format for Responses API.
+
+    Args:
+        vector_store_ids: List of vector store identifiers
+
+    Returns:
+        List containing file_search tool configuration, or None if no vector stores provided
+    """
+    if not vector_store_ids:
+        return None
+
+    return [
+        {
+            "type": "file_search",
+            "vector_store_ids": vector_store_ids,
+            "max_num_results": 10,
+        }
+    ]
+
+
+def get_mcp_tools(
+    mcp_servers: list[ModelContextProtocolServer],
+    token: str | None = None,
+    mcp_headers: Optional[McpHeaders] = None,
+) -> list[dict[str, Any]]:
+    """Convert MCP servers to tools format for Responses API.
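+
+    Example of one resulting tool definition (shape illustrative; the server
+    values are hypothetical):
+
+        {"type": "mcp", "server_label": "filesystem",
+         "server_url": "http://localhost:3000/sse",
+         "require_approval": "never",
+         "headers": {"Authorization": "Bearer <token>"}}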
+ + Args: + mcp_servers: List of MCP server configurations + token: Optional authentication token for MCP server authorization + mcp_headers: Optional per-request headers for MCP servers, keyed by server URL + + Returns: + List of MCP tool definitions with server details and optional auth headers + """ + + def _get_token_value(original: str, header: str) -> str | None: + """Convert to header value.""" + match original: + case constants.MCP_AUTH_KUBERNETES: + # use k8s token + if token is None or token == "": + return None + return f"Bearer {token}" + case constants.MCP_AUTH_CLIENT: + # use client provided token + if mcp_headers is None: + return None + c_headers = mcp_headers.get(mcp_server.name, None) + if c_headers is None: + return None + return c_headers.get(header, None) + case _: + # use provided + return original + + tools = [] + for mcp_server in mcp_servers: + # Base tool definition + tool_def = { + "type": "mcp", + "server_label": mcp_server.name, + "server_url": mcp_server.url, + "require_approval": "never", + } + + # Build headers + headers = {} + for name, value in mcp_server.resolved_authorization_headers.items(): + # for each defined header + h_value = _get_token_value(value, name) + # only add the header if we got value + if h_value is not None: + headers[name] = h_value + + # Skip server if auth headers were configured but not all could be resolved + if mcp_server.authorization_headers and len(headers) != len( + mcp_server.authorization_headers + ): + logger.warning( + "Skipping MCP server %s: required %d auth headers but only resolved %d", + mcp_server.name, + len(mcp_server.authorization_headers), + len(headers), + ) + continue + + if len(headers) > 0: + # add headers to tool definition + tool_def["headers"] = headers # type: ignore[index] + # collect tools info + tools.append(tool_def) + return tools + + +def parse_referenced_documents( + response: Optional[OpenAIResponseObject], +) -> list[ReferencedDocument]: + """Parse referenced documents from Responses API response. 
+ + Args: + response: The OpenAI Response API response object + + Returns: + List of referenced documents with doc_url and doc_title + """ + documents: list[ReferencedDocument] = [] + # Use a set to track unique documents by (doc_url, doc_title) tuple + seen_docs: set[tuple[Optional[str], Optional[str]]] = set() + + # Handle None response (e.g., when agent fails) + if response is None or not response.output: + return documents + + for output_item in response.output: + item_type = getattr(output_item, "type", None) + + if item_type == "file_search_call": + results = getattr(output_item, "results", []) or [] + for result in results: + # Handle both object and dict access + if isinstance(result, dict): + attributes = result.get("attributes", {}) + else: + attributes = getattr(result, "attributes", {}) + + # Try to get URL from attributes + # Look for common URL fields in attributes + doc_url = ( + attributes.get("doc_url") + or attributes.get("docs_url") + or attributes.get("url") + or attributes.get("link") + ) + doc_title = attributes.get("title") + + if doc_title or doc_url: + # Treat empty string as None for URL to satisfy Optional[AnyUrl] + final_url = doc_url if doc_url else None + if (final_url, doc_title) not in seen_docs: + documents.append( + ReferencedDocument(doc_url=final_url, doc_title=doc_title) + ) + seen_docs.add((final_url, doc_title)) + + return documents + + +def extract_token_usage( + response: Optional[OpenAIResponseObject], model_id: str +) -> TokenCounter: + """Extract token usage from Responses API response and update metrics. + + Args: + response: The OpenAI Response API response object + model_id: The model identifier for metrics labeling + + Returns: + TokenCounter with input_tokens and output_tokens + """ + token_counter = TokenCounter() + token_counter.llm_calls = 1 + provider, model = extract_provider_and_model_from_model_id(model_id) + + # Extract usage from the response if available + # Note: usage attribute exists at runtime but may not be in type definitions + usage = getattr(response, "usage", None) if response else None + if usage: + try: + # Handle both dict and object cases due to llama_stack inconsistency: + # - When llama_stack converts to chat_completions internally, usage is a dict + # - When using proper Responses API, usage should be an object + # TODO: Remove dict handling once llama_stack standardizes on object type # pylint: disable=fixme + if isinstance(usage, dict): + input_tokens = usage.get("input_tokens", 0) + output_tokens = usage.get("output_tokens", 0) + else: + # Object with attributes (expected final behavior) + input_tokens = getattr(usage, "input_tokens", 0) + output_tokens = getattr(usage, "output_tokens", 0) + # Only set if we got valid values + if input_tokens or output_tokens: + token_counter.input_tokens = input_tokens or 0 + token_counter.output_tokens = output_tokens or 0 + + logger.debug( + "Extracted token usage from Responses API: input=%d, output=%d", + token_counter.input_tokens, + token_counter.output_tokens, + ) + + # Update Prometheus metrics only when we have actual usage data + try: + metrics.llm_token_sent_total.labels(provider, model).inc( + token_counter.input_tokens + ) + metrics.llm_token_received_total.labels(provider, model).inc( + token_counter.output_tokens + ) + except (AttributeError, TypeError, ValueError) as e: + logger.warning("Failed to update token metrics: %s", e) + _increment_llm_call_metric(provider, model) + else: + logger.debug( + "Usage object exists but tokens are 0 or None, treating as no 
usage info" + ) + # Still increment the call counter + _increment_llm_call_metric(provider, model) + except (AttributeError, KeyError, TypeError) as e: + logger.warning( + "Failed to extract token usage from response.usage: %s. Usage value: %s", + e, + usage, + ) + # Still increment the call counter + _increment_llm_call_metric(provider, model) + else: + # No usage information available - this is expected when llama stack + # internally converts to chat_completions + logger.debug( + "No usage information in Responses API response, token counts will be 0" + ) + # token_counter already initialized with 0 values + # Still increment the call counter + _increment_llm_call_metric(provider, model) + + return token_counter + + +def build_tool_call_summary( # pylint: disable=too-many-return-statements,too-many-branches + output_item: OpenAIResponseOutput, + rag_chunks: list[RAGChunk], +) -> tuple[Optional[ToolCallSummary], Optional[ToolResultSummary]]: + """Translate Responses API tool outputs into ToolCallSummary and ToolResultSummary. + + Args: + output_item: An OpenAIResponseOutput item from the response.output array + rag_chunks: List to append extracted RAG chunks to (from file_search_call items) + + Returns: + Tuple of (ToolCallSummary, ToolResultSummary), one may be None + """ + item_type = getattr(output_item, "type", None) + + if item_type == "function_call": + item = cast(FunctionCall, output_item) + return ( + ToolCallSummary( + id=item.call_id, + name=item.name, + args=parse_arguments_string(item.arguments), + type="function_call", + ), + None, # not supported by Responses API at all + ) + + if item_type == "file_search_call": + file_search_item = cast(FileSearchCall, output_item) + extract_rag_chunks_from_file_search_item(file_search_item, rag_chunks) + response_payload: Optional[dict[str, Any]] = None + if file_search_item.results is not None: + response_payload = { + "results": [result.model_dump() for result in file_search_item.results] + } + return ToolCallSummary( + id=file_search_item.id, + name=DEFAULT_RAG_TOOL, + args={"queries": file_search_item.queries}, + type="file_search_call", + ), ToolResultSummary( + id=file_search_item.id, + status=file_search_item.status, + content=json.dumps(response_payload) if response_payload else "", + type="file_search_call", + round=1, + ) + + # Incomplete OpenAI Responses API definition in LLS: action attribute not supported yet + if item_type == "web_search_call": + web_search_item = cast(WebSearchCall, output_item) + return ( + ToolCallSummary( + id=web_search_item.id, + name="web_search", + args={}, + type="web_search_call", + ), + ToolResultSummary( + id=web_search_item.id, + status=web_search_item.status, + content="", + type="web_search_call", + round=1, + ), + ) + + if item_type == "mcp_call": + mcp_call_item = cast(MCPCall, output_item) + args = parse_arguments_string(mcp_call_item.arguments) + if mcp_call_item.server_label: + args["server_label"] = mcp_call_item.server_label + content = ( + mcp_call_item.error + if mcp_call_item.error + else (mcp_call_item.output if mcp_call_item.output else "") + ) + + return ToolCallSummary( + id=mcp_call_item.id, + name=mcp_call_item.name, + args=args, + type="mcp_call", + ), ToolResultSummary( + id=mcp_call_item.id, + status="success" if mcp_call_item.error is None else "failure", + content=content, + type="mcp_call", + round=1, + ) + + if item_type == "mcp_list_tools": + mcp_list_tools_item = cast(MCPListTools, output_item) + tools_info = [ + { + "name": tool.name, + "description": 
tool.description, + "input_schema": tool.input_schema, + } + for tool in mcp_list_tools_item.tools + ] + content_dict = { + "server_label": mcp_list_tools_item.server_label, + "tools": tools_info, + } + return ( + ToolCallSummary( + id=mcp_list_tools_item.id, + name="mcp_list_tools", + args={"server_label": mcp_list_tools_item.server_label}, + type="mcp_list_tools", + ), + ToolResultSummary( + id=mcp_list_tools_item.id, + status="success", + content=json.dumps(content_dict), + type="mcp_list_tools", + round=1, + ), + ) + + if item_type == "mcp_approval_request": + approval_request_item = cast(MCPApprovalRequest, output_item) + args = parse_arguments_string(approval_request_item.arguments) + return ( + ToolCallSummary( + id=approval_request_item.id, + name=approval_request_item.name, + args=args, + type="mcp_approval_request", + ), + None, + ) + + if item_type == "mcp_approval_response": + approval_response_item = cast(MCPApprovalResponse, output_item) + content_dict = {} + if approval_response_item.reason: + content_dict["reason"] = approval_response_item.reason + return ( + None, + ToolResultSummary( + id=approval_response_item.approval_request_id, + status="success" if approval_response_item.approve else "denied", + content=json.dumps(content_dict), + type="mcp_approval_response", + round=1, + ), + ) + + return None, None + + +def build_mcp_tool_call_from_arguments_done( + output_index: int, + arguments: str, + mcp_call_items: dict[int, tuple[str, str]], +) -> Optional[ToolCallSummary]: + """Build ToolCallSummary from MCP call arguments completion event. + + Args: + output_index: The output index of the MCP call item + arguments: The JSON string of arguments from the arguments.done event + mcp_call_items: Dictionary storing item ID and name, keyed by output_index + + Returns: + ToolCallSummary for the MCP call, or None if item info not found + """ + item_info = mcp_call_items.get(output_index) + if not item_info: + return None + + # remove from dict to indicate it was processed during arguments.done + del mcp_call_items[output_index] + item_id, item_name = item_info + args = parse_arguments_string(arguments) + return ToolCallSummary( + id=item_id, + name=item_name, + args=args, + type="mcp_call", + ) + + +def build_tool_result_from_mcp_output_item_done( + output_item: MCPCall, +) -> ToolResultSummary: + """Build ToolResultSummary from MCP call output item done event. + + Args: + output_item: An MCP call output item + + Returns: + ToolResultSummary for the MCP call + """ + content = ( + output_item.error + if output_item.error + else (output_item.output if output_item.output else "") + ) + return ToolResultSummary( + id=output_item.id, + status="success" if output_item.error is None else "failure", + content=content, + type="mcp_call", + round=1, + ) + + +def extract_rag_chunks_from_file_search_item( + item: FileSearchCall, + rag_chunks: list[RAGChunk], +) -> None: + """Extract RAG chunks from a file search tool call item. 
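+
+    Illustratively, a result with text="...", filename="guide.md" and
+    score=0.87 (made-up values) is appended as
+    RAGChunk(content="...", source="guide.md", score=0.87).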
+ + Args: + item: The file search tool call item + rag_chunks: List to append extracted RAG chunks to + """ + if item.results is not None: + for result in item.results: + rag_chunk = RAGChunk( + content=result.text, source=result.filename, score=result.score + ) + rag_chunks.append(rag_chunk) + + +def _increment_llm_call_metric(provider: str, model: str) -> None: + """Safely increment LLM call metric.""" + try: + metrics.llm_calls_total.labels(provider, model).inc() + except (AttributeError, TypeError, ValueError) as e: + logger.warning("Failed to update LLM call metric: %s", e) + + +def parse_arguments_string(arguments_str: str) -> dict[str, Any]: + """Parse an arguments string into a dictionary. + + Args: + arguments_str: The arguments string to parse + + Returns: + Parsed dictionary if successful, otherwise {"args": arguments_str} + """ + # Try parsing as-is first (most common case) + try: + parsed = json.loads(arguments_str) + if isinstance(parsed, dict): + return parsed + except (json.JSONDecodeError, ValueError): + pass + + # Try wrapping in {} if string doesn't start with { + # This handles cases where the string is just the content without braces + stripped = arguments_str.strip() + if stripped and not stripped.startswith("{"): + try: + wrapped = "{" + stripped + "}" + parsed = json.loads(wrapped) + if isinstance(parsed, dict): + return parsed + except (json.JSONDecodeError, ValueError): + pass + + # Fallback: return wrapped in arguments key + return {"args": arguments_str} diff --git a/src/utils/schema_dumper.py b/src/utils/schema_dumper.py index e3c8902dc..40ccc6aaa 100644 --- a/src/utils/schema_dumper.py +++ b/src/utils/schema_dumper.py @@ -6,13 +6,17 @@ from models.config import Configuration -def recursive_update(original: dict) -> dict: +# pylint: disable=too-many-boolean-expressions +def recursive_update( + original: dict, +) -> dict: """Recursively update the schema to be 100% OpenAPI-compatible. Parameters: - original: The original schema dictionary to transform. + original (dict): The original schema dictionary to transform. + Returns: - A new dictionary with OpenAPI-compatible transformations applied. + dict: A new dictionary with OpenAPI-compatible transformations applied. """ new: dict = {} for key, value in original.items(): @@ -24,8 +28,10 @@ def recursive_update(original: dict) -> dict: key == "anyOf" and isinstance(value, list) and len(value) >= 2 + and isinstance(value[0], dict) and "type" in value[0] - and value[1]["type"] == "null" + and isinstance(value[1], dict) + and value[1].get("type") == "null" ): # only the first type is correct, # we need to ignore the second one diff --git a/src/utils/shields.py b/src/utils/shields.py index 065cc96e4..ecfa80f5c 100644 --- a/src/utils/shields.py +++ b/src/utils/shields.py @@ -4,11 +4,13 @@ from typing import Any, cast from fastapi import HTTPException -from llama_stack_client import AsyncLlamaStackClient, BadRequestError +from llama_stack_client import AsyncLlamaStackClient from llama_stack_client.types import CreateResponse import metrics -from models.responses import NotFoundResponse +from models.responses import ( + NotFoundResponse, +) from utils.types import ShieldModerationResult logger = logging.getLogger(__name__) @@ -76,17 +78,16 @@ async def run_shield_moderation( Returns: ShieldModerationResult: Result indicating if content was blocked and the message. - - Raises: - HTTPException: If shield's provider_resource_id is not configured or model not found. 
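+
+    Raises:
+        HTTPException: If a shield's provider_resource_id is missing or does
+            not match any available model.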
""" available_models = {model.id for model in await client.models.list()} - for shield in await client.shields.list(): + shields = await client.shields.list() + for shield in shields: if ( not shield.provider_resource_id or shield.provider_resource_id not in available_models ): + logger.error("Shield model not found: %s", shield.provider_resource_id) response = NotFoundResponse( resource="Shield model", resource_id=shield.provider_resource_id or "" ) @@ -96,29 +97,11 @@ async def run_shield_moderation( moderation = await client.moderations.create( input=input_text, model=shield.provider_resource_id ) - moderation_result = cast(CreateResponse, moderation) - - if moderation_result.results and moderation_result.results[0].flagged: - result = moderation_result.results[0] - metrics.llm_calls_validation_errors_total.inc() - logger.warning( - "Shield '%s' flagged content: categories=%s", - shield.identifier, - result.categories, - ) - violation_message = result.user_message or DEFAULT_VIOLATION_MESSAGE - return ShieldModerationResult( - blocked=True, - message=violation_message, - shield_model=shield.provider_resource_id, - ) - # Known Llama Stack bug: error is raised when violation is present # in the shield LLM response but has wrong format that cannot be parsed. - except (BadRequestError, ValueError): + except ValueError: logger.warning( - "Shield '%s' violation detected, treating as blocked", - shield.identifier, + "Shield violation detected, treating as blocked", ) metrics.llm_calls_validation_errors_total.inc() return ShieldModerationResult( @@ -127,6 +110,22 @@ async def run_shield_moderation( shield_model=shield.provider_resource_id, ) + moderation_result = cast(CreateResponse, moderation) + if moderation_result.results and moderation_result.results[0].flagged: + result = moderation_result.results[0] + metrics.llm_calls_validation_errors_total.inc() + logger.warning( + "Shield '%s' flagged content: categories=%s", + shield.identifier, + result.categories, + ) + violation_message = result.user_message or DEFAULT_VIOLATION_MESSAGE + return ShieldModerationResult( + blocked=True, + message=violation_message, + shield_model=shield.provider_resource_id, + ) + return ShieldModerationResult(blocked=False) diff --git a/src/utils/suid.py b/src/utils/suid.py index 23d1b46af..aafd64de5 100644 --- a/src/utils/suid.py +++ b/src/utils/suid.py @@ -20,6 +20,10 @@ def check_suid(suid: str) -> bool: """ Check if given string is a proper session ID. + Accepts standard RFC 4122 UUID strings, 48-character + hexadecimal llama-stack IDs, or the same hex ID prefixed with + "conv_". Non-string inputs are considered invalid. + Returns True if the string is a valid UUID or a llama-stack conversation ID. Parameters: @@ -84,7 +88,7 @@ def to_llama_stack_conversation_id(conversation_id: str) -> str: Adds the 'conv_' prefix if not already present. - Args: + Parameters: conversation_id: The conversation ID from database. Returns: diff --git a/src/utils/transcripts.py b/src/utils/transcripts.py index 6e159566d..738212676 100644 --- a/src/utils/transcripts.py +++ b/src/utils/transcripts.py @@ -21,7 +21,16 @@ def _hash_user_id(user_id: str) -> str: - """Hash the user ID using SHA-256.""" + """Hash the user ID using SHA-256. + + Return the SHA-256 hex digest of the given user_id. + + Parameters: + user_id (str): The user identifier to hash. + + Returns: + str: Hexadecimal SHA-256 digest of the UTF-8 encoded user_id. 
+ """ return hashlib.sha256(user_id.encode("utf-8")).hexdigest() diff --git a/src/utils/types.py b/src/utils/types.py index e5a924e90..92421fe2d 100644 --- a/src/utils/types.py +++ b/src/utils/types.py @@ -1,6 +1,5 @@ """Common types for the project.""" -import json from typing import Any, Optional from llama_stack_client.lib.agents.tool_parser import ToolParser @@ -14,9 +13,9 @@ ImageContentItem, TextContentItem, ) -from pydantic import BaseModel, Field +from pydantic import AnyUrl, BaseModel, Field -from constants import DEFAULT_RAG_TOOL +from utils.token_counter import TokenCounter def content_to_str(content: Any) -> str: @@ -110,6 +109,22 @@ class ShieldModerationResult(BaseModel): shield_model: Optional[str] = None +class ResponsesApiParams(BaseModel): + """Parameters for a Llama Stack Responses API request.""" + + input: str = Field(description="The input text with attachments appended") + model: str = Field(description='The full model ID in format "provider/model"') + instructions: Optional[str] = Field( + default=None, description="The resolved system prompt" + ) + tools: Optional[list[dict[str, Any]]] = Field( + default=None, description="Prepared tool groups for Responses API" + ) + conversation: str = Field(description="The conversation ID in llama-stack format") + stream: bool = Field(description="Whether to stream the response") + store: bool = Field(description="Whether to store the response") + + class ToolCallSummary(BaseModel): """Model representing a tool call made during response generation (for tool_calls list).""" @@ -143,70 +158,30 @@ class RAGChunk(BaseModel): score: Optional[float] = Field(None, description="Relevance score") +class ReferencedDocument(BaseModel): + """Model representing a document referenced in generating a response. + + Attributes: + doc_url: Url to the referenced doc. + doc_title: Title of the referenced doc. + """ + + doc_url: Optional[AnyUrl] = Field( + None, description="URL of the referenced document" + ) + + doc_title: Optional[str] = Field( + None, description="Title of the referenced document" + ) + + class TurnSummary(BaseModel): """Summary of a turn in llama stack.""" - llm_response: str - tool_calls: list[ToolCallSummary] - tool_results: list[ToolResultSummary] - rag_chunks: list[RAGChunk] - - def _extract_rag_chunks_from_response(self, response_content: str) -> None: - """ - Parse a tool response string and append extracted RAG chunks to this rag_chunks list. - - Attempts to parse `response_content` as JSON and extract chunks in either of two formats: - - A dict containing a "chunks" list: each item's "content", "source", and "score" are used. - - A top-level list of chunk objects: for dict items, "content", - "source", and "score" are used; non-dict items are stringified into - the chunk content. - - If JSON parsing fails or an unexpected structure/error occurs and - `response_content` contains non-whitespace characters, the entire - `response_content` is appended as a single RAGChunk with - `source=DEFAULT_RAG_TOOL` and `score=None`. Empty or whitespace-only - `response_content` is ignored. 
- """ - try: - # Parse the response to get chunks - # Try JSON first - try: - data = json.loads(response_content) - if isinstance(data, dict) and "chunks" in data: - for chunk in data["chunks"]: - self.rag_chunks.append( - RAGChunk( - content=chunk.get("content", ""), - source=chunk.get("source"), - score=chunk.get("score"), - ) - ) - elif isinstance(data, list): - # Handle list of chunks - for chunk in data: - if isinstance(chunk, dict): - self.rag_chunks.append( - RAGChunk( - content=chunk.get("content", str(chunk)), - source=chunk.get("source"), - score=chunk.get("score"), - ) - ) - except json.JSONDecodeError: - # If not JSON, treat the entire response as a single chunk - if response_content.strip(): - self.rag_chunks.append( - RAGChunk( - content=response_content, - source=DEFAULT_RAG_TOOL, - score=None, - ) - ) - except (KeyError, AttributeError, TypeError, ValueError): - # Treat response as single chunk on data access/structure errors - if response_content.strip(): - self.rag_chunks.append( - RAGChunk( - content=response_content, source=DEFAULT_RAG_TOOL, score=None - ) - ) + llm_response: str = "" + tool_calls: list[ToolCallSummary] = Field(default_factory=list) + tool_results: list[ToolResultSummary] = Field(default_factory=list) + rag_chunks: list[RAGChunk] = Field(default_factory=list) + referenced_documents: list[ReferencedDocument] = Field(default_factory=list) + pre_rag_documents: list[ReferencedDocument] = Field(default_factory=list) + token_usage: TokenCounter = Field(default_factory=TokenCounter) diff --git a/src/utils/vector_search.py b/src/utils/vector_search.py index 6f157b1f8..682e513b4 100644 --- a/src/utils/vector_search.py +++ b/src/utils/vector_search.py @@ -115,7 +115,9 @@ async def perform_vector_search( if chunk_meta is not None: # chunk_meta might be a pydantic model or a dict depending on caller if isinstance(chunk_meta, dict): - doc_id = chunk_meta.get("doc_id") or chunk_meta.get("document_id") + doc_id = chunk_meta.get("doc_id") or chunk_meta.get( + "document_id" + ) title = title or chunk_meta.get("title") reference_url = chunk_meta.get("reference_url") else: @@ -123,7 +125,9 @@ async def perform_vector_search( chunk_meta, "document_id", None ) title = title or getattr(chunk_meta, "title", None) - reference_url = getattr(chunk_meta, "reference_url", None) + reference_url = getattr( + chunk_meta, "reference_url", None + ) else: reference_url = None else: diff --git a/src/version.py b/src/version.py index 1e5ca2068..9b670f396 100644 --- a/src/version.py +++ b/src/version.py @@ -9,4 +9,4 @@ # [tool.pdm.version] # source = "file" # path = "src/version.py" -__version__ = "0.4.0" +__version__ = "0.4.1" diff --git a/test.containerfile b/test.containerfile index 3e85c2987..ecfc54313 100644 --- a/test.containerfile +++ b/test.containerfile @@ -1,16 +1,39 @@ -# Custom Red Hat llama-stack image with missing dependencies -FROM quay.io/rhoai/odh-llama-stack-core-rhel9:rhoai-3.3 +# Upstream llama-stack built from Red Hat UBI +FROM registry.access.redhat.com/ubi9/ubi-minimal -# Install missing dependencies and create required directories USER root -RUN pip install azure-identity && \ - mkdir -p /app-root && \ - chown -R 1001:0 /app-root && \ - chmod -R 775 /app-root && \ - mkdir -p /opt/app-root/src/.llama/storage /opt/app-root/src/.llama/providers.d && \ - chown -R 1001:0 /opt/app-root/src/.llama && \ - mkdir -p /opt/app-root/src/.cache/huggingface && \ - chown -R 1001:0 /opt/app-root/src/.cache + +# Install Python and build tools +RUN microdnf install -y --nodocs 
--setopt=keepcache=0 --setopt=tsflags=nodocs \
+    python3.12 python3.12-devel python3.12-pip git tar gcc gcc-c++ make
+
+# Install uv
+ENV PATH="/root/.local/bin:${PATH}"
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Copy project files for dependency installation
+WORKDIR /opt/app-root
+COPY pyproject.toml uv.lock LICENSE README.md ./
+COPY src ./src
+
+# Install dependencies using uv sync
+RUN uv sync --locked --no-install-project --group llslibdev
+
+# Add virtual environment to PATH for llama command
+ENV PATH="/opt/app-root/.venv/bin:$PATH"
+
+# Set HOME directory so llama-stack uses /opt/app-root/src/.llama
+ENV HOME="/opt/app-root/src"
+
+# Create python3 symlink for compatibility
+RUN ln -sf /usr/bin/python3.12 /usr/bin/python3
+
+# Create required directories
+RUN mkdir -p /opt/app-root/src/.llama/storage \
+    /opt/app-root/src/.llama/providers.d \
+    /opt/app-root/src/.cache/huggingface && \
+    chown -R 1001:0 /opt/app-root && \
+    chmod -R 775 /opt/app-root
 
 # Copy enrichment scripts for runtime config enrichment
 COPY src/llama_stack_configuration.py /opt/app-root/llama_stack_configuration.py
diff --git a/tests/benchmarks/test_app_database.py b/tests/benchmarks/test_app_database.py
index 7b3ecf5ca..4eb4de02f 100644
--- a/tests/benchmarks/test_app_database.py
+++ b/tests/benchmarks/test_app_database.py
@@ -15,6 +15,7 @@
 from models.database.conversations import UserConversation
 
 # number of records to be stored in database before benchmarks
+SMALL_DB_RECORDS_COUNT = 100
 MIDDLE_DB_RECORDS_COUNT = 1000
 LARGE_DB_RECORDS_COUNT = 10000
 
@@ -29,7 +30,7 @@ def configuration_filename_fixture() -> str:
     Returns:
         str: Path to the benchmark configuration file to load.
     """
-    return "tests/configuration/benchmarks-lightspeed-stack.yaml"
+    return "tests/configuration/benchmarks-sqlite.yaml"
 
 
 @pytest.fixture(name="sqlite_database")
@@ -217,7 +218,9 @@ def generate_topic_summary() -> str:
     return summary
 
 
-def store_new_user_conversation(session: Session, id: Optional[str] = None) -> None:
+def store_new_user_conversation(
+    session: Session, id: Optional[str] = None, user_id: Optional[str] = None
+) -> None:
     """Store the new user conversation into database.
 
     This helper constructs a UserConversation structure with randomized
@@ -228,6 +231,8 @@ def store_new_user_conversation(session: Session, id: Optional[str] = None) -> N
         session (Session): SQLAlchemy session used to persist the record.
         id (Optional[str]): Optional explicit ID to assign to the new
             conversation. If not provided, a generated suid will be used.
+        user_id (Optional[str]): Optional explicit user ID to assign to the new
+            conversation. If not provided, a generated suid will be used.
 
     Returns:
         None
@@ -237,7 +242,7 @@ def store_new_user_conversation(session: Session, id: Optional[str] = None) -> N
     topic_summary = generate_topic_summary()
     conversation = UserConversation(
         id=id or get_suid(),
-        user_id=get_suid(),
+        user_id=user_id or get_suid(),
         last_used_model=model,
         last_used_provider=provider,
         topic_summary=topic_summary,
@@ -298,6 +303,74 @@ def list_conversation_for_all_users(session: Session) -> None:
     assert len(user_conversations) >= 0
 
 
+def retrieve_conversation(
+    session: Session, conversation_id: str, should_be_none: bool
+) -> None:
+    """Query and assert retrieval of one conversation.
+
+    This helper function retrieves one given conversation from a database. It
+    is intended for use in a benchmark that measures the retrieval performance.
+
+    Parameters:
+        session (Session): SQLAlchemy session used to query conversations.
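+        conversation_id (str): ID of the conversation to look up.
+        should_be_none (bool): Whether the lookup is expected to find no record.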
+
+    Returns:
+        None
+    """
+    query = session.query(UserConversation).filter_by(id=conversation_id)
+
+    conversation = query.first()
+    if should_be_none:
+        assert conversation is None
+    else:
+        assert conversation is not None
+
+
+def retrieve_conversation_for_one_user(
+    session: Session, user_id: str, conversation_id: str, should_be_none: bool
+) -> None:
+    """Query and assert retrieval of one conversation for one user.
+
+    This helper function retrieves one given conversation from a database. It
+    is intended for use in a benchmark that measures the retrieval performance.
+
+    Parameters:
+        session (Session): SQLAlchemy session used to query conversations.
+        user_id (str): ID of the user owning the conversation.
+        conversation_id (str): ID of the conversation to look up.
+        should_be_none (bool): Whether the lookup is expected to find no record.
+
+    Returns:
+        None
+    """
+    query = session.query(UserConversation).filter_by(
+        id=conversation_id, user_id=user_id
+    )
+
+    conversation = query.first()
+    if should_be_none:
+        assert conversation is None
+    else:
+        assert conversation is not None
+
+
+def list_conversation_for_one_user(session: Session, user_id: str) -> None:
+    """Query and assert listing of one user's conversations.
+
+    This helper queries the UserConversation records belonging to the given
+    user and asserts that the result is a list (possibly empty). It is
+    intended for use in a benchmark that measures the listing performance.
+
+    Parameters:
+        session (Session): SQLAlchemy session used to query conversations.
+        user_id (str): ID of the user whose conversations are listed.
+
+    Returns:
+        None
+    """
+    query = session.query(UserConversation).filter_by(user_id=user_id)
+
+    user_conversations = query.all()
+    assert user_conversations is not None
+    assert len(user_conversations) >= 0
+
+
 def benchmark_store_new_user_conversations(
     benchmark: BenchmarkFixture, records_to_insert: int
 ) -> None:
@@ -321,11 +394,13 @@ def benchmark_store_new_user_conversations(
     benchmark(store_new_user_conversation, session)
 
 
-def test_store_new_user_conversations_small_db(
+def test_store_new_user_conversations_empty_db(
     sqlite_database: None, benchmark: BenchmarkFixture
 ) -> None:
     """Benchmark for the DB operation to create and store new topic and conversation ID mapping.
 
+    Benchmark is performed against empty DB.
+
     Parameters:
         sqlite_database: Fixture that prepares a temporary SQLite DB.
         benchmark (BenchmarkFixture): pytest-benchmark fixture.
@@ -336,11 +411,30 @@ def test_store_new_user_conversations_small_db(
     benchmark_store_new_user_conversations(benchmark, 0)
 
 
+def test_store_new_user_conversations_small_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark for the DB operation to create and store new topic and conversation ID mapping.
+
+    Benchmark is performed against small DB.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_store_new_user_conversations(benchmark, SMALL_DB_RECORDS_COUNT)
+
+
 def test_store_new_user_conversations_middle_db(
     sqlite_database: None, benchmark: BenchmarkFixture
 ) -> None:
     """Benchmark for the DB operation to create and store new topic and conversation ID mapping.
 
+    Benchmark is performed against middle-sized DB.
+
     Parameters:
         sqlite_database: Fixture that prepares a temporary SQLite DB.
         benchmark (BenchmarkFixture): pytest-benchmark fixture.
@@ -356,6 +450,8 @@ def test_store_new_user_conversations_large_db(
 ) -> None:
     """Benchmark for the DB operation to create and store new topic and conversation ID mapping.
 
+    Benchmark is performed against large DB.
+
     Parameters:
         sqlite_database: Fixture that prepares a temporary SQLite DB.
         benchmark (BenchmarkFixture): pytest-benchmark fixture.
@@ -397,7 +493,7 @@ def benchmark_update_user_conversation(
     benchmark(update_user_conversation, session, "1234")
 
 
-def test_update_user_conversation_small_db(
+def test_update_user_conversation_empty_db(
     sqlite_database: None,
     benchmark: BenchmarkFixture,
 ) -> None:
@@ -413,6 +509,22 @@ def test_update_user_conversation_small_db(
     benchmark_update_user_conversation(benchmark, 0)
 
 
+def test_update_user_conversation_small_db(
+    sqlite_database: None,
+    benchmark: BenchmarkFixture,
+) -> None:
+    """Benchmark updating a conversation on a small database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_update_user_conversation(benchmark, SMALL_DB_RECORDS_COUNT)
+
+
 def test_update_user_conversation_middle_db(
     sqlite_database: None,
     benchmark: BenchmarkFixture,
@@ -468,7 +580,7 @@ def benchmark_list_conversations_for_all_users(
     benchmark(list_conversation_for_all_users, session)
 
 
-def test_list_conversations_for_all_users_small_db(
+def test_list_conversations_for_all_users_empty_db(
     sqlite_database: None, benchmark: BenchmarkFixture
 ) -> None:
     """Benchmark listing conversations on an empty database.
@@ -483,6 +595,21 @@ def test_list_conversations_for_all_users_small_db(
     benchmark_list_conversations_for_all_users(benchmark, 0)
 
 
+def test_list_conversations_for_all_users_small_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark listing conversations on a small database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_list_conversations_for_all_users(benchmark, SMALL_DB_RECORDS_COUNT)
+
+
 def test_list_conversations_for_all_users_middle_db(
     sqlite_database: None, benchmark: BenchmarkFixture
 ) -> None:
@@ -511,3 +638,270 @@ def test_list_conversations_for_all_users_large_db(
         None
     """
     benchmark_list_conversations_for_all_users(benchmark, LARGE_DB_RECORDS_COUNT)
+
+
+def benchmark_list_conversations_for_one_user(
+    benchmark: BenchmarkFixture, records_to_insert: int
+) -> None:
+    """Prepare DB and benchmark listing one user's conversations.
+
+    Pre-populates the DB with ``records_to_insert`` entries and benchmarks
+    the performance of querying and retrieving the UserConversation rows
+    belonging to a single user.
+
+    Parameters:
+        benchmark (BenchmarkFixture): pytest-benchmark fixture to run the measurement.
+        records_to_insert (int): Number of records to pre-populate before benchmarking.
+
+    Returns:
+        None
+    """
+    with get_session() as session:
+        # store bunch of conversations first
+        for id in range(records_to_insert):
+            # use explicit conversation ID and also user ID
+            store_new_user_conversation(session, str(id), str(id))
+        # user ID somewhere in the middle of database
+        user_id = str(records_to_insert // 2)
+        # then perform the benchmark
+        benchmark(list_conversation_for_one_user, session, user_id)
+
+
+def test_list_conversations_for_one_user_empty_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark listing conversations on an empty database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_list_conversations_for_one_user(benchmark, 0)
+
+
+def test_list_conversations_for_one_user_small_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark listing conversations on a small database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_list_conversations_for_one_user(benchmark, SMALL_DB_RECORDS_COUNT)
+
+
+def test_list_conversations_for_one_user_middle_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark listing conversations on a medium-sized database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_list_conversations_for_one_user(benchmark, MIDDLE_DB_RECORDS_COUNT)
+
+
+def test_list_conversations_for_one_user_large_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark listing conversations on a large database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_list_conversations_for_one_user(benchmark, LARGE_DB_RECORDS_COUNT)
+
+
+def benchmark_retrieve_conversation(
+    benchmark: BenchmarkFixture, records_to_insert: int
+) -> None:
+    """Prepare DB and benchmark retrieving one conversation.
+
+    Pre-populates the DB with ``records_to_insert`` entries and benchmarks
+    the performance of querying and retrieving one UserConversation record.
+
+    Parameters:
+        benchmark (BenchmarkFixture): pytest-benchmark fixture to run the measurement.
+        records_to_insert (int): Number of records to pre-populate before benchmarking.
+
+    Returns:
+        None
+    """
+    with get_session() as session:
+        # store bunch of conversations first
+        for id in range(records_to_insert):
+            # use explicit conversation ID and also user ID
+            store_new_user_conversation(session, str(id), str(id))
+        # conversation ID somewhere in the middle of database
+        conversation_id = str(records_to_insert // 2)
+        # then perform the benchmark
+        benchmark(
+            retrieve_conversation, session, conversation_id, records_to_insert == 0
+        )
+
+
+def test_retrieve_conversation_empty_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark retrieving conversations on an empty database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_retrieve_conversation(benchmark, 0)
+
+
+def test_retrieve_conversation_small_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark retrieving conversations on a small database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+
+    Returns:
+        None
+    """
+    benchmark_retrieve_conversation(benchmark, SMALL_DB_RECORDS_COUNT)
+
+
+def test_retrieve_conversation_middle_db(
+    sqlite_database: None, benchmark: BenchmarkFixture
+) -> None:
+    """Benchmark retrieving conversations on a medium-sized database.
+
+    Parameters:
+        sqlite_database: Fixture that prepares a temporary SQLite DB.
+        benchmark (BenchmarkFixture): pytest-benchmark fixture.
+ + Returns: + None + """ + benchmark_retrieve_conversation(benchmark, MIDDLE_DB_RECORDS_COUNT) + + +def test_retrieve_conversation_large_db( + sqlite_database: None, benchmark: BenchmarkFixture +) -> None: + """Benchmark retrieving conversations on a large database. + + Parameters: + sqlite_database: Fixture that prepares a temporary SQLite DB. + benchmark (BenchmarkFixture): pytest-benchmark fixture. + + Returns: + None + """ + benchmark_retrieve_conversation(benchmark, LARGE_DB_RECORDS_COUNT) + + +def benchmark_retrieve_conversation_for_one_user( + benchmark: BenchmarkFixture, records_to_insert: int +) -> None: + """Prepare DB and benchmark retrieving one conversation. + + Pre-populates the DB with ``records_to_insert`` entries and benchmarks + the performance of querying and retrieving one UserConversation record. + + Parameters: + benchmark (BenchmarkFixture): pytest-benchmark fixture to run the measurement. + records_to_insert (int): Number of records to pre-populate before benchmarking. + + Returns: + None + """ + with get_session() as session: + # store bunch of conversations first + for id in range(records_to_insert): + # use explicit conversation ID and also user ID + store_new_user_conversation(session, str(id), str(id)) + # user ID somewhere in the middle of database + user_id = str(records_to_insert // 2) + conversation_id = str(records_to_insert // 2) + # then perform the benchmark + benchmark( + retrieve_conversation_for_one_user, + session, + user_id, + conversation_id, + records_to_insert == 0, # a flag whether records should be read + ) + + +def test_retrieve_conversation_for_one_user_empty_db( + sqlite_database: None, benchmark: BenchmarkFixture +) -> None: + """Benchmark retrieving conversations on an empty database. + + Parameters: + sqlite_database: Fixture that prepares a temporary SQLite DB. + benchmark (BenchmarkFixture): pytest-benchmark fixture. + + Returns: + None + """ + benchmark_retrieve_conversation_for_one_user(benchmark, 0) + + +def test_retrieve_conversation_for_one_user_small_db( + sqlite_database: None, benchmark: BenchmarkFixture +) -> None: + """Benchmark retrieving conversations on a small database. + + Parameters: + sqlite_database: Fixture that prepares a temporary SQLite DB. + benchmark (BenchmarkFixture): pytest-benchmark fixture. + + Returns: + None + """ + benchmark_retrieve_conversation_for_one_user(benchmark, SMALL_DB_RECORDS_COUNT) + + +def test_retrieve_conversation_for_one_user_middle_db( + sqlite_database: None, benchmark: BenchmarkFixture +) -> None: + """Benchmark retrieving conversations on a medium-sized database. + + Parameters: + sqlite_database: Fixture that prepares a temporary SQLite DB. + benchmark (BenchmarkFixture): pytest-benchmark fixture. + + Returns: + None + """ + benchmark_retrieve_conversation_for_one_user(benchmark, MIDDLE_DB_RECORDS_COUNT) + + +def test_retrieve_conversation_for_one_user_large_db( + sqlite_database: None, benchmark: BenchmarkFixture +) -> None: + """Benchmark retrieving conversations on a large database. + + Parameters: + sqlite_database: Fixture that prepares a temporary SQLite DB. + benchmark (BenchmarkFixture): pytest-benchmark fixture. 
+ + Returns: + None + """ + benchmark_retrieve_conversation_for_one_user(benchmark, LARGE_DB_RECORDS_COUNT) diff --git a/tests/configuration/benchmarks-lightspeed-stack.yaml b/tests/configuration/benchmarks-sqlite.yaml similarity index 100% rename from tests/configuration/benchmarks-lightspeed-stack.yaml rename to tests/configuration/benchmarks-sqlite.yaml diff --git a/tests/e2e/features/authorized_noop.feature b/tests/e2e/features/authorized_noop.feature index 62edb9bf9..7da0ef959 100644 --- a/tests/e2e/features/authorized_noop.feature +++ b/tests/e2e/features/authorized_noop.feature @@ -35,14 +35,14 @@ Feature: Authorized endpoint API tests for the noop authentication module {"user_id": "00000000-0000-0000-0000-000","username": "lightspeed-user","skip_userid_check": true} """ - Scenario: Check if the authorized endpoint works when providing empty user_id + Scenario: Check if the authorized endpoint rejects empty user_id Given The system is in default state And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva When I access endpoint "authorized" using HTTP POST method with user_id "" - Then The status code of the response is 200 + Then The status code of the response is 400 And The body of the response is the following """ - {"user_id": "","username": "lightspeed-user","skip_userid_check": true} + {"detail": "user_id cannot be empty"} """ Scenario: Check if the authorized endpoint works when providing proper user_id diff --git a/tests/e2e/features/authorized_noop_token.feature b/tests/e2e/features/authorized_noop_token.feature index b654a77d2..f977790f5 100644 --- a/tests/e2e/features/authorized_noop_token.feature +++ b/tests/e2e/features/authorized_noop_token.feature @@ -32,14 +32,14 @@ Feature: Authorized endpoint API tests for the noop-with-token authentication mo {"user_id": "00000000-0000-0000-0000-000","username": "lightspeed-user","skip_userid_check": true} """ - Scenario: Check if the authorized endpoint works when providing empty user_id + Scenario: Check if the authorized endpoint rejects empty user_id Given The system is in default state And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva When I access endpoint "authorized" using HTTP POST method with user_id "" - Then The status code of the response is 200 + Then The status code of the response is 400 And The body of the response is the following """ - {"user_id": "","username": "lightspeed-user","skip_userid_check": true} + {"detail": "user_id cannot be empty"} """ Scenario: Check if the authorized endpoint works when providing proper user_id diff --git a/tests/e2e/features/conversation_cache_v2.feature b/tests/e2e/features/conversation_cache_v2.feature index efc0ba601..51e2be687 100644 --- a/tests/e2e/features/conversation_cache_v2.feature +++ b/tests/e2e/features/conversation_cache_v2.feature @@ -130,10 +130,18 @@ Feature: Conversation Cache V2 API tests } } }, + "tool_calls": { + "type": "array", + "items": { "type": "object" } + }, + "tool_results": { + "type": "array", + "items": { "type": "object" } + }, "started_at": { "type": "string", "format": "date-time" }, "completed_at": { "type": "string", "format": "date-time" } }, - "required": ["provider", "model", "messages", "started_at", "completed_at"] + "required": ["provider", "model", "messages", "tool_calls", "tool_results", "started_at", "completed_at"] } } } diff --git a/tests/e2e/features/conversations.feature 
b/tests/e2e/features/conversations.feature index a3f04078b..1d7671f29 100644 --- a/tests/e2e/features/conversations.feature +++ b/tests/e2e/features/conversations.feature @@ -73,6 +73,8 @@ Feature: conversations endpoint API tests "items": { "type": "object", "properties": { + "provider": { "type": "string" }, + "model": { "type": "string" }, "messages": { "type": "array", "items": { @@ -83,9 +85,18 @@ Feature: conversations endpoint API tests } } }, + "tool_calls": { + "type": "array", + "items": { "type": "object" } + }, + "tool_results": { + "type": "array", + "items": { "type": "object" } + }, "started_at": { "type": "string", "format": "date-time" }, "completed_at": { "type": "string", "format": "date-time" } - } + }, + "required": ["provider", "model", "messages", "tool_calls", "tool_results", "started_at", "completed_at"] } } } diff --git a/tests/e2e/features/info.feature b/tests/e2e/features/info.feature index 648d49918..3bfb10acd 100644 --- a/tests/e2e/features/info.feature +++ b/tests/e2e/features/info.feature @@ -15,7 +15,7 @@ Feature: Info tests Given The system is in default state When I access REST API endpoint "info" using HTTP GET method Then The status code of the response is 200 - And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.4.0 + And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.4.1 And The body of the response has llama-stack version 0.4.3 @skip-in-library-mode @@ -136,4 +136,4 @@ Feature: Info tests Then The status code of the response is 200 And The body of the response has proper client auth options structure And The response contains server "github-api" with client auth header "Authorization" - And The response contains server "gitlab-api" with client auth header "X-API-Token" \ No newline at end of file + And The response contains server "gitlab-api" with client auth header "X-API-Token" diff --git a/tests/e2e/features/query.feature b/tests/e2e/features/query.feature index 501233b52..ac43b786b 100644 --- a/tests/e2e/features/query.feature +++ b/tests/e2e/features/query.feature @@ -8,6 +8,7 @@ Feature: Query endpoint API tests Scenario: Check if LLM responds properly to restrictive system prompt to sent question with different system prompt Given The system is in default state And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "query" to ask question with authorization header """ {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything but openshift questions", "model": "{MODEL}", "provider": "{PROVIDER}"} @@ -16,18 +17,22 @@ Feature: Query endpoint API tests And The response should contain following fragments | Fragments in LLM response | | ask | + And The token metrics should have increased Scenario: Check if LLM responds properly to non-restrictive system prompt to sent question with different system prompt Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "query" to ask question with authorization header """ {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "you are 
linguistic assistant", "model": "{MODEL}", "provider": "{PROVIDER}"} """ - Then The status code of the response is 200 + Then The status code of the response is 200 And The response should contain following fragments | Fragments in LLM response | | checkout | + And The response should contain token counter fields + And The token metrics should have increased #enable on demand @skip @@ -79,12 +84,14 @@ Feature: Query endpoint API tests Scenario: Check if LLM responds to sent question with error when attempting to access conversation Given The system is in default state And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "query" to ask question with authorization header """ {"conversation_id": "123e4567-e89b-12d3-a456-426614174000", "query": "Write a simple code for reversing string", "model": "{MODEL}", "provider": "{PROVIDER}"} """ Then The status code of the response is 404 And The body of the response contains Conversation not found + And The token metrics should not have changed Scenario: Check if LLM responds to sent question with error when attempting to access conversation with incorrect conversation ID format Given The system is in default state @@ -101,13 +108,25 @@ Scenario: Check if LLM responds for query request with error for missing query And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva When I use "query" to ask question with authorization header """ - {"provider": "{PROVIDER}"} + {"conversation_id": "123e4567", "query": "Write a simple code for reversing string", "model": "{MODEL}", "provider": "{PROVIDER}"} """ - Then The status code of the response is 422 - And The body of the response is the following + Then The status code of the response is 422 + And The body of the response contains Value error, Improper conversation ID '123e4567' + + Scenario: Check if LLM responds for query request with error for missing query + Given The system is in default state + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics + When I use "query" to ask question with authorization header + """ + {"provider": "{PROVIDER}"} + """ + Then The status code of the response is 422 + And The body of the response is the following """ { "detail": [{"type": "missing", "loc": [ "body", "query" ], "msg": "Field required", "input": {"provider": "{PROVIDER}"}}] } """ + And The token metrics should not have changed Scenario: Check if LLM responds for query request for missing model and provider Given The system is in default state @@ -163,12 +182,14 @@ Scenario: Check if LLM responds for query request with error for missing query Given The system is in default state And The llama-stack connection is disrupted And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "query" to ask question with authorization header """ {"query": "Say hello"} """ Then The status code of the response is 503 And The body of the response contains Unable to connect to Llama Stack + And The token metrics should not have changed Scenario: Check if LLM responds properly when XML and JSON attachments are sent Given The system is in default state diff --git a/tests/e2e/features/steps/README.md 
b/tests/e2e/features/steps/README.md index 2e3136a4e..4038c1b68 100644 --- a/tests/e2e/features/steps/README.md +++ b/tests/e2e/features/steps/README.md @@ -30,3 +30,6 @@ LLM query and response steps. ## [rbac.py](rbac.py) Step definitions for RBAC E2E tests. +## [token_counters.py](token_counters.py) +Step definitions for token counter validation. + diff --git a/tests/e2e/features/steps/token_counters.py b/tests/e2e/features/steps/token_counters.py new file mode 100644 index 000000000..5bb4c83e7 --- /dev/null +++ b/tests/e2e/features/steps/token_counters.py @@ -0,0 +1,204 @@ +"""Step definitions for token counter validation.""" + +import json + +import requests +from behave import given, then # pyright: ignore[reportAttributeAccessIssue] +from behave.runner import Context + +DEFAULT_TIMEOUT = 10 + + +@then("The response should contain token counter fields") +def check_token_counter_fields(context: Context) -> None: + """Check that response contains input_tokens and output_tokens fields.""" + assert context.response is not None, "Request needs to be performed first" + response_json = context.response.json() + + input_tokens = response_json.get("input_tokens") + output_tokens = response_json.get("output_tokens") + assert ( + "input_tokens" in response_json + ), f"Response should contain 'input_tokens' field. Got: {response_json}" + assert ( + "output_tokens" in response_json + ), f"Response should contain 'output_tokens' field. Got: {response_json}" + assert ( + "available_quotas" in response_json + ), f"Response should contain 'available_quotas' field. Got: {response_json}" + assert input_tokens >= 0, f"input_tokens should be non-negative, got {input_tokens}" + assert ( + output_tokens >= 0 + ), f"output_tokens should be non-negative, got {output_tokens}" + + +@given("I capture the current token metrics") +def capture_token_metrics(context: Context) -> None: + """Capture the current Prometheus token metrics values. + + Stores the metrics in context.initial_token_metrics for later comparison. + """ + context.initial_token_metrics = _get_current_token_metrics(context) + print(f"Initial token metrics: {context.initial_token_metrics}") + + +@then("The token metrics should have increased") +def check_token_metrics_increased(context: Context) -> None: + """Check that token metrics have increased after a query. + + Compares current metrics against context.initial_token_metrics. + """ + assert hasattr( + context, "initial_token_metrics" + ), "Initial metrics not captured. Call 'I capture the current token metrics' first" + + final_metrics = _get_current_token_metrics(context) + initial_metrics = context.initial_token_metrics + + print(f"Final token metrics: {final_metrics}") + + # Check that both token metrics increased + sent_increased = final_metrics["token_sent"] > initial_metrics["token_sent"] + received_increased = ( + final_metrics["token_received"] > initial_metrics["token_received"] + ) + + assert sent_increased and received_increased, ( + f"Both token metrics should have increased. " + f"Initial: {initial_metrics}, Final: {final_metrics}" + ) + + +@then("The token metrics should not have changed") +def check_token_metrics_unchanged(context: Context) -> None: + """Check that token metrics have not changed after an error. + + Compares current metrics against context.initial_token_metrics. + """ + assert hasattr( + context, "initial_token_metrics" + ), "Initial metrics not captured. 
Call 'I capture the current token metrics' first" + + final_metrics = _get_current_token_metrics(context) + initial_metrics = context.initial_token_metrics + + print(f"Final token metrics: {final_metrics}") + + assert final_metrics["token_sent"] == initial_metrics["token_sent"], ( + f"token_sent should not have changed. " + f"Initial: {initial_metrics['token_sent']}, Final: {final_metrics['token_sent']}" + ) + assert final_metrics["token_received"] == initial_metrics["token_received"], ( + f"token_received should not have changed. " + f"Initial: {initial_metrics['token_received']}, " + f"Final: {final_metrics['token_received']}" + ) + + +@then("The streamed response should contain token counter fields") +def check_streamed_token_counter_fields(context: Context) -> None: + """Check that streamed response end event contains token fields.""" + assert context.response_data is not None, "Response data needs to be parsed first" + + # Parse the end event from the streaming response to get token info + end_event_data = _get_end_event_data(context.response.text) + assert end_event_data is not None, "End event not found in streaming response" + + assert "input_tokens" in end_event_data, ( + f"Streamed response should contain 'input_tokens' in end event. " + f"Got: {end_event_data}" + ) + assert "output_tokens" in end_event_data, ( + f"Streamed response should contain 'output_tokens' in end event. " + f"Got: {end_event_data}" + ) + assert "available_quotas" in end_event_data, ( + f"Streamed response should contain 'available_quotas' in end event. " + f"Got: {end_event_data}" + ) + input_tokens: int = end_event_data["input_tokens"] + output_tokens: int = end_event_data["output_tokens"] + assert ( + input_tokens >= 0 + ), f"streamed input_tokens should be non-negative, got {input_tokens}" + assert ( + output_tokens >= 0 + ), f"streamed output_tokens should be non-negative, got {output_tokens}" + + +def _get_current_token_metrics(context: Context) -> dict[str, float]: + """Fetch and parse current token metrics from Prometheus endpoint. + + Parameters: + context: Behave context containing hostname, port, and auth_headers. + + Returns: + Dictionary with 'token_sent' and 'token_received' totals. + """ + base = f"http://{context.hostname}:{context.port}" + url = f"{base}/metrics" + headers = context.auth_headers if hasattr(context, "auth_headers") else {} + + response = requests.get(url, headers=headers, timeout=DEFAULT_TIMEOUT) + assert ( + response.status_code == 200 + ), f"Failed to get metrics, status: {response.status_code}" + + return _parse_token_metrics(response.text) + + +def _get_end_event_data(response_text: str) -> dict | None: + """Extract the end event data from streaming SSE response. + + Parameters: + response_text: The raw SSE response text. + + Returns: + The data dictionary from the end event (including available_quotas), + or None if not found. + """ + lines = response_text.strip().split("\n") + for line in lines: + if line.startswith("data: "): + try: + event = json.loads(line[6:]) + if event.get("event") == "end": + # Merge data contents with available_quotas from parent level + result = event.get("data", {}) + result["available_quotas"] = event.get("available_quotas", {}) + return result + except json.JSONDecodeError: + continue + return None + + +def _parse_token_metrics(metrics_text: str) -> dict[str, float]: + """Parse Prometheus metrics text to extract token counter values. + + Parameters: + metrics_text: Raw Prometheus metrics text output. 
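+            Expected counter lines look like (illustrative label values):
+                ls_llm_token_sent_total{model="m",provider="p"} 42.0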
+ + Returns: + Dictionary with 'token_sent' and 'token_received' totals. + """ + token_sent_total = 0.0 + token_received_total = 0.0 + + # Prometheus format: metric_name{labels} value + for line in metrics_text.split("\n"): + line = line.strip() + if not line or line.startswith("#"): + continue + + # Extract value (last space-separated element) + if line.startswith("ls_llm_token_sent_total{"): + value = line.split()[-1] + token_sent_total += float(value) + elif line.startswith("ls_llm_token_received_total{"): + value = line.split()[-1] + token_received_total += float(value) + + return { + "token_sent": token_sent_total, + "token_received": token_received_total, + } diff --git a/tests/e2e/features/streaming_query.feature b/tests/e2e/features/streaming_query.feature index a89dde123..22b3255b9 100644 --- a/tests/e2e/features/streaming_query.feature +++ b/tests/e2e/features/streaming_query.feature @@ -19,29 +19,34 @@ Feature: streaming_query endpoint API tests Scenario: Check if LLM responds properly to restrictive system prompt to sent question with different system prompt Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva - And I use "streaming_query" to ask question with authorization header + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics + And I use "streaming_query" to ask question with authorization header """ {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything but openshift questions", "model": "{MODEL}", "provider": "{PROVIDER}"} """ - When I wait for the response to be completed - Then The status code of the response is 200 + When I wait for the response to be completed + Then The status code of the response is 200 And The streamed response should contain following fragments | Fragments in LLM response | | questions | + And The token metrics should have increased Scenario: Check if LLM responds properly to non-restrictive system prompt to sent question with different system prompt Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva - And I use "streaming_query" to ask question with authorization header + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics + And I use "streaming_query" to ask question with authorization header """ {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "you are linguistic assistant", "model": "{MODEL}", "provider": "{PROVIDER}"} """ - When I wait for the response to be completed - Then The status code of the response is 200 + When I wait for the response to be completed + Then The status code of the response is 200 And The streamed response should contain following fragments | Fragments in LLM response | | checkout | + And The streamed response should contain token counter fields + And The token metrics should have increased #enable on demand @skip @@ -65,16 +70,18 @@ Feature: streaming_query endpoint API tests Scenario: Check if LLM responds for streaming_query request with error for missing query Given The system is in default state - And I set the Authorization header to Bearer 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "streaming_query" to ask question with authorization header """ {"provider": "{PROVIDER}"} """ - Then The status code of the response is 422 + Then The status code of the response is 422 And The body of the response is the following """ { "detail": [{"type": "missing", "loc": [ "body", "query" ], "msg": "Field required", "input": {"provider": "{PROVIDER}"}}] } """ + And The token metrics should not have changed Scenario: Check if LLM responds for streaming_query request for missing model and provider Given The system is in default state @@ -87,13 +94,15 @@ Feature: streaming_query endpoint API tests Scenario: Check if LLM responds for streaming_query request with error for missing model Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use "streaming_query" to ask question with authorization header """ {"query": "Say hello", "provider": "{PROVIDER}"} """ - Then The status code of the response is 422 + Then The status code of the response is 422 And The body of the response contains Value error, Model must be specified if provider is specified + And The token metrics should not have changed Scenario: Check if LLM responds for streaming_query request with error for missing provider Given The system is in default state @@ -102,28 +111,30 @@ Feature: streaming_query endpoint API tests """ {"query": "Say hello", "model": "{MODEL}"} """ - Then The status code of the response is 422 + Then The status code of the response is 422 And The body of the response contains Value error, Provider must be specified if model is specified - Scenario: Check if LLM responds for query request with error for unknown model + Scenario: Check if LLM responds for streaming_query request with error for unknown model Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva - When I use "streaming_query" to ask question with authorization header - """ - {"query": "Say hello", "provider": "{PROVIDER}", "model":"unknown"} - """ - Then The status code of the response is 404 + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + When I use "streaming_query" to ask question with authorization header + """ + {"query": "Say hello", "provider": "{PROVIDER}", "model":"unknown"} + """ + Then The status code of the response is 404 And The body of the response contains Model with ID unknown does not exist - Scenario: Check if LLM responds for query request with error for unknown provider + Scenario: Check if LLM responds for streaming_query request with error for unknown provider Given The system is in default state - And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva + And I capture the current token metrics When I use 
"streaming_query" to ask question with authorization header """ {"query": "Say hello", "model": "{MODEL}", "provider":"unknown"} """ - Then The status code of the response is 404 + Then The status code of the response is 404 And The body of the response contains Model with ID gpt-4o-mini does not exist + And The token metrics should not have changed Scenario: Check if LLM responds properly when XML and JSON attachments are sent Given The system is in default state @@ -153,11 +164,11 @@ Feature: streaming_query endpoint API tests Scenario: Check if LLM responds to sent question with error when not authenticated Given The system is in default state - When I use "streaming_query" to ask question - """ - {"query": "Say hello", "model": "{MODEL}", "provider": "{PROVIDER}"} - """ - Then The status code of the response is 401 + When I use "streaming_query" to ask question + """ + {"query": "Say hello", "model": "{MODEL}", "provider": "{PROVIDER}"} + """ + Then The status code of the response is 401 And The body of the response is the following """ { diff --git a/tests/integration/endpoints/README.md b/tests/integration/endpoints/README.md index 41b238f58..6cf2ec14a 100644 --- a/tests/integration/endpoints/README.md +++ b/tests/integration/endpoints/README.md @@ -13,7 +13,7 @@ Integration tests for the /health endpoint. Integration tests for the /info endpoint. ## [test_query_v2_integration.py](test_query_v2_integration.py) -Integration tests for the /query endpoint (v2 with Responses API). +Integration tests for the /query endpoint (using Responses API). ## [test_rlsapi_v1_integration.py](test_rlsapi_v1_integration.py) Integration tests for the rlsapi v1 /infer endpoint. diff --git a/tests/integration/endpoints/test_config_integration.py b/tests/integration/endpoints/test_config_integration.py index 74aee0b3d..12385f21e 100644 --- a/tests/integration/endpoints/test_config_integration.py +++ b/tests/integration/endpoints/test_config_integration.py @@ -1,6 +1,8 @@ """Integration tests for the /config endpoint.""" +from typing import cast import pytest + from fastapi import HTTPException, Request, status from app.endpoints.config import config_endpoint_handler @@ -81,6 +83,6 @@ async def test_config_endpoint_fails_without_configuration( # Verify error details assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR assert isinstance(exc_info.value.detail, dict) - assert ( - "configuration is not loaded" in exc_info.value.detail["response"].lower() - ) # type: ignore + assert "response" in exc_info.value.detail + detail = cast(dict[str, str], exc_info.value.detail) + assert "configuration is not loaded" in detail["response"].lower() diff --git a/tests/integration/endpoints/test_query_v2_integration.py b/tests/integration/endpoints/test_query_v2_integration.py index 56abee2a5..7619dbc7d 100644 --- a/tests/integration/endpoints/test_query_v2_integration.py +++ b/tests/integration/endpoints/test_query_v2_integration.py @@ -1,4 +1,4 @@ -"""Integration tests for the /query endpoint (v2 with Responses API).""" +"""Integration tests for the /query endpoint (using Responses API).""" # pylint: disable=too-many-lines # Integration tests require comprehensive coverage # pylint: disable=too-many-arguments # Integration tests need many fixtures @@ -16,13 +16,14 @@ from sqlalchemy.orm import Session, sessionmaker import app.database -import app.endpoints.query_old -from app.endpoints.query import query_endpoint_handler_v2 +import app.endpoints.query +from app.endpoints.query import 
query_endpoint_handler from authentication.interface import AuthTuple from configuration import AppConfig from models.cache_entry import CacheEntry from models.database.conversations import UserConversation from models.requests import Attachment, QueryRequest +import utils.query # Test constants - use valid UUID format TEST_CONVERSATION_ID = "c9d40813-d64d-41eb-8060-3b2446929a02" @@ -167,7 +168,7 @@ async def test_query_v2_endpoint_successful_response( query="What is Ansible?", ) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -218,7 +219,7 @@ async def test_query_v2_endpoint_handles_connection_error( query_request = QueryRequest(query="What is Ansible?") with pytest.raises(HTTPException) as exc_info: - await query_endpoint_handler_v2( + await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -260,7 +261,7 @@ async def test_query_v2_endpoint_empty_query( query_request = QueryRequest(query="") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -312,7 +313,7 @@ async def test_query_v2_endpoint_with_attachments( ], ) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -395,7 +396,7 @@ async def test_query_v2_endpoint_with_tool_calls( query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -463,7 +464,7 @@ async def test_query_v2_endpoint_with_mcp_list_tools( query_request = QueryRequest(query="What tools are available?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -530,7 +531,7 @@ async def test_query_v2_endpoint_with_multiple_tool_types( query_request = QueryRequest(query="Search docs and calculate deployment replicas") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -586,7 +587,7 @@ async def test_query_v2_endpoint_bypasses_tools_when_no_tools_true( query_request = QueryRequest(query="What is Ansible?", no_tools=True) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -643,7 +644,7 @@ async def test_query_v2_endpoint_uses_tools_when_available( query_request = QueryRequest(query="What is Ansible?", no_tools=False) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -692,7 +693,7 @@ async def test_query_v2_endpoint_persists_conversation_to_database( query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -760,7 +761,7 @@ async def test_query_v2_endpoint_updates_existing_conversation( query_request = QueryRequest(query="Tell me more", conversation_id=EXISTING_CONV_ID) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, 
query_request=query_request, auth=test_auth, @@ -817,7 +818,7 @@ async def test_query_v2_endpoint_conversation_ownership_validation( query_request = QueryRequest(query="What is Ansible?", conversation_id=TEST_CONV_ID) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -861,11 +862,11 @@ async def test_query_v2_endpoint_creates_valid_cache_entry( _ = mock_llama_stack_client _ = patch_db_session - cache_spy = mocker.spy(app.endpoints.query_old, "store_conversation_into_cache") + cache_spy = mocker.spy(utils.query, "store_conversation_into_cache") query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -874,8 +875,8 @@ async def test_query_v2_endpoint_creates_valid_cache_entry( cache_spy.assert_called_once() - call_args = cache_spy.call_args.args - cache_entry = call_args[3] + call_args = cache_spy.call_args + cache_entry = call_args.kwargs["cache_entry"] assert isinstance(cache_entry, CacheEntry) assert cache_entry.query == "What is Ansible?" @@ -923,7 +924,7 @@ async def test_query_v2_endpoint_conversation_not_found_returns_404( ) with pytest.raises(HTTPException) as exc_info: - await query_endpoint_handler_v2( + await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -995,7 +996,7 @@ async def test_query_v2_endpoint_with_shield_violation( query_request = QueryRequest(query="Inappropriate query") # Shield violations are advisory - request should succeed - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -1041,7 +1042,7 @@ async def test_query_v2_endpoint_without_shields( query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -1102,7 +1103,7 @@ async def test_query_v2_endpoint_handles_empty_llm_response( query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -1153,12 +1154,12 @@ async def test_query_v2_endpoint_quota_integration( mock_llama_stack_client.responses.create.return_value = mock_response - mock_consume = mocker.spy(app.endpoints.query_old, "consume_tokens") - _ = mocker.spy(app.endpoints.query_old, "get_available_quotas") + mock_consume = mocker.spy(app.endpoints.query, "consume_query_tokens") + _ = mocker.spy(app.endpoints.query, "get_available_quotas") query_request = QueryRequest(query="What is Ansible?") - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -1170,11 +1171,12 @@ async def test_query_v2_endpoint_quota_integration( mock_consume.assert_called_once() consume_args = mock_consume.call_args user_id, _, _, _ = test_auth - assert consume_args.args[2] == user_id - assert consume_args.kwargs["model_id"] == "test-model" - assert consume_args.kwargs["provider_id"] == "test-provider" - assert consume_args.kwargs["input_tokens"] == 100 - assert consume_args.kwargs["output_tokens"] == 50 + assert consume_args.kwargs["user_id"] == user_id + assert 
consume_args.kwargs["model_id"] is not None + assert consume_args.kwargs["token_usage"] is not None + assert consume_args.kwargs["token_usage"].input_tokens == 100 + assert consume_args.kwargs["token_usage"].output_tokens == 50 + assert consume_args.kwargs["configuration"] is not None assert response.available_quotas is not None assert isinstance(response.available_quotas, dict) @@ -1223,7 +1225,7 @@ async def test_query_v2_endpoint_rejects_query_when_quota_exceeded( query_request = QueryRequest(query="What is Ansible?") with pytest.raises(HTTPException) as exc_info: - await query_endpoint_handler_v2( + await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, @@ -1272,7 +1274,7 @@ async def test_query_v2_endpoint_transcript_behavior( _ = mock_llama_stack_client # Mock store_transcript to prevent file creation - mocker.patch("app.endpoints.query.store_transcript") + mocker.patch("utils.query.store_transcript") test_config.user_data_collection_configuration.transcripts_enabled = True @@ -1287,7 +1289,7 @@ async def test_query_v2_endpoint_transcript_behavior( ], ) - response_enabled = await query_endpoint_handler_v2( + response_enabled = await query_endpoint_handler( request=test_request, query_request=query_request_enabled, auth=test_auth, @@ -1310,7 +1312,7 @@ async def test_query_v2_endpoint_transcript_behavior( query_request_disabled = QueryRequest(query="What is Kubernetes?") - response_disabled = await query_endpoint_handler_v2( + response_disabled = await query_endpoint_handler( request=test_request, query_request=query_request_disabled, auth=test_auth, @@ -1379,7 +1381,7 @@ async def test_query_v2_endpoint_uses_conversation_history_model( query_request = QueryRequest(query="Tell me more", conversation_id=EXISTING_CONV_ID) - response = await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=test_request, query_request=query_request, auth=test_auth, diff --git a/tests/integration/endpoints/test_rlsapi_v1_integration.py b/tests/integration/endpoints/test_rlsapi_v1_integration.py index 3ceac9eff..3b0da6f8c 100644 --- a/tests/integration/endpoints/test_rlsapi_v1_integration.py +++ b/tests/integration/endpoints/test_rlsapi_v1_integration.py @@ -9,7 +9,7 @@ # pylint: disable=protected-access # pylint: disable=unused-argument -from typing import Any +from typing import Any, cast import pytest from fastapi import HTTPException, status @@ -42,7 +42,7 @@ def _create_mock_request(mocker: MockerFixture) -> Any: mock_request = mocker.Mock() # Use spec=[] to create a Mock with no attributes, simulating absent rh_identity_data mock_request.state = mocker.Mock(spec=[]) - mock_request.headers = {"User-Agent": "CLA/0.4.0"} + mock_request.headers = {"User-Agent": "CLA/0.4.1"} return mock_request @@ -139,6 +139,7 @@ async def test_rlsapi_v1_infer_minimal_request( assert isinstance(response, RlsapiV1InferResponse) assert response.data.text == "Use the `ls` command to list files in a directory." 
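+    # Guard against a missing request_id before the SUID format check below.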
+ assert response.data.request_id is not None assert check_suid(response.data.request_id) @@ -169,7 +170,7 @@ async def test_rlsapi_v1_infer_minimal_request( attachments=RlsapiV1Attachment(contents="log content"), terminal=RlsapiV1Terminal(output="command not found"), systeminfo=RlsapiV1SystemInfo(os="RHEL", version="9.3", arch="x86_64"), - cla=RlsapiV1CLA(nevra="cla-0.4.0", version="0.4.0"), + cla=RlsapiV1CLA(nevra="cla-0.4.1", version="0.4.1"), ), "full_context", id="full_context", @@ -221,7 +222,9 @@ async def test_rlsapi_v1_infer_generates_unique_request_ids( request_ids = {r.data.request_id for r in responses} assert len(request_ids) == 3 - assert all(check_suid(rid) for rid in request_ids) + for rid in request_ids: + assert rid is not None + assert all(check_suid(rid) for rid in request_ids if rid is not None) # ========================================== @@ -262,7 +265,9 @@ async def test_rlsapi_v1_infer_connection_error_returns_503( assert exc_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE assert isinstance(exc_info.value.detail, dict) - assert "Llama Stack" in exc_info.value.detail["response"] + assert "response" in exc_info.value.detail + detail = cast(dict[str, str], exc_info.value.detail) + assert "Llama Stack" in detail["response"] @pytest.mark.asyncio @@ -349,6 +354,104 @@ async def test_rlsapi_v1_infer_input_source_combination( assert expected in input_content +# ========================================== +# MCP Tools Passthrough Tests +# ========================================== + + +@pytest.mark.asyncio +async def test_rlsapi_v1_infer_no_mcp_servers_passes_empty_tools( + rlsapi_config: AppConfig, + mock_authorization: None, + test_auth: AuthTuple, + mocker: MockerFixture, +) -> None: + """Regression: no MCP servers configured passes empty tools list. + + When mcp_servers is empty (the default), get_mcp_tools returns [], + and responses.create should receive tools=[]. 
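+
+    An empty tools list is forwarded to responses.create as-is, so the
+    request runs with no tool definitions attached.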
+ """ + _ = rlsapi_config + + mock_response = mocker.Mock() + mock_response.output = [_create_mock_response_output(mocker, "response text")] + + mock_responses = mocker.Mock() + mock_responses.create = mocker.AsyncMock(return_value=mock_response) + + mock_client = mocker.Mock() + mock_client.responses = mock_responses + + mock_holder_class = mocker.patch( + "app.endpoints.rlsapi_v1.AsyncLlamaStackClientHolder" + ) + mock_holder_class.return_value.get_client.return_value = mock_client + + mocker.patch( + "app.endpoints.rlsapi_v1.get_mcp_tools", + return_value=[], + ) + + await infer_endpoint( + infer_request=RlsapiV1InferRequest(question="How do I list files?"), + request=_create_mock_request(mocker), + background_tasks=_create_mock_background_tasks(mocker), + auth=test_auth, + ) + + call_kwargs = mock_responses.create.call_args.kwargs + assert call_kwargs["tools"] == [] + + +@pytest.mark.asyncio +async def test_rlsapi_v1_infer_mcp_tools_passed_to_llm( + rlsapi_config: AppConfig, + mock_authorization: None, + test_auth: AuthTuple, + mocker: MockerFixture, +) -> None: + """Test that MCP tool definitions are forwarded to responses.create().""" + _ = rlsapi_config + + mock_response = mocker.Mock() + mock_response.output = [_create_mock_response_output(mocker, "enriched response")] + + mock_responses = mocker.Mock() + mock_responses.create = mocker.AsyncMock(return_value=mock_response) + + mock_client = mocker.Mock() + mock_client.responses = mock_responses + + mock_holder_class = mocker.patch( + "app.endpoints.rlsapi_v1.AsyncLlamaStackClientHolder" + ) + mock_holder_class.return_value.get_client.return_value = mock_client + + mcp_tools = [ + { + "type": "mcp", + "server_label": "rag-knowledge-base", + "server_url": "http://rag-server:8080/sse", + "require_approval": "never", + } + ] + mocker.patch( + "app.endpoints.rlsapi_v1.get_mcp_tools", + return_value=mcp_tools, + ) + + response = await infer_endpoint( + infer_request=RlsapiV1InferRequest(question="How do I configure SELinux?"), + request=_create_mock_request(mocker), + background_tasks=_create_mock_background_tasks(mocker), + auth=test_auth, + ) + + call_kwargs = mock_responses.create.call_args.kwargs + assert call_kwargs["tools"] == mcp_tools + assert response.data.text == "enriched response" + + # ========================================== # Skip RAG Tests # ========================================== diff --git a/tests/integration/test_configuration.py b/tests/integration/test_configuration.py index 294042a51..60e6d09a6 100644 --- a/tests/integration/test_configuration.py +++ b/tests/integration/test_configuration.py @@ -69,7 +69,7 @@ def test_loading_proper_configuration(configuration_filename: str) -> None: # check 'llama_stack' section ls_config = cfg.llama_stack_configuration assert ls_config.use_as_library_client is False - assert ls_config.url == "http://localhost:8321" + assert str(ls_config.url) == "http://localhost:8321/" assert ls_config.api_key is not None assert ls_config.api_key.get_secret_value() == "xyzzy" diff --git a/tests/integration/test_openapi_json.py b/tests/integration/test_openapi_json.py index 0e098f940..53dbacc02 100644 --- a/tests/integration/test_openapi_json.py +++ b/tests/integration/test_openapi_json.py @@ -227,7 +227,7 @@ def test_servers_section_present_from_url(spec_from_url: dict[str, Any]) -> None ("/v1/feedback", "post", {"200", "401", "403", "404", "500"}), ("/v1/feedback/status", "get", {"200"}), ("/v1/feedback/status", "put", {"200", "401", "403", "500"}), - ("/v1/conversations", "get", {"200", 
"401", "403", "500", "503"}), + ("/v1/conversations", "get", {"200", "401", "403", "500"}), ( "/v1/conversations/{conversation_id}", "get", @@ -309,7 +309,7 @@ def test_paths_and_responses_exist_from_file( ("/v1/feedback", "post", {"200", "401", "403", "404", "500"}), ("/v1/feedback/status", "get", {"200"}), ("/v1/feedback/status", "put", {"200", "401", "403", "500"}), - ("/v1/conversations", "get", {"200", "401", "403", "500", "503"}), + ("/v1/conversations", "get", {"200", "401", "403", "500"}), ( "/v1/conversations/{conversation_id}", "get", diff --git a/tests/profiles/empty.py b/tests/profiles/empty.py new file mode 100644 index 000000000..e359a9e6e --- /dev/null +++ b/tests/profiles/empty.py @@ -0,0 +1 @@ +"""Empty profile.""" diff --git a/tests/profiles/syntax_error.py b/tests/profiles/syntax_error.py new file mode 100644 index 000000000..112c9a640 --- /dev/null +++ b/tests/profiles/syntax_error.py @@ -0,0 +1,3 @@ +"""Broken profile.""" + +xyzzy diff --git a/tests/profiles/test_four/profile.py b/tests/profiles/test_four/profile.py new file mode 100644 index 000000000..e1cadd074 --- /dev/null +++ b/tests/profiles/test_four/profile.py @@ -0,0 +1,49 @@ +"""Custom profile for test profile.""" + +SUBJECT_ALLOWED = "ALLOWED" +SUBJECT_REJECTED = "REJECTED" + +# Default responses +INVALID_QUERY_RESP = ( + "Hi, I'm the Red Hat Developer Hub Lightspeed assistant, I can help you with questions about Red Hat Developer Hub or Backstage. " + "Please ensure your question is about these topics, and feel free to ask again!" +) + +QUERY_SYSTEM_INSTRUCTION = """ +1. Test +This is a test system instruction + +You achieve this by offering: +- testing +""" + +USE_CONTEXT_INSTRUCTION = """ +Use the retrieved document to answer the question. +""" + +USE_HISTORY_INSTRUCTION = """ +Use the previous chat history to interact and help the user. +""" + +QUESTION_VALIDATOR_PROMPT_TEMPLATE = f""" +Instructions: +- You provide validation for testing +Example Question: +How can I integrate GitOps into my pipeline? +Example Response: +{SUBJECT_ALLOWED} +""" + +TOPIC_SUMMARY_PROMPT_TEMPLATE = """ +Instructions: +- You are a topic summarizer +- For testing +- Your job is to extract precise topic summary from user input + +Example Input: +Testing placeholder +Example Output: +Proper response test. +""" + +PROFILE_CONFIG = {"xyzzy": QUERY_SYSTEM_INSTRUCTION} diff --git a/tests/unit/app/endpoints/README.md b/tests/unit/app/endpoints/README.md index dd6c34995..801298b19 100644 --- a/tests/unit/app/endpoints/README.md +++ b/tests/unit/app/endpoints/README.md @@ -42,9 +42,6 @@ Unit tests for the /providers REST API endpoints. ## [test_query.py](test_query.py) Unit tests for the /query REST API endpoint. -## [test_query_v2.py](test_query_v2.py) -Unit tests for the /query (v2) REST API endpoint using Responses API. - ## [test_rags.py](test_rags.py) Unit tests for the /rags REST API endpoints. @@ -60,9 +57,6 @@ Unit tests for the /shields REST API endpoint. ## [test_streaming_query.py](test_streaming_query.py) Unit tests for the /streaming-query REST API endpoint. -## [test_streaming_query_v2.py](test_streaming_query_v2.py) -Unit tests for the /streaming_query (v2) endpoint using Responses API. - ## [test_tools.py](test_tools.py) Unit tests for tools endpoint. 
diff --git a/tests/unit/app/endpoints/test_a2a.py b/tests/unit/app/endpoints/test_a2a.py index 9ad9fd173..55eb47cf7 100644 --- a/tests/unit/app/endpoints/test_a2a.py +++ b/tests/unit/app/endpoints/test_a2a.py @@ -713,18 +713,15 @@ async def test_process_task_streaming_handles_api_connection_error_on_models_lis "app.endpoints.a2a.AsyncLlamaStackClientHolder" ).return_value.get_client.return_value = mock_client - await executor._process_task_streaming( - context, task_updater, context.task_id, context.context_id - ) + # prepare_responses_params raises HTTPException when APIConnectionError occurs + with pytest.raises(HTTPException) as exc_info: + await executor._process_task_streaming( + context, task_updater, context.task_id, context.context_id + ) - # Verify failure status was sent - task_updater.update_status.assert_called_once() - call_args = task_updater.update_status.call_args - assert call_args[0][0] == TaskState.failed - assert call_args[1]["final"] is True - # Verify error message contains helpful info - error_message = call_args[1]["message"] - assert "Unable to connect to Llama Stack backend service" in str(error_message) + assert exc_info.value.status_code == 503 + # Verify error detail contains helpful info + assert "Unable to connect to Llama Stack" in str(exc_info.value.detail) @pytest.mark.asyncio async def test_process_task_streaming_handles_api_connection_error_on_retrieve_response( @@ -764,31 +761,30 @@ async def test_process_task_streaming_handles_api_connection_error_on_retrieve_r # Mock the client to succeed on models.list() mock_client = AsyncMock() - mock_models = MagicMock() - mock_models.models = [] - mock_client.models.list.return_value = mock_models - mocker.patch( - "app.endpoints.a2a.AsyncLlamaStackClientHolder" - ).return_value.get_client.return_value = mock_client + mock_models = [MagicMock()] # Return a list of models + mock_client.models.list = mocker.AsyncMock(return_value=mock_models) - # Mock select_model_and_provider_id - mocker.patch( - "app.endpoints.a2a.select_model_and_provider_id", - return_value=("model-id", "model-id", "provider-id"), + # Mock responses.create to raise APIConnectionError + mock_request = httpx.Request("POST", "http://test-llama-stack/responses") + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection timeout during streaming", request=mock_request + ) ) - # Mock evaluate_model_hints mocker.patch( - "app.endpoints.a2a.evaluate_model_hints", return_value=(None, None) - ) + "app.endpoints.a2a.AsyncLlamaStackClientHolder" + ).return_value.get_client.return_value = mock_client - # Mock retrieve_response to raise APIConnectionError - mock_request = httpx.Request("POST", "http://test-llama-stack/responses") + # Mock prepare_responses_params to return valid params + mock_responses_params = mocker.Mock() + mock_responses_params.model_dump.return_value = { + "input": "Hello", + "model": "test-model", + } mocker.patch( - "app.endpoints.a2a.retrieve_response", - side_effect=APIConnectionError( - message="Connection timeout during streaming", request=mock_request - ), + "app.endpoints.a2a.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), ) await executor._process_task_streaming( diff --git a/tests/unit/app/endpoints/test_conversations.py b/tests/unit/app/endpoints/test_conversations.py index 7e12da21a..d1423a7c2 100644 --- a/tests/unit/app/endpoints/test_conversations.py +++ b/tests/unit/app/endpoints/test_conversations.py @@ -3,27 +3,33 @@ """Unit tests for 
the /conversations REST API endpoints.""" +from datetime import UTC, datetime from typing import Any, Optional import pytest from fastapi import HTTPException, Request, status -from llama_stack_client import APIConnectionError, NotFoundError +from llama_stack_client import APIConnectionError, APIStatusError, NotFoundError from pytest_mock import MockerFixture, MockType from sqlalchemy.exc import SQLAlchemyError -from app.endpoints.conversations import ( +from app.endpoints.conversations_v1 import ( delete_conversation_endpoint_handler, get_conversation_endpoint_handler, get_conversations_list_endpoint_handler, - simplify_session_data, + update_conversation_endpoint_handler, ) +from utils.conversations import build_conversation_turns_from_items from configuration import AppConfig from models.config import Action -from models.database.conversations import UserConversation +from models.database.conversations import UserConversation, UserTurn +from models.requests import ConversationUpdateRequest from models.responses import ( ConversationDeleteResponse, ConversationResponse, ConversationsListResponse, + ConversationUpdateResponse, + ForbiddenResponse, + InternalServerErrorResponse, ) from tests.unit.utils.auth_helpers import mock_authorization_resolvers @@ -104,8 +110,92 @@ def create_mock_conversation( return mock_conversation +def create_mock_db_turn( + mocker: MockerFixture, + turn_number: int, + started_at: str = "2024-01-01T00:01:00Z", + completed_at: str = "2024-01-01T00:01:05Z", + provider: str = "google", + model: str = "gemini-2.0-flash-exp", +) -> MockType: + """Create a mock UserTurn database object. + + Args: + mocker: Mocker fixture + turn_number: Turn number (1-indexed) + started_at: ISO 8601 timestamp string + completed_at: ISO 8601 timestamp string + provider: Provider identifier + model: Model identifier + + Returns: + Mock UserTurn database object with required attributes + """ + mock_turn = mocker.Mock(spec=UserTurn) + mock_turn.turn_number = turn_number + # Convert ISO strings to datetime objects (Python 3.12+ supports "Z" directly) + mock_turn.started_at = datetime.fromisoformat(started_at) + mock_turn.completed_at = datetime.fromisoformat(completed_at) + mock_turn.provider = provider + mock_turn.model = model + return mock_turn + + +def _setup_user_turn_query( + mock_query: MockType, db_turns: Optional[list[MockType]] +) -> None: + """Configure mock query for UserTurn model. + + Args: + mock_query: The mock query object to configure. + db_turns: List of UserTurn objects to return, or None for empty list. + """ + turns_to_return = db_turns if db_turns is not None else [] + mock_query.filter_by.return_value.order_by.return_value.all.return_value = ( + turns_to_return + ) + + +def _setup_user_conversation_query( + mock_query: MockType, query_result: Optional[list[MockType]] +) -> None: + """Configure mock query for UserConversation model. + + Args: + mock_query: The mock query object to configure. + query_result: List of UserConversation objects to return, or None for None. + """ + if query_result is not None: + mock_query.all.return_value = query_result + mock_query.filter_by.return_value.all.return_value = query_result + mock_query.filter_by.return_value.first.return_value = ( + query_result[0] if query_result else None + ) + else: + mock_query.filter_by.return_value.first.return_value = None + + +def _patch_get_session_functions( + mocker: MockerFixture, mock_session_context: MockType +) -> None: + """Patch all get_session functions used by the endpoint handlers. 
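+
+    All patch targets share one context-manager mock, so a single mocked
+    session backs app.endpoints.conversations_v1, app.database, and the
+    utils.endpoints helpers alike.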
+ + Args: + mocker: Mocker fixture for creating patches. + mock_session_context: The context manager mock to return from get_session. + """ + mocker.patch( + "app.endpoints.conversations_v1.get_session", return_value=mock_session_context + ) + mocker.patch("app.database.get_session", return_value=mock_session_context) + mocker.patch("utils.endpoints.get_session", return_value=mock_session_context) + mocker.patch("utils.endpoints.can_access_conversation", return_value=True) + + def mock_database_session( - mocker: MockerFixture, query_result: Optional[list[MockType]] = None + mocker: MockerFixture, + query_result: Optional[list[MockType]] = None, + db_turns: Optional[list[MockType]] = None, ) -> MockType: """Helper function to mock get_session with proper context manager support. @@ -115,27 +205,34 @@ def mock_database_session( mocker (pytest.MockerFixture): Fixture used to create and patch mocks. query_result (Optional[list]): If provided, configures the session.query().all() and session.query().filter_by().all() to return - this list. + this list (for UserConversation queries). + db_turns (Optional[list]): If provided, configures UserTurn queries + to return this list. Returns: Mock: The mocked session object that will be yielded by the patched get_session context manager. """ mock_session = mocker.Mock() - if query_result is not None: - # Mock both the filtered and unfiltered query paths + + def query_side_effect(model_class: type[Any]) -> Any: + """Handle different model queries.""" mock_query = mocker.Mock() - mock_query.all.return_value = query_result - mock_query.filter_by.return_value.all.return_value = query_result - mock_session.query.return_value = mock_query + if model_class == UserTurn: + _setup_user_turn_query(mock_query, db_turns) + else: + _setup_user_conversation_query(mock_query, query_result) + return mock_query - # Mock get_session to return a context manager + mock_session.query.side_effect = query_side_effect + + # Create context manager mock for get_session mock_session_context = mocker.MagicMock() mock_session_context.__enter__.return_value = mock_session mock_session_context.__exit__.return_value = None - mocker.patch( - "app.endpoints.conversations.get_session", return_value=mock_session_context - ) + + _patch_get_session_functions(mocker, mock_session_context) + return mock_session @@ -251,6 +348,8 @@ def expected_chat_history_fixture() -> list[dict[str, Any]]: list[dict[str, Any]]: A list of conversation turns. 
Each turn contains: - messages: list of message dicts with `content` (str) and `type` (`"user"` or `"assistant"`) + - tool_calls: list of tool call summaries (empty by default) + - tool_results: list of tool result summaries (empty by default) - started_at: ISO 8601 UTC timestamp string for the turn start - completed_at: ISO 8601 UTC timestamp string for the turn end """ @@ -260,6 +359,10 @@ def expected_chat_history_fixture() -> list[dict[str, Any]]: {"content": "Hello", "type": "user"}, {"content": "Hi there!", "type": "assistant"}, ], + "tool_calls": [], + "tool_results": [], + "provider": "google", + "model": "gemini-2.0-flash-exp", "started_at": "2024-01-01T00:01:00Z", "completed_at": "2024-01-01T00:01:05Z", }, @@ -268,6 +371,10 @@ def expected_chat_history_fixture() -> list[dict[str, Any]]: {"content": "How are you?", "type": "user"}, {"content": "I'm doing well, thanks!", "type": "assistant"}, ], + "tool_calls": [], + "tool_results": [], + "provider": "google", + "model": "gemini-2.0-flash-exp", "started_at": "2024-01-01T00:02:00Z", "completed_at": "2024-01-01T00:02:03Z", }, @@ -294,77 +401,54 @@ def mock_conversation_fixture() -> UserConversation: return mock_conv -class TestSimplifySessionData: - """Test cases for the simplify_session_data function.""" +class TestBuildConversationTurnsFromItems: + """Test cases for the build_conversation_turns_from_items function.""" @pytest.mark.asyncio - async def test_simplify_session_data_with_model_dump( + async def test_build_conversation_turns_from_items_with_model_dump( self, - mock_session_data: dict[str, Any], + mocker: MockerFixture, + mock_session_data: dict[str, Any], # pylint: disable=unused-argument expected_chat_history: list[dict[str, Any]], ) -> None: - """Test simplify_session_data with session data.""" - result = simplify_session_data(mock_session_data) - - assert result == expected_chat_history + """Test build_conversation_turns_from_items with items data.""" + # Create mock items from session_data structure + mock_items = [ + mocker.Mock(type="message", role="user", content="Hello"), + mocker.Mock(type="message", role="assistant", content="Hi there!"), + mocker.Mock(type="message", role="user", content="How are you?"), + mocker.Mock( + type="message", role="assistant", content="I'm doing well, thanks!" 
+ ), + ] + # Create mock db_turns matching the expected turns + mock_db_turns = [ + create_mock_db_turn( + mocker, 1, "2024-01-01T00:01:00Z", "2024-01-01T00:01:05Z" + ), + create_mock_db_turn( + mocker, 2, "2024-01-01T00:02:00Z", "2024-01-01T00:02:03Z" + ), + ] + conversation_start_time = datetime.fromisoformat( + "2024-01-01T00:00:00Z" + ).replace(tzinfo=UTC) + result = build_conversation_turns_from_items( + mock_items, mock_db_turns, conversation_start_time + ) + actual_history = [turn.model_dump() for turn in result] + assert actual_history == expected_chat_history @pytest.mark.asyncio - async def test_simplify_session_data_empty_turns(self) -> None: - """Test simplify_session_data with empty turns.""" - session_data = { - "session_id": VALID_CONVERSATION_ID, - "started_at": "2024-01-01T00:00:00Z", - "turns": [], - } - - result = simplify_session_data(session_data) + async def test_build_conversation_turns_from_items_empty_turns(self) -> None: + """Test build_conversation_turns_from_items with empty items.""" + conversation_start_time = datetime.fromisoformat( + "2024-01-01T00:00:00Z" + ).replace(tzinfo=UTC) + result = build_conversation_turns_from_items([], [], conversation_start_time) assert not result - @pytest.mark.asyncio - async def test_simplify_session_data_filters_unwanted_fields(self) -> None: - """Test that simplify_session_data properly filters out unwanted fields.""" - session_data = { - "session_id": VALID_CONVERSATION_ID, - "turns": [ - { - "turn_id": "turn-1", - "input_messages": [ - { - "content": "Test message", - "role": "user", - "context": {"some": "context"}, # Should be filtered out - "metadata": {"extra": "data"}, # Should be filtered out - } - ], - "output_message": { - "content": "Test response", - "role": "assistant", - "stop_reason": "end_of_turn", # Should be filtered out - "tool_calls": ["tool1", "tool2"], # Should be filtered out - }, - "started_at": "2024-01-01T00:01:00Z", - "completed_at": "2024-01-01T00:01:05Z", - "steps": ["step1", "step2"], # Should be filtered out - } - ], - } - - result = simplify_session_data(session_data) - - expected = [ - { - "messages": [ - {"content": "Test message", "type": "user"}, - {"content": "Test response", "type": "assistant"}, - ], - "started_at": "2024-01-01T00:01:00Z", - "completed_at": "2024-01-01T00:01:05Z", - } - ] - - assert result == expected - class TestGetConversationEndpoint: """Test cases for the GET /conversations/{conversation_id} endpoint.""" @@ -376,7 +460,7 @@ async def test_configuration_not_loaded( """Test the endpoint when configuration is not loaded.""" mock_authorization_resolvers(mocker) mock_config = AppConfig() - mocker.patch("app.endpoints.conversations.configuration", mock_config) + mocker.patch("app.endpoints.conversations_v1.configuration", mock_config) with pytest.raises(HTTPException) as exc_info: await get_conversation_endpoint_handler( @@ -400,8 +484,10 @@ async def test_invalid_conversation_id_format( ) -> None: """Test the endpoint with an invalid conversation ID format.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=False) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=False) with pytest.raises(HTTPException) as exc_info: await get_conversation_endpoint_handler( @@ -422,21 +508,23 @@ async def test_llama_stack_connection_error( 
mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, + mock_conversation: MockType, ) -> None: """Test the endpoint when LlamaStack connection fails.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + + mock_database_session(mocker, query_result=[mock_conversation], db_turns=[]) - # Mock AsyncLlamaStackClientHolder to raise APIConnectionError mock_client = mocker.AsyncMock() - mock_client.agents.session.list.side_effect = APIConnectionError( + mock_client.conversations.items.list.side_effect = APIConnectionError( request=None # type: ignore ) mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -460,6 +548,7 @@ async def test_llama_stack_not_found_error( mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, + mock_conversation: MockType, ) -> None: """Test the endpoint when LlamaStack returns NotFoundError. @@ -471,18 +560,25 @@ async def test_llama_stack_not_found_error( "does not exist" and the conversation ID. """ mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "app.endpoints.conversations_v1.validate_and_retrieve_conversation", + return_value=mock_conversation, + ) + + mock_database_session(mocker, db_turns=[]) - # Mock AsyncLlamaStackClientHolder to raise NotFoundError mock_client = mocker.AsyncMock() - mock_client.agents.session.list.side_effect = NotFoundError( - message="Session not found", response=mocker.Mock(request=None), body=None + mock_client.conversations.items.list.side_effect = NotFoundError( + message="Conversation not found", + response=mocker.Mock(request=None), + body=None, ) mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -501,75 +597,33 @@ async def test_llama_stack_not_found_error( assert "does not exist" in detail["cause"] # type: ignore assert VALID_CONVERSATION_ID in detail["cause"] # type: ignore - @pytest.mark.asyncio - async def test_session_retrieve_exception( - self, - mocker: MockerFixture, - setup_configuration: AppConfig, - dummy_request: Request, - ) -> None: - """Test the endpoint when session retrieval raises an APIConnectionError.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - 
mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") - - # Mock AsyncLlamaStackClientHolder to raise APIConnectionError - mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" - ) - mock_client = mocker.AsyncMock() - mock_client.agents.session.list.side_effect = APIConnectionError( - request=mocker.Mock() - ) - mock_client_holder.return_value.get_client.return_value = mock_client - - with pytest.raises(HTTPException) as exc_info: - await get_conversation_endpoint_handler( - request=dummy_request, - conversation_id=VALID_CONVERSATION_ID, - auth=MOCK_AUTH, - ) - - assert exc_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore - @pytest.mark.asyncio async def test_get_conversation_forbidden( self, mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, - mock_conversation: MockType, ) -> None: """Test forbidden access when user lacks permission to read conversation.""" - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) mocker.patch( - "app.endpoints.conversations.retrieve_conversation", - return_value=mock_conversation, + "app.endpoints.conversations_v1.configuration", setup_configuration ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) mocker.patch( "authorization.resolvers.NoopAccessResolver.get_actions", return_value=set(Action.GET_CONVERSATION), - ) # Reduce user's permissions to access only their conversations - - mock_row = mocker.Mock() - mock_row.user_id = "different_user_id" - - # Mock the SQLAlchemy-like session - mock_session = mocker.MagicMock() - mock_session.query.return_value.filter.return_value.first.return_value = ( - mock_row ) - mock_session.__enter__.return_value = mock_session - mock_session.__exit__.return_value = None - - mocker.patch("utils.endpoints.get_session", return_value=mock_session) + # Mock validate_and_retrieve_conversation to raise 403 Forbidden + forbidden_response = ForbiddenResponse.conversation( + action="read", + resource_id=VALID_CONVERSATION_ID, + user_id=MOCK_AUTH[0], + ) + mocker.patch( + "app.endpoints.conversations_v1.validate_and_retrieve_conversation", + side_effect=HTTPException(**forbidden_response.model_dump()), + ) with pytest.raises(HTTPException) as exc_info: await get_conversation_endpoint_handler( @@ -594,31 +648,41 @@ async def test_get_others_conversations_allowed_for_authorized_user( setup_configuration: AppConfig, mock_conversation: MockType, dummy_request: Request, - mock_session_data: dict[str, Any], - ) -> None: # pylint: disable=too-many-arguments, too-many-positional-arguments + ) -> None: """Test allowed access to another user's conversation for authorized user.""" mocker.patch( "authorization.resolvers.NoopAccessResolver.get_actions", return_value={Action.GET_CONVERSATION, Action.READ_OTHERS_CONVERSATIONS}, - ) # Allow user to access other users' conversations - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) + ) mocker.patch( - "app.endpoints.conversations.retrieve_conversation", - return_value=mock_conversation, + "app.endpoints.conversations_v1.configuration", setup_configuration ) + 
mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) - mock_client = mocker.AsyncMock() - mock_client.agents.session.list.return_value = mocker.Mock( - data=[mock_session_data] + mock_db_turns = [ + create_mock_db_turn( + mocker, 1, "2024-01-01T00:01:00Z", "2024-01-01T00:01:05Z" + ), + ] + mock_database_session( + mocker, query_result=[mock_conversation], db_turns=mock_db_turns ) - mock_session_retrieve_result = mocker.Mock() - mock_session_retrieve_result.model_dump.return_value = mock_session_data - mock_client.agents.session.retrieve.return_value = mock_session_retrieve_result + mock_client = mocker.AsyncMock() + mock_items_response = mocker.Mock() + mock_item1 = mocker.Mock() + mock_item1.type = "message" + mock_item1.role = "user" + mock_item1.content = "Hello" + mock_item2 = mocker.Mock() + mock_item2.type = "message" + mock_item2.role = "assistant" + mock_item2.content = "Hi there!" + mock_items_response.data = [mock_item1, mock_item2] + mock_client.conversations.items.list.return_value = mock_items_response mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client response = await get_conversation_endpoint_handler( @@ -635,30 +699,43 @@ async def test_successful_conversation_retrieval( self, mocker: MockerFixture, setup_configuration: AppConfig, - mock_session_data: dict[str, Any], expected_chat_history: list[dict[str, Any]], dummy_request: Request, - ) -> None: # pylint: disable=too-many-arguments,too-many-positional-arguments + mock_conversation: MockType, + ) -> None: """Test successful conversation retrieval with simplified response structure.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) - # Mock AsyncLlamaStackClientHolder - mock_client = mocker.AsyncMock() - mock_client.agents.session.list.return_value = mocker.Mock( - data=[mock_session_data] + mock_db_turns = [ + create_mock_db_turn( + mocker, 1, "2024-01-01T00:01:00Z", "2024-01-01T00:01:05Z" + ), + create_mock_db_turn( + mocker, 2, "2024-01-01T00:02:00Z", "2024-01-01T00:02:03Z" + ), + ] + mock_database_session( + mocker, query_result=[mock_conversation], db_turns=mock_db_turns ) - # Mock session.retrieve to return an object with model_dump() method - mock_session_retrieve_result = mocker.Mock() - mock_session_retrieve_result.model_dump.return_value = mock_session_data - mock_client.agents.session.retrieve.return_value = mock_session_retrieve_result + mock_client = mocker.AsyncMock() + mock_items = mocker.Mock() + mock_items.data = [ + mocker.Mock(type="message", role="user", content="Hello"), + mocker.Mock(type="message", role="assistant", content="Hi there!"), + mocker.Mock(type="message", role="user", content="How are you?"), + mocker.Mock( + type="message", role="assistant", content="I'm doing well, thanks!" 
+ ), + ] + mock_client.conversations.items.list.return_value = mock_items mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -668,10 +745,8 @@ async def test_successful_conversation_retrieval( assert isinstance(response, ConversationResponse) assert response.conversation_id == VALID_CONVERSATION_ID - assert response.chat_history == expected_chat_history - mock_client.agents.session.list.assert_called_once_with( - agent_id=VALID_CONVERSATION_ID - ) + actual_history = [turn.model_dump() for turn in response.chat_history] + assert actual_history == expected_chat_history @pytest.mark.asyncio async def test_retrieve_conversation_returns_none( @@ -682,12 +757,16 @@ async def test_retrieve_conversation_returns_none( ) -> None: """Test when retrieve_conversation returns None.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") mocker.patch( - "app.endpoints.conversations.retrieve_conversation", return_value=None + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mock_database_session(mocker, query_result=[]) + mock_client = mocker.AsyncMock() + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) + mock_client_holder.return_value.get_client.return_value = mock_client with pytest.raises(HTTPException) as exc_info: await get_conversation_endpoint_handler( @@ -702,28 +781,32 @@ async def test_retrieve_conversation_returns_none( assert "Conversation not found" in detail["response"] # type: ignore @pytest.mark.asyncio - async def test_no_sessions_found_in_get_conversation( + async def test_no_items_found_in_get_conversation( self, mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, mock_conversation: MockType, ) -> None: - """Test when no sessions are found for the conversation.""" + """Test when no items are found for the conversation (empty data list).""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") mocker.patch( - "app.endpoints.conversations.retrieve_conversation", + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "app.endpoints.conversations_v1.validate_and_retrieve_conversation", return_value=mock_conversation, ) - # Mock AsyncLlamaStackClientHolder with empty sessions list + mock_database_session(mocker, db_turns=[]) + mock_client = mocker.AsyncMock() - mock_client.agents.session.list.return_value = mocker.Mock(data=[]) + mock_items_response = mocker.Mock() + mock_items_response.data = [] + mock_client.conversations.items.list.return_value = mock_items_response mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -740,33 
+823,34 @@ async def test_no_sessions_found_in_get_conversation( assert "Conversation not found" in detail["response"] # type: ignore @pytest.mark.asyncio - async def test_sqlalchemy_error_in_get_conversation( + async def test_api_status_error_in_get_conversation( self, mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, mock_conversation: MockType, ) -> None: - """Test when SQLAlchemyError is raised during conversation retrieval.""" + """Test when APIStatusError is raised during conversation retrieval.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") mocker.patch( - "app.endpoints.conversations.retrieve_conversation", + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "app.endpoints.conversations_v1.validate_and_retrieve_conversation", return_value=mock_conversation, ) - # Mock AsyncLlamaStackClientHolder - SQLAlchemyError should come from session.retrieve + mock_database_session(mocker, db_turns=[]) + mock_client = mocker.AsyncMock() - mock_session_list_response = mocker.Mock() - mock_session_list_response.data = [{"session_id": VALID_CONVERSATION_ID}] - mock_client.agents.session.list.return_value = mock_session_list_response - mock_client.agents.session.retrieve.side_effect = SQLAlchemyError( - "Database error" + mock_client.conversations.items.list.side_effect = APIStatusError( + message="Conversation not found", + response=mocker.Mock(status_code=404, request=None), + body=None, ) mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -777,158 +861,235 @@ async def test_sqlalchemy_error_in_get_conversation( auth=MOCK_AUTH, ) - assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Database" in detail["response"] # type: ignore - - -class TestDeleteConversationEndpoint: - """Test cases for the DELETE /conversations/{conversation_id} endpoint.""" + assert "Conversation not found" in detail["response"] # type: ignore @pytest.mark.asyncio - async def test_configuration_not_loaded( - self, mocker: MockerFixture, dummy_request: Request + async def test_sqlalchemy_error_in_get_conversation( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, ) -> None: - """Test the endpoint when configuration is not loaded.""" + """Test when SQLAlchemyError is raised during conversation retrieval.""" mock_authorization_resolvers(mocker) - mock_config = AppConfig() - mocker.patch("app.endpoints.conversations.configuration", mock_config) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + # Mock validate_and_retrieve_conversation to raise HTTPException (which it does + # when it catches SQLAlchemyError internally) + database_error_response = InternalServerErrorResponse.database_error() + mocker.patch( + 
"app.endpoints.conversations_v1.validate_and_retrieve_conversation", + side_effect=HTTPException(**database_error_response.model_dump()), + ) with pytest.raises(HTTPException) as exc_info: - await delete_conversation_endpoint_handler( + await get_conversation_endpoint_handler( request=dummy_request, conversation_id=VALID_CONVERSATION_ID, auth=MOCK_AUTH, ) assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR - detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Configuration is not loaded" in detail["response"] # type: ignore + assert "Database" in detail["response"] # type: ignore @pytest.mark.asyncio - async def test_invalid_conversation_id_format( + async def test_sqlalchemy_error_retrieving_turns_in_get_conversation( self, mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, + mock_conversation: MockType, ) -> None: - """Test the endpoint with an invalid conversation ID format.""" + """Test when SQLAlchemyError is raised while retrieving conversation turns.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=False) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "app.endpoints.conversations_v1.validate_and_retrieve_conversation", + return_value=mock_conversation, + ) - with pytest.raises(HTTPException) as exc_info: - await delete_conversation_endpoint_handler( - request=dummy_request, - conversation_id=INVALID_CONVERSATION_ID, - auth=MOCK_AUTH, - ) + # Mock get_session to raise SQLAlchemyError when querying UserTurn + mock_session = mocker.Mock() - assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST + def query_side_effect(model_class: type[Any]) -> Any: + if model_class == UserTurn: + mock_query = mocker.Mock() + mock_query_chain = ( + mock_query.filter_by.return_value.order_by.return_value.all + ) + mock_query_chain.side_effect = SQLAlchemyError("Database error") + return mock_query + # Return a default mock for other queries + return mocker.Mock() - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert "Invalid conversation ID format" in detail["response"] # type: ignore - assert INVALID_CONVERSATION_ID in detail["cause"] # type: ignore + mock_session.query.side_effect = query_side_effect - @pytest.mark.asyncio - async def test_llama_stack_connection_error( - self, - mocker: MockerFixture, - setup_configuration: AppConfig, - dummy_request: Request, - ) -> None: - """Test the endpoint when LlamaStack connection fails.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mock_session_context = mocker.MagicMock() + mock_session_context.__enter__.return_value = mock_session + mock_session_context.__exit__.return_value = None + mocker.patch( + "app.endpoints.conversations_v1.get_session", + return_value=mock_session_context, + ) - # Mock AsyncLlamaStackClientHolder to raise APIConnectionError mock_client = mocker.AsyncMock() - mock_client.agents.session.delete.side_effect = APIConnectionError( - request=None # type: ignore - ) + 
mock_items_response = mocker.Mock() + mock_items_response.data = [ + mocker.Mock(type="message", role="user", content="Hello"), + mocker.Mock(type="message", role="assistant", content="Hi!"), + ] + mock_client.conversations.items.list.return_value = mock_items_response mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client with pytest.raises(HTTPException) as exc_info: - await delete_conversation_endpoint_handler( + await get_conversation_endpoint_handler( request=dummy_request, conversation_id=VALID_CONVERSATION_ID, auth=MOCK_AUTH, ) - assert exc_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore + assert "Database" in detail["response"] # type: ignore @pytest.mark.asyncio - async def test_llama_stack_not_found_error( + async def test_sqlalchemy_error_in_retrieve_conversation( self, mocker: MockerFixture, setup_configuration: AppConfig, dummy_request: Request, ) -> None: - """Test the endpoint when LlamaStack returns NotFoundError.""" + """Test when SQLAlchemyError is raised during retrieve_conversation call.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "utils.endpoints.can_access_conversation", + return_value=True, + ) - # Mock AsyncLlamaStackClientHolder to raise NotFoundError - mock_client = mocker.AsyncMock() - mock_client.agents.session.delete.side_effect = NotFoundError( - message="Session not found", response=mocker.Mock(request=None), body=None + mock_session = mocker.Mock() + mock_query = mocker.Mock() + mock_query.filter_by.return_value.first.side_effect = SQLAlchemyError( + "Database error" ) - mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + mock_session.query.return_value = mock_query + mock_session_context = mocker.MagicMock() + mock_session_context.__enter__.return_value = mock_session + mock_session_context.__exit__.return_value = None + mocker.patch( + "utils.endpoints.get_session", + return_value=mock_session_context, ) - mock_client_holder.return_value.get_client.return_value = mock_client with pytest.raises(HTTPException) as exc_info: - await delete_conversation_endpoint_handler( + await get_conversation_endpoint_handler( request=dummy_request, conversation_id=VALID_CONVERSATION_ID, auth=MOCK_AUTH, ) - assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Conversation not found" in detail["response"] # type: ignore - assert "does not exist" in detail["cause"] # type: ignore - assert VALID_CONVERSATION_ID in detail["cause"] # type: ignore + assert "Database" in detail["response"] # type: ignore + + 
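+# The DELETE tests below drive the v1 endpoint through the Llama Stack
+# Conversations API (client.conversations.delete), which replaces the
+# agents.session.* surface mocked in the earlier version of these tests.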
+class TestDeleteConversationEndpoint: + """Test cases for the DELETE /conversations/{conversation_id} endpoint.""" @pytest.mark.asyncio - async def test_session_deletion_exception( - self, - mocker: MockerFixture, - setup_configuration: AppConfig, - dummy_request: Request, + async def test_configuration_not_loaded( + self, mocker: MockerFixture, dummy_request: Request ) -> None: - """Test the endpoint when session deletion raises an exception.""" + """Test the endpoint when configuration is not loaded.""" + mock_authorization_resolvers(mocker) + mock_config = AppConfig() + mocker.patch("app.endpoints.conversations_v1.configuration", mock_config) + + with pytest.raises(HTTPException) as exc_info: + await delete_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Configuration is not loaded" in detail["response"] # type: ignore + + @pytest.mark.asyncio + async def test_invalid_conversation_id_format( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + ) -> None: + """Test the endpoint with an invalid conversation ID format.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=False) + + with pytest.raises(HTTPException) as exc_info: + await delete_conversation_endpoint_handler( + request=dummy_request, + conversation_id=INVALID_CONVERSATION_ID, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST + + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Invalid conversation ID format" in detail["response"] # type: ignore + assert INVALID_CONVERSATION_ID in detail["cause"] # type: ignore + + @pytest.mark.asyncio + async def test_llama_stack_connection_error( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + ) -> None: + """Test the endpoint when LlamaStack connection fails.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch("app.endpoints.conversations_v1.retrieve_conversation") + + mocker.patch( + "app.endpoints.conversations_v1.delete_conversation", return_value=True + ) - # Mock AsyncLlamaStackClientHolder to raise a general exception mock_client = mocker.AsyncMock() - mock_client.agents.session.delete.side_effect = APIConnectionError( + mock_client.conversations.delete.side_effect = APIConnectionError( request=None # type: ignore ) mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -938,10 +1099,53 @@ async def test_session_deletion_exception( 
conversation_id=VALID_CONVERSATION_ID, auth=MOCK_AUTH, ) + assert exc_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Unable to connect to Llama Stack" in detail["response"] # type: ignore + assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore + + @pytest.mark.asyncio + async def test_llama_stack_not_found_error( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + ) -> None: + """Test the endpoint when LlamaStack returns NotFoundError.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch("app.endpoints.conversations_v1.retrieve_conversation") + + mocker.patch( + "app.endpoints.conversations_v1.delete_conversation", return_value=True + ) + + mock_client = mocker.AsyncMock() + mock_client.conversations.delete.side_effect = APIStatusError( + message="Conversation not found", + response=mocker.Mock(status_code=404, request=None), + body=None, + ) + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" + ) + mock_client_holder.return_value.get_client.return_value = mock_client + + response = await delete_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + auth=MOCK_AUTH, + ) + + assert isinstance(response, ConversationDeleteResponse) + assert response.conversation_id == VALID_CONVERSATION_ID + assert response.success is True + assert "deleted successfully" in response.response @pytest.mark.asyncio async def test_delete_conversation_forbidden( @@ -952,21 +1156,22 @@ async def test_delete_conversation_forbidden( mock_conversation: MockType, ) -> None: """Test forbidden deletion when user lacks permission to delete conversation.""" - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) mocker.patch( - "app.endpoints.conversations.retrieve_conversation", + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", return_value=mock_conversation, ) mocker.patch( "authorization.resolvers.NoopAccessResolver.get_actions", return_value=set(Action.DELETE_CONVERSATION), - ) # Reduce user's permissions to delete only their conversations + ) mock_row = mocker.Mock() mock_row.user_id = "different_user_id" - # Mock the SQLAlchemy-like session mock_session = mocker.MagicMock() mock_session.query.return_value.filter.return_value.first.return_value = ( mock_row @@ -1008,36 +1213,38 @@ async def test_delete_others_conversations_allowed_for_authorized_user( Action.DELETE_OTHERS_CONVERSATIONS, Action.DELETE_CONVERSATION, }, - ) # Allow user to detele other users' conversations + ) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) mocker.patch( - "app.endpoints.conversations.retrieve_conversation", + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch( 
+ "app.endpoints.conversations_v1.retrieve_conversation", return_value=mock_conversation, ) - - mock_client = mocker.AsyncMock() - mock_client.agents.session.list.return_value.data = [ - {"session_id": VALID_CONVERSATION_ID} - ] - mock_client.agents.session.delete.return_value = None mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder.get_client", - return_value=mock_client, + "app.endpoints.conversations_v1.delete_conversation", return_value=True ) - mocker.patch( - "app.endpoints.conversations.delete_conversation", return_value=None + mock_client = mocker.AsyncMock() + mock_delete_response = mocker.Mock() + mock_delete_response.deleted = True + mock_client.conversations.delete.return_value = mock_delete_response + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) + mock_client_holder.return_value.get_client.return_value = mock_client + response = await delete_conversation_endpoint_handler( request=dummy_request, conversation_id=VALID_CONVERSATION_ID, auth=MOCK_AUTH, ) - assert response.success is True + assert isinstance(response, ConversationDeleteResponse) assert response.conversation_id == VALID_CONVERSATION_ID + assert response.success is True assert "deleted successfully" in response.response @pytest.mark.asyncio @@ -1049,23 +1256,23 @@ async def test_successful_conversation_deletion( ) -> None: """Test successful conversation deletion.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") - mocker.patch("app.endpoints.conversations.retrieve_conversation") + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch("app.endpoints.conversations_v1.retrieve_conversation") - # Mock the delete_conversation function - mocker.patch("app.endpoints.conversations.delete_conversation") + mock_delete = mocker.patch( + "app.endpoints.conversations_v1.delete_conversation", return_value=True + ) - # Mock AsyncLlamaStackClientHolder mock_client = mocker.AsyncMock() - # Ensure the endpoint sees an existing session so it proceeds to delete - mock_client.agents.session.list.return_value = mocker.Mock( - data=[{"session_id": VALID_CONVERSATION_ID}] - ) - mock_client.agents.session.delete.return_value = None # Successful deletion + mock_delete_response = mocker.Mock() + mock_delete_response.deleted = True + mock_client.conversations.delete.return_value = mock_delete_response mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -1076,10 +1283,9 @@ async def test_successful_conversation_deletion( assert isinstance(response, ConversationDeleteResponse) assert response.conversation_id == VALID_CONVERSATION_ID assert response.success is True - assert response.response == "Conversation deleted successfully" - mock_client.agents.session.delete.assert_called_once_with( - agent_id=VALID_CONVERSATION_ID, session_id=VALID_CONVERSATION_ID - ) + assert "deleted successfully" in response.response + mock_delete.assert_called_once() + 
mock_client.conversations.delete.assert_called_once() @pytest.mark.asyncio async def test_retrieve_conversation_returns_none_in_delete( @@ -1088,50 +1294,25 @@ async def test_retrieve_conversation_returns_none_in_delete( setup_configuration: AppConfig, dummy_request: Request, ) -> None: - """Test when retrieve_conversation returns None in delete endpoint.""" + """Test when conversation doesn't exist in delete endpoint.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") mocker.patch( - "app.endpoints.conversations.retrieve_conversation", return_value=None + "app.endpoints.conversations_v1.configuration", setup_configuration ) - - with pytest.raises(HTTPException) as exc_info: - await delete_conversation_endpoint_handler( - request=dummy_request, - conversation_id=VALID_CONVERSATION_ID, - auth=MOCK_AUTH, - ) - - assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert "Conversation not found" in detail["response"] # type: ignore - - @pytest.mark.asyncio - async def test_no_sessions_found_in_delete( - self, - mocker: MockerFixture, - setup_configuration: AppConfig, - dummy_request: Request, - mock_conversation: MockType, - ) -> None: - """Test when no sessions are found in delete endpoint (early return).""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - mocker.patch("app.endpoints.conversations.can_access_conversation") + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) mocker.patch( - "app.endpoints.conversations.retrieve_conversation", - return_value=mock_conversation, + "app.endpoints.conversations_v1.can_access_conversation", return_value=True + ) + mocker.patch( + "app.endpoints.conversations_v1.delete_conversation", return_value=False ) - # Mock AsyncLlamaStackClientHolder with empty sessions list mock_client = mocker.AsyncMock() - mock_client.agents.session.list.return_value = mocker.Mock(data=[]) + mock_delete_response = mocker.Mock() + mock_delete_response.deleted = True + mock_client.conversations.delete.return_value = mock_delete_response mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client @@ -1143,8 +1324,8 @@ async def test_no_sessions_found_in_delete( assert isinstance(response, ConversationDeleteResponse) assert response.conversation_id == VALID_CONVERSATION_ID - assert response.success is True # Operation completed successfully - assert "cannot be deleted" in response.response # But nothing was deleted + assert response.success is True + assert "cannot be deleted" in response.response # Not found locally @pytest.mark.asyncio async def test_sqlalchemy_error_in_delete( @@ -1156,28 +1337,28 @@ async def test_sqlalchemy_error_in_delete( ) -> None: """Test when SQLAlchemyError is raised during conversation deletion.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) - mocker.patch("app.endpoints.conversations.check_suid", return_value=True) - 
mocker.patch("app.endpoints.conversations.can_access_conversation") mocker.patch( - "app.endpoints.conversations.retrieve_conversation", + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", return_value=mock_conversation, ) - # Mock AsyncLlamaStackClientHolder - SQLAlchemyError should come from delete_conversation mock_client = mocker.AsyncMock() mock_session_list_response = mocker.Mock() mock_session_list_response.data = [{"session_id": VALID_CONVERSATION_ID}] mock_client.agents.session.list.return_value = mock_session_list_response mock_client.agents.session.delete.return_value = None mock_client_holder = mocker.patch( - "app.endpoints.conversations.AsyncLlamaStackClientHolder" + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" ) mock_client_holder.return_value.get_client.return_value = mock_client - # Mock delete_conversation to raise SQLAlchemyError mocker.patch( - "app.endpoints.conversations.delete_conversation", + "app.endpoints.conversations_v1.delete_conversation", side_effect=SQLAlchemyError("Database error"), ) @@ -1205,7 +1386,7 @@ async def test_configuration_not_loaded( """Test the endpoint when configuration is not loaded.""" mock_authorization_resolvers(mocker) mock_config = AppConfig() - mocker.patch("app.endpoints.conversations.configuration", mock_config) + mocker.patch("app.endpoints.conversations_v1.configuration", mock_config) with pytest.raises(HTTPException) as exc_info: await get_conversations_list_endpoint_handler( @@ -1226,7 +1407,9 @@ async def test_successful_conversations_list_retrieval( ) -> None: """Test successful retrieval of conversations list.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session and query results mock_conversations = [ @@ -1289,7 +1472,9 @@ async def test_empty_conversations_list( ) -> None: """Test when user has no conversations.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session with no results mock_database_session(mocker, []) @@ -1311,7 +1496,9 @@ async def test_database_exception( ) -> None: """Test when database query raises an exception.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session to raise exception mock_session = mock_database_session(mocker) @@ -1331,7 +1518,9 @@ async def test_sqlalchemy_error_in_list( ) -> None: """Test when database query raises SQLAlchemyError.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session to raise SQLAlchemyError when all() is called # Since dummy_request has all actions, it will use query directly (not filter_by) @@ -1348,7 +1537,8 @@ async def test_sqlalchemy_error_in_list( mock_session_context.__enter__.return_value = 
mock_session mock_session_context.__exit__.return_value = None mocker.patch( - "app.endpoints.conversations.get_session", return_value=mock_session_context + "app.endpoints.conversations_v1.get_session", + return_value=mock_session_context, ) with pytest.raises(HTTPException) as exc_info: @@ -1370,7 +1560,9 @@ async def test_conversations_list_with_none_topic_summary( ) -> None: """Test conversations list when topic_summary is None.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session with conversation having None topic_summary mock_conversations = [ @@ -1407,7 +1599,9 @@ async def test_conversations_list_with_mixed_topic_summaries( ) -> None: """Test conversations list with mixed topic_summary values (some None, some not).""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session with mixed topic_summary values mock_conversations = [ @@ -1475,7 +1669,9 @@ async def test_conversations_list_with_empty_topic_summary( ) -> None: """Test conversations list when topic_summary is an empty string.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session with conversation having empty topic_summary mock_conversations = [ @@ -1512,7 +1708,9 @@ async def test_conversations_list_topic_summary_field_presence( ) -> None: """Test that topic_summary field is always present in ConversationDetails objects.""" mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations.configuration", setup_configuration) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) # Mock database session with conversations mock_conversations = [ @@ -1546,3 +1744,380 @@ async def test_conversations_list_topic_summary_field_presence( conv_dict = conv.model_dump() assert "topic_summary" in conv_dict assert conv_dict["topic_summary"] == "Test topic summary" + + +class TestUpdateConversationEndpoint: + """Test cases for the PUT /conversations/{conversation_id} endpoint.""" + + @pytest.mark.asyncio + async def test_configuration_not_loaded( + self, mocker: MockerFixture, dummy_request: Request + ) -> None: + """Test the endpoint when configuration is not loaded.""" + mock_authorization_resolvers(mocker) + mock_config = AppConfig() + mocker.patch("app.endpoints.conversations_v1.configuration", mock_config) + + update_request = ConversationUpdateRequest(topic_summary="New topic") + + with pytest.raises(HTTPException) as exc_info: + await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Configuration is not loaded" in detail["response"] # type: ignore + + @pytest.mark.asyncio + async def test_invalid_conversation_id_format( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + ) -> None: + """Test the endpoint with an invalid conversation ID format.""" + 
mock_authorization_resolvers(mocker)
+        mocker.patch(
+            "app.endpoints.conversations_v1.configuration", setup_configuration
+        )
+        mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=False)
+
+        update_request = ConversationUpdateRequest(topic_summary="New topic")
+
+        with pytest.raises(HTTPException) as exc_info:
+            await update_conversation_endpoint_handler(
+                request=dummy_request,
+                conversation_id=INVALID_CONVERSATION_ID,
+                update_request=update_request,
+                auth=MOCK_AUTH,
+            )
+
+        assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST
+        detail = exc_info.value.detail
+        assert isinstance(detail, dict)
+        assert "Invalid conversation ID format" in detail["response"]  # type: ignore
+
+    @pytest.mark.asyncio
+    async def test_update_conversation_forbidden(
+        self,
+        mocker: MockerFixture,
+        setup_configuration: AppConfig,
+        dummy_request: Request,
+        mock_conversation: MockType,
+    ) -> None:
+        """Test forbidden access when user lacks permission to update conversation."""
+        mock_authorization_resolvers(mocker)
+        mocker.patch(
+            "app.endpoints.conversations_v1.configuration", setup_configuration
+        )
+        mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True)
+        mocker.patch(
+            "app.endpoints.conversations_v1.retrieve_conversation",
+            return_value=mock_conversation,
+        )
+        mocker.patch(
+            "authorization.resolvers.NoopAccessResolver.get_actions",
+            return_value={Action.UPDATE_CONVERSATION},
+        )  # User can only update their own conversations
+
+        # Mock can_access_conversation to return False (user doesn't have access)
+        mocker.patch(
+            "app.endpoints.conversations_v1.can_access_conversation", return_value=False
+        )
+
+        update_request = ConversationUpdateRequest(topic_summary="New topic")
+
+        with pytest.raises(HTTPException) as exc_info:
+            await update_conversation_endpoint_handler(
+                request=dummy_request,
+                conversation_id=VALID_CONVERSATION_ID,
+                update_request=update_request,
+                auth=MOCK_AUTH,
+            )
+
+        assert exc_info.value.status_code == status.HTTP_403_FORBIDDEN
+        detail = exc_info.value.detail
+        assert isinstance(detail, dict)
+        assert "does not have permission" in detail["cause"]  # type: ignore
+
+    @pytest.mark.asyncio
+    async def test_conversation_not_found_in_update(
+        self,
+        mocker: MockerFixture,
+        setup_configuration: AppConfig,
+        dummy_request: Request,
+    ) -> None:
+        """Test when conversation is not found in update endpoint."""
+        mock_authorization_resolvers(mocker)
+        mocker.patch(
+            "app.endpoints.conversations_v1.configuration", setup_configuration
+        )
+        mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True)
+        mocker.patch("app.endpoints.conversations_v1.can_access_conversation")
+        mocker.patch(
+            "app.endpoints.conversations_v1.retrieve_conversation", return_value=None
+        )
+
+        update_request = ConversationUpdateRequest(topic_summary="New topic")
+
+        with pytest.raises(HTTPException) as exc_info:
+            await update_conversation_endpoint_handler(
+                request=dummy_request,
+                conversation_id=VALID_CONVERSATION_ID,
+                update_request=update_request,
+                auth=MOCK_AUTH,
+            )
+
+        assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND
+        detail = exc_info.value.detail
+        assert isinstance(detail, dict)
+        assert "Conversation not found" in detail["response"]  # type: ignore
+
+    @pytest.mark.asyncio
+    async def test_sqlalchemy_error_in_retrieve_conversation_update(
+        self,
+        mocker: MockerFixture,
+        setup_configuration: AppConfig,
+        dummy_request: Request,
+    ) -> None:
+        """Test when SQLAlchemyError is raised during 
retrieve_conversation in update.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", + side_effect=SQLAlchemyError("Database error"), + ) + + update_request = ConversationUpdateRequest(topic_summary="New topic") + + with pytest.raises(HTTPException) as exc_info: + await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Database" in detail["response"] # type: ignore + + @pytest.mark.asyncio + async def test_successful_conversation_update( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + mock_conversation: MockType, + ) -> None: + """Test successful conversation update.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", + return_value=mock_conversation, + ) + + # Mock database session for update + mock_session = mocker.Mock() + mock_db_conv = mocker.Mock() + mock_db_conv.topic_summary = None + mock_session.query.return_value.filter_by.return_value.first.return_value = ( + mock_db_conv + ) + mock_session_context = mocker.MagicMock() + mock_session_context.__enter__.return_value = mock_session + mock_session_context.__exit__.return_value = None + mocker.patch( + "app.endpoints.conversations_v1.get_session", + return_value=mock_session_context, + ) + + # Mock AsyncLlamaStackClientHolder + mock_client = mocker.AsyncMock() + mock_client.conversations.update.return_value = None + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" + ) + mock_client_holder.return_value.get_client.return_value = mock_client + + update_request = ConversationUpdateRequest(topic_summary="New topic summary") + + response = await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert isinstance(response, ConversationUpdateResponse) + assert response.conversation_id == VALID_CONVERSATION_ID + assert response.success is True + assert "updated successfully" in response.message + mock_client.conversations.update.assert_called_once() + mock_session.commit.assert_called_once() + + @pytest.mark.asyncio + async def test_llama_stack_connection_error_in_update( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + mock_conversation: MockType, + ) -> None: + """Test the endpoint when LlamaStack connection fails during update.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + 
"app.endpoints.conversations_v1.retrieve_conversation", + return_value=mock_conversation, + ) + + # Mock AsyncLlamaStackClientHolder to raise APIConnectionError + mock_client = mocker.AsyncMock() + mock_client.conversations.update.side_effect = APIConnectionError( + request=None # type: ignore + ) + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" + ) + mock_client_holder.return_value.get_client.return_value = mock_client + + update_request = ConversationUpdateRequest(topic_summary="New topic") + + with pytest.raises(HTTPException) as exc_info: + await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore + + @pytest.mark.asyncio + async def test_llama_stack_not_found_error_in_update( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + mock_conversation: MockType, + ) -> None: + """Test the endpoint when LlamaStack returns NotFoundError during update.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", + return_value=mock_conversation, + ) + + # Mock AsyncLlamaStackClientHolder to raise APIStatusError + mock_client = mocker.AsyncMock() + mock_client.conversations.update.side_effect = APIStatusError( + message="Conversation not found", + response=mocker.Mock(status_code=404, request=None), + body=None, + ) + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" + ) + mock_client_holder.return_value.get_client.return_value = mock_client + + update_request = ConversationUpdateRequest(topic_summary="New topic") + + with pytest.raises(HTTPException) as exc_info: + await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Conversation not found" in detail["response"] # type: ignore + + @pytest.mark.asyncio + async def test_sqlalchemy_error_in_database_update( + self, + mocker: MockerFixture, + setup_configuration: AppConfig, + dummy_request: Request, + mock_conversation: MockType, + ) -> None: + """Test when SQLAlchemyError is raised during database update.""" + mock_authorization_resolvers(mocker) + mocker.patch( + "app.endpoints.conversations_v1.configuration", setup_configuration + ) + mocker.patch("app.endpoints.conversations_v1.check_suid", return_value=True) + mocker.patch("app.endpoints.conversations_v1.can_access_conversation") + mocker.patch( + "app.endpoints.conversations_v1.retrieve_conversation", + return_value=mock_conversation, + ) + + # Mock AsyncLlamaStackClientHolder - update succeeds + mock_client = mocker.AsyncMock() + mock_client.conversations.update.return_value = None + mock_client_holder = mocker.patch( + "app.endpoints.conversations_v1.AsyncLlamaStackClientHolder" + ) 
+ mock_client_holder.return_value.get_client.return_value = mock_client + + # Mock database session - commit raises SQLAlchemyError + mock_session = mocker.Mock() + mock_db_conv = mocker.Mock() + mock_db_conv.topic_summary = None + mock_session.query.return_value.filter_by.return_value.first.return_value = ( + mock_db_conv + ) + mock_session.commit.side_effect = SQLAlchemyError("Database error") + mock_session_context = mocker.MagicMock() + mock_session_context.__enter__.return_value = mock_session + mock_session_context.__exit__.return_value = None + mocker.patch( + "app.endpoints.conversations_v1.get_session", + return_value=mock_session_context, + ) + + update_request = ConversationUpdateRequest(topic_summary="New topic") + + with pytest.raises(HTTPException) as exc_info: + await update_conversation_endpoint_handler( + request=dummy_request, + conversation_id=VALID_CONVERSATION_ID, + update_request=update_request, + auth=MOCK_AUTH, + ) + + assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert "Database" in detail["response"] # type: ignore diff --git a/tests/unit/app/endpoints/test_conversations_v2.py b/tests/unit/app/endpoints/test_conversations_v2.py index 1ee7f8d86..57019dad8 100644 --- a/tests/unit/app/endpoints/test_conversations_v2.py +++ b/tests/unit/app/endpoints/test_conversations_v2.py @@ -3,18 +3,19 @@ """Unit tests for the /conversations REST API endpoints.""" from datetime import datetime, timezone +from typing import Any, cast import pytest from fastapi import HTTPException, status from pytest_mock import MockerFixture, MockType from app.endpoints.conversations_v2 import ( + build_conversation_turn_from_cache_entry, check_conversation_existence, check_valid_conversation_id, delete_conversation_endpoint_handler, get_conversation_endpoint_handler, get_conversations_list_endpoint_handler, - transform_chat_message, update_conversation_endpoint_handler, ) from configuration import AppConfig @@ -23,57 +24,20 @@ from models.responses import ( ConversationData, ConversationUpdateResponse, - ReferencedDocument, ) from tests.unit.utils.auth_helpers import mock_authorization_resolvers +from utils.types import ToolCallSummary, ToolResultSummary MOCK_AUTH = ("mock_user_id", "mock_username", False, "mock_token") VALID_CONVERSATION_ID = "123e4567-e89b-12d3-a456-426614174000" INVALID_CONVERSATION_ID = "invalid-id" -def test_transform_message() -> None: - """Test the transform_chat_message transformation function.""" - entry = CacheEntry( - query="query", - response="response", - provider="provider", - model="model", - started_at="2024-01-01T00:00:00Z", - completed_at="2024-01-01T00:00:05Z", - ) - transformed = transform_chat_message(entry) - assert transformed is not None +class TestBuildConversationTurnFromCacheEntry: + """Test cases for the build_conversation_turn_from_cache_entry utility function.""" - assert "provider" in transformed - assert transformed["provider"] == "provider" - - assert "model" in transformed - assert transformed["model"] == "model" - - assert "started_at" in transformed - assert transformed["started_at"] == "2024-01-01T00:00:00Z" - - assert "completed_at" in transformed - assert transformed["completed_at"] == "2024-01-01T00:00:05Z" - - assert "messages" in transformed - assert len(transformed["messages"]) == 2 - - message1 = transformed["messages"][0] - assert message1["type"] == "user" - assert message1["content"] == "query" - - message2 = transformed["messages"][1] 
- assert message2["type"] == "assistant" - assert message2["content"] == "response" - - -class TestTransformChatMessage: - """Test cases for the transform_chat_message utility function.""" - - def test_transform_message_without_documents(self) -> None: - """Test the transformation when no referenced_documents are present.""" + def test_build_turn_without_tool_calls(self) -> None: + """Test building a turn when no tool calls/results are present.""" entry = CacheEntry( query="query", response="response", @@ -81,41 +45,36 @@ def test_transform_message_without_documents(self) -> None: model="model", started_at="2024-01-01T00:00:00Z", completed_at="2024-01-01T00:00:05Z", - # referenced_documents is None by default + # tool_calls and tool_results are None by default ) - transformed = transform_chat_message(entry) - - assistant_message = transformed["messages"][1] - - # Assert that the key is NOT present when the list is None - assert "referenced_documents" not in assistant_message - - def test_transform_message_with_referenced_documents(self) -> None: - """Test the transformation when referenced_documents are present.""" - docs = [ - ReferencedDocument(doc_title="Test Doc", doc_url="http://example.com") - ] # type: ignore - entry = CacheEntry( - query="query", - response="response", - provider="provider", - model="model", - started_at="2024-01-01T00:00:00Z", - completed_at="2024-01-01T00:00:05Z", - referenced_documents=docs, - ) - - transformed = transform_chat_message(entry) - assistant_message = transformed["messages"][1] - - assert "referenced_documents" in assistant_message - ref_docs = assistant_message["referenced_documents"] - assert len(ref_docs) == 1 - assert ref_docs[0]["doc_title"] == "Test Doc" - assert str(ref_docs[0]["doc_url"]) == "http://example.com/" - - def test_transform_message_with_empty_referenced_documents(self) -> None: - """Test the transformation when referenced_documents is an empty list.""" + turn = build_conversation_turn_from_cache_entry(entry) + + assert turn.tool_calls == [] + assert turn.tool_results == [] + assert turn.provider == "provider" + assert turn.model == "model" + assert len(turn.messages) == 2 + + def test_build_turn_with_tool_calls(self) -> None: + """Test building a turn when tool calls and results are present.""" + + tool_calls = [ + ToolCallSummary( + id="call_1", + name="test_tool", + args={"arg1": "value1"}, + type="function_call", + ) + ] + tool_results = [ + ToolResultSummary( + id="call_1", + status="success", + content="result", + type="function_call_output", + round=1, + ) + ] entry = CacheEntry( query="query", response="response", @@ -123,14 +82,18 @@ def test_transform_message_with_empty_referenced_documents(self) -> None: model="model", started_at="2024-01-01T00:00:00Z", completed_at="2024-01-01T00:00:05Z", - referenced_documents=[], # Explicitly empty + tool_calls=tool_calls, + tool_results=tool_results, ) - transformed = transform_chat_message(entry) - assistant_message = transformed["messages"][1] + turn = build_conversation_turn_from_cache_entry(entry) - assert "referenced_documents" in assistant_message - assert assistant_message["referenced_documents"] == [] + assert turn.provider == "provider" + assert turn.model == "model" + assert len(turn.tool_calls) == 1 + assert turn.tool_calls[0].name == "test_tool" + assert len(turn.tool_results) == 1 + assert turn.tool_results[0].status == "success" @pytest.fixture @@ -258,7 +221,9 @@ async def test_conversation_cache_not_configured( assert exc_info.value.status_code == 
status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Conversation cache not configured" in detail["response"] + detail_dict = cast(dict[str, Any], detail) + response_text = detail_dict.get("response", "") + assert "Conversation cache not configured" in response_text @pytest.mark.asyncio async def test_successful_retrieval( @@ -269,9 +234,9 @@ async def test_successful_retrieval( mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) timestamp_str = "2024-01-01T00:00:00Z" - timestamp_dt = datetime.fromisoformat( - timestamp_str.replace("Z", "+00:00") - ).replace(tzinfo=timezone.utc) + timestamp_dt = datetime.fromisoformat(timestamp_str).replace( + tzinfo=timezone.utc + ) timestamp = timestamp_dt.timestamp() mock_configuration.conversation_cache.list.return_value = [ @@ -335,20 +300,6 @@ async def test_with_skip_userid_check( "mock_user_id", True ) - @pytest.mark.asyncio - async def test_malformed_auth_object( - self, mocker: MockerFixture, mock_configuration: MockType - ) -> None: - """Test the endpoint with a malformed auth object.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) - - with pytest.raises(IndexError): - await get_conversations_list_endpoint_handler( - request=mocker.Mock(), - auth=(), # Malformed auth object - ) - class TestGetConversationEndpoint: """Test cases for the GET /conversations/{conversation_id} endpoint.""" @@ -421,7 +372,9 @@ async def test_conversation_cache_not_configured( assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Conversation cache not configured" in detail["response"] + detail_dict = cast(dict[str, Any], detail) + response_text = detail_dict.get("response", "") + assert "Conversation cache not configured" in response_text @pytest.mark.asyncio async def test_conversation_not_found( @@ -473,7 +426,7 @@ async def test_successful_retrieval( assert response is not None assert response.conversation_id == VALID_CONVERSATION_ID assert len(response.chat_history) == 1 - assert response.chat_history[0]["messages"][0]["content"] == "query" + assert response.chat_history[0].messages[0].content == "query" @pytest.mark.asyncio async def test_with_skip_userid_check( @@ -508,22 +461,6 @@ async def test_with_skip_userid_check( "mock_user_id", VALID_CONVERSATION_ID, True ) - @pytest.mark.asyncio - async def test_malformed_auth_object( - self, mocker: MockerFixture, mock_configuration: MockType - ) -> None: - """Test the endpoint with a malformed auth object.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) - mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - - with pytest.raises(IndexError): - await get_conversation_endpoint_handler( - request=mocker.Mock(), - conversation_id=VALID_CONVERSATION_ID, - auth=(), # Malformed auth object - ) - class TestDeleteConversationEndpoint: """Test cases for the DELETE /conversations/{conversation_id} endpoint.""" @@ -585,28 +522,9 @@ async def test_conversation_cache_not_configured( assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Conversation cache not configured" in detail["response"] - - @pytest.mark.asyncio - async def test_conversation_not_found( - self, mocker: MockerFixture, 
mock_configuration: MockType - ) -> None: - """Test the endpoint when conversation does not exist.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) - mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - mock_configuration.conversation_cache.delete.return_value = False - - response = await delete_conversation_endpoint_handler( - request=mocker.Mock(), - conversation_id=VALID_CONVERSATION_ID, - auth=MOCK_AUTH, - ) - - assert response is not None - assert response.conversation_id == VALID_CONVERSATION_ID - assert response.success is True - assert response.response == "Conversation cannot be deleted" + detail_dict = cast(dict[str, Any], detail) + response_text = detail_dict.get("response", "") + assert "Conversation cache not configured" in response_text @pytest.mark.asyncio async def test_successful_deletion( @@ -616,9 +534,6 @@ async def test_successful_deletion( mock_authorization_resolvers(mocker) mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - mock_configuration.conversation_cache.list.return_value = [ - mocker.Mock(conversation_id=VALID_CONVERSATION_ID) - ] mock_configuration.conversation_cache.delete.return_value = True response = await delete_conversation_endpoint_handler( @@ -636,13 +551,10 @@ async def test_successful_deletion( async def test_unsuccessful_deletion( self, mocker: MockerFixture, mock_configuration: MockType ) -> None: - """Test unsuccessful deletion of a conversation.""" + """Test unsuccessful deletion when delete returns False.""" mock_authorization_resolvers(mocker) mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - mock_configuration.conversation_cache.list.return_value = [ - mocker.Mock(conversation_id=VALID_CONVERSATION_ID) - ] mock_configuration.conversation_cache.delete.return_value = False response = await delete_conversation_endpoint_handler( @@ -674,9 +586,6 @@ async def test_with_skip_userid_check( mock_authorization_resolvers(mocker) mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - mock_configuration.conversation_cache.list.return_value = [ - mocker.Mock(conversation_id=VALID_CONVERSATION_ID) - ] mock_auth_with_skip = ("mock_user_id", "mock_username", True, "mock_token") await delete_conversation_endpoint_handler( @@ -689,22 +598,6 @@ async def test_with_skip_userid_check( "mock_user_id", VALID_CONVERSATION_ID, True ) - @pytest.mark.asyncio - async def test_malformed_auth_object( - self, mocker: MockerFixture, mock_configuration: MockType - ) -> None: - """Test the endpoint with a malformed auth object.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) - mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - - with pytest.raises(IndexError): - await delete_conversation_endpoint_handler( - request=mocker.Mock(), - conversation_id=VALID_CONVERSATION_ID, - auth=(), # Malformed auth object - ) - class TestUpdateConversationEndpoint: """Test cases for the PUT /conversations/{conversation_id} endpoint.""" @@ -777,7 +670,9 @@ async def test_conversation_cache_not_configured( assert exc_info.value.status_code == 
status.HTTP_500_INTERNAL_SERVER_ERROR detail = exc_info.value.detail assert isinstance(detail, dict) - assert "Conversation cache not configured" in detail["response"] # type: ignore + detail_dict = cast(dict[str, Any], detail) + response_text = detail_dict.get("response", "") + assert "Conversation cache not configured" in response_text # type: ignore @pytest.mark.asyncio async def test_conversation_not_found( @@ -856,20 +751,3 @@ async def test_with_skip_userid_check( mock_configuration.conversation_cache.set_topic_summary.assert_called_once_with( "mock_user_id", VALID_CONVERSATION_ID, "New topic summary", True ) - - @pytest.mark.asyncio - async def test_malformed_auth_object( - self, mocker: MockerFixture, mock_configuration: MockType - ) -> None: - """Test the endpoint with a malformed auth object.""" - mock_authorization_resolvers(mocker) - mocker.patch("app.endpoints.conversations_v2.configuration", mock_configuration) - mocker.patch("app.endpoints.conversations_v2.check_suid", return_value=True) - update_request = ConversationUpdateRequest(topic_summary="New topic summary") - - with pytest.raises(IndexError): - await update_conversation_endpoint_handler( - conversation_id=VALID_CONVERSATION_ID, - update_request=update_request, - auth=(), # Malformed auth object - ) diff --git a/tests/unit/app/endpoints/test_models.py b/tests/unit/app/endpoints/test_models.py index a477d7b28..39bd61672 100644 --- a/tests/unit/app/endpoints/test_models.py +++ b/tests/unit/app/endpoints/test_models.py @@ -6,13 +6,28 @@ from fastapi import HTTPException, Request, status from llama_stack_client import APIConnectionError from pytest_mock import MockerFixture +from pytest_subtests import SubTests +from models.requests import ModelFilter from app.endpoints.models import models_endpoint_handler from authentication.interface import AuthTuple from configuration import AppConfig from tests.unit.utils.auth_helpers import mock_authorization_resolvers +# pylint: disable=R0903 +class Model: + """Model information returned in response.""" + + def __init__(self, model_id: str, provider_id: str, model_type: str) -> None: + """Initialize model information.""" + self.id = model_id + self.custom_metadata = { + "model_type": model_type, + "provider_id": provider_id, + } + + @pytest.mark.asyncio async def test_models_endpoint_handler_configuration_not_loaded( mocker: MockerFixture, @@ -35,7 +50,9 @@ async def test_models_endpoint_handler_configuration_not_loaded( auth: AuthTuple = ("test_user_id", "test_user", True, "test_token") with pytest.raises(HTTPException) as e: - await models_endpoint_handler(request=request, auth=auth) + await models_endpoint_handler( + request=request, auth=auth, model_type=ModelFilter(model_type=None) + ) assert e.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR assert e.value.detail["response"] == "Configuration is not loaded" # type: ignore @@ -102,7 +119,9 @@ async def test_models_endpoint_handler_configuration_loaded( auth: AuthTuple = ("test_user_id", "test_user", True, "test_token") with pytest.raises(HTTPException) as e: - await models_endpoint_handler(request=request, auth=auth) + await models_endpoint_handler( + request=request, auth=auth, model_type=ModelFilter(model_type=None) + ) assert e.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE assert e.value.detail["response"] == "Unable to connect to Llama Stack" # type: ignore @@ -160,10 +179,239 @@ async def test_models_endpoint_handler_unable_to_retrieve_models_list( # Authorization tuple required by URL endpoint 
     auth: AuthTuple = ("test_user_id", "test_user", True, "test_token")
-    response = await models_endpoint_handler(request=request, auth=auth)
+    response = await models_endpoint_handler(
+        request=request, auth=auth, model_type=ModelFilter(model_type=None)
+    )
 
     assert response is not None
 
 
+@pytest.mark.asyncio
+async def test_models_endpoint_handler_model_type_query_parameter(
+    mocker: MockerFixture,
+) -> None:
+    """Test the models endpoint handler if model_type query parameter is specified."""
+    mock_authorization_resolvers(mocker)
+
+    # configuration for tests
+    config_dict: dict[str, Any] = {
+        "name": "foo",
+        "service": {
+            "host": "localhost",
+            "port": 8080,
+            "auth_enabled": False,
+            "workers": 1,
+            "color_log": True,
+            "access_log": True,
+        },
+        "llama_stack": {
+            "api_key": "xyzzy",
+            "url": "http://x.y.com:1234",
+            "use_as_library_client": False,
+        },
+        "user_data_collection": {
+            "feedback_enabled": False,
+        },
+        "customization": None,
+        "authorization": {"access_rules": []},
+        "authentication": {"module": "noop"},
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(config_dict)
+
+    # Mock the LlamaStack client
+    mock_client = mocker.AsyncMock()
+    mock_client.models.list.return_value = []
+    mock_lsc = mocker.patch(
+        "app.endpoints.models.AsyncLlamaStackClientHolder.get_client"
+    )
+    mock_lsc.return_value = mock_client
+    mock_config = mocker.Mock()
+    mocker.patch("app.endpoints.models.configuration", mock_config)
+
+    request = Request(
+        scope={
+            "type": "http",
+            "headers": [(b"authorization", b"Bearer invalid-token")],
+        }
+    )
+
+    # Authorization tuple required by URL endpoint handler
+    auth: AuthTuple = ("test_user_id", "test_user", True, "test_token")
+    response = await models_endpoint_handler(
+        request=request, auth=auth, model_type=ModelFilter(model_type="llm")
+    )
+    assert response is not None
+
+
+@pytest.mark.asyncio
+async def test_models_endpoint_handler_model_list_retrieved(
+    mocker: MockerFixture,
+) -> None:
+    """Test the models endpoint handler if model list can be retrieved."""
+    mock_authorization_resolvers(mocker)
+
+    # configuration for tests
+    config_dict: dict[str, Any] = {
+        "name": "foo",
+        "service": {
+            "host": "localhost",
+            "port": 8080,
+            "auth_enabled": False,
+            "workers": 1,
+            "color_log": True,
+            "access_log": True,
+        },
+        "llama_stack": {
+            "api_key": "xyzzy",
+            "url": "http://x.y.com:1234",
+            "use_as_library_client": False,
+        },
+        "user_data_collection": {
+            "feedback_enabled": False,
+        },
+        "customization": None,
+        "authorization": {"access_rules": []},
+        "authentication": {"module": "noop"},
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(config_dict)
+
+    # Mock the LlamaStack client
+    mock_client = mocker.AsyncMock()
+    mock_client.models.list.return_value = [
+        Model("model1", "provider1", "llm"),
+        Model("model2", "provider2", "embedding"),
+        Model("model3", "provider3", "llm"),
+        Model("model4", "provider4", "embedding"),
+    ]
+    mock_lsc = mocker.patch(
+        "app.endpoints.models.AsyncLlamaStackClientHolder.get_client"
+    )
+    mock_lsc.return_value = mock_client
+    mock_config = mocker.Mock()
+    mocker.patch("app.endpoints.models.configuration", mock_config)
+
+    request = Request(
+        scope={
+            "type": "http",
+            "headers": [(b"authorization", b"Bearer invalid-token")],
+        }
+    )
+
+    # Authorization tuple required by URL endpoint handler
+    auth: AuthTuple = ("test_user_id", "test_user", True, "test_token")
+
+    response = await models_endpoint_handler(
+        request=request, auth=auth, model_type=ModelFilter(model_type=None)
+    )
+    assert response is not None
+    assert len(response.models) == 4
+    assert response.models[0]["identifier"] == "model1"
+    assert response.models[0]["model_type"] == "llm"
+    assert response.models[1]["identifier"] == "model2"
+    assert response.models[1]["model_type"] == "embedding"
+    assert response.models[2]["identifier"] == "model3"
+    assert response.models[2]["model_type"] == "llm"
+    assert response.models[3]["identifier"] == "model4"
+    assert response.models[3]["model_type"] == "embedding"
+
+
+@pytest.mark.asyncio
+async def test_models_endpoint_handler_model_list_retrieved_with_query_parameter(
+    mocker: MockerFixture,
+    subtests: SubTests,
+) -> None:
+    """Test the models endpoint handler if model list can be filtered by model type."""
+    mock_authorization_resolvers(mocker)
+
+    # configuration for tests
+    config_dict: dict[str, Any] = {
+        "name": "foo",
+        "service": {
+            "host": "localhost",
+            "port": 8080,
+            "auth_enabled": False,
+            "workers": 1,
+            "color_log": True,
+            "access_log": True,
+        },
+        "llama_stack": {
+            "api_key": "xyzzy",
+            "url": "http://x.y.com:1234",
+            "use_as_library_client": False,
+        },
+        "user_data_collection": {
+            "feedback_enabled": False,
+        },
+        "customization": None,
+        "authorization": {"access_rules": []},
+        "authentication": {"module": "noop"},
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(config_dict)
+
+    # Mock the LlamaStack client
+    mock_client = mocker.AsyncMock()
+    mock_client.models.list.return_value = [
+        Model("model1", "provider1", "llm"),
+        Model("model2", "provider2", "embedding"),
+        Model("model3", "provider3", "llm"),
+        Model("model4", "provider4", "embedding"),
+    ]
+    mock_lsc = mocker.patch(
+        "app.endpoints.models.AsyncLlamaStackClientHolder.get_client"
+    )
+    mock_lsc.return_value = mock_client
+    mock_config = mocker.Mock()
+    mocker.patch("app.endpoints.models.configuration", mock_config)
+
+    request = Request(
+        scope={
+            "type": "http",
+            "headers": [(b"authorization", b"Bearer invalid-token")],
+        }
+    )
+
+    # Authorization tuple required by URL endpoint handler
+    auth: AuthTuple = ("test_user_id", "test_user", True, "test_token")
+
+    with subtests.test(msg="Model type = 'llm'"):
+        response = await models_endpoint_handler(
+            request=request, auth=auth, model_type=ModelFilter(model_type="llm")
+        )
+        assert response is not None
+        assert len(response.models) == 2
+        assert response.models[0]["identifier"] == "model1"
+        assert response.models[0]["model_type"] == "llm"
+        assert response.models[1]["identifier"] == "model3"
+        assert response.models[1]["model_type"] == "llm"
+
+    with subtests.test(msg="Model type = 'embedding'"):
+        response = await models_endpoint_handler(
+            request=request, auth=auth, model_type=ModelFilter(model_type="embedding")
+        )
+        assert response is not None
+        assert len(response.models) == 2
+        assert response.models[0]["identifier"] == "model2"
+        assert response.models[0]["model_type"] == "embedding"
+        assert response.models[1]["identifier"] == "model4"
+        assert response.models[1]["model_type"] == "embedding"
+
+    with subtests.test(msg="Model type = 'xyzzy'"):
+        response = await models_endpoint_handler(
+            request=request, auth=auth, model_type=ModelFilter(model_type="xyzzy")
+        )
+        assert response is not None
+        assert len(response.models) == 0
+
+    with subtests.test(msg="Model type is empty string"):
+        response = await models_endpoint_handler(
+            request=request, auth=auth, model_type=ModelFilter(model_type="")
+        )
+        assert response is not None
+        assert len(response.models) == 0
+
+
 @pytest.mark.asyncio
 async def test_models_endpoint_llama_stack_connection_error(
     mocker: MockerFixture,
@@ -218,7 +466,9 @@ async def test_models_endpoint_llama_stack_connection_error(
     auth: AuthTuple = ("test_user_id", "test_user", True, "test_token")
 
     with pytest.raises(HTTPException) as e:
-        await models_endpoint_handler(request=request, auth=auth)
+        await models_endpoint_handler(
+            request=request, auth=auth, model_type=ModelFilter(model_type=None)
+        )
 
     assert e.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
     assert e.value.detail["response"] == "Unable to connect to Llama Stack"  # type: ignore
     assert "Unable to connect to Llama Stack" in e.value.detail["cause"]  # type: ignore
diff --git a/tests/unit/app/endpoints/test_query.py b/tests/unit/app/endpoints/test_query.py
index 2aff61eff..f7412fa2f 100644
--- a/tests/unit/app/endpoints/test_query.py
+++ b/tests/unit/app/endpoints/test_query.py
@@ -6,20 +6,20 @@
 from typing import Any
 
 import pytest
-from fastapi import HTTPException, Request, status
-import httpx
-from llama_stack_client import APIConnectionError, RateLimitError
+from fastapi import HTTPException, Request
+from llama_stack_api.openai_responses import OpenAIResponseObject
+from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient
 from pytest_mock import MockerFixture
 
-from app.endpoints.query import (
-    get_mcp_tools,
-    get_rag_tools,
-    query_endpoint_handler_v2,
-    retrieve_response,
-)
+from app.endpoints.query import query_endpoint_handler, retrieve_response
+from configuration import AppConfig
 from models.config import ModelContextProtocolServer
+from models.database.conversations import UserConversation
 from models.requests import Attachment, QueryRequest
-from utils.types import ShieldModerationResult
+from models.responses import QueryResponse
+from utils.responses import get_mcp_tools
+from utils.token_counter import TokenCounter
+from utils.types import ResponsesApiParams, TurnSummary
 
 # User ID must be proper UUID
 MOCK_AUTH = (
@@ -30,9 +30,9 @@
 )
 
 
-@pytest.fixture
-def dummy_request() -> Request:
-    """Create a dummy FastAPI Request object for testing.
+@pytest.fixture(name="dummy_request")
+def create_dummy_request() -> Request:
+    """Create a dummy request fixture for testing.
 
     Create a minimal FastAPI Request object suitable for unit tests.
@@ -43,81 +43,6 @@ def dummy_request() -> Request: req = Request(scope={"type": "http"}) return req - -def test_get_rag_tools() -> None: - """Test get_rag_tools returns None for empty list and correct tool format for vector stores.""" - assert get_rag_tools([]) is None - - tools = get_rag_tools(["db1", "db2"]) - assert isinstance(tools, list) - assert tools[0]["type"] == "file_search" - assert tools[0]["vector_store_ids"] == ["db1", "db2"] - assert tools[0]["max_num_results"] == 10 - assert "solr" not in tools[0] - - # Test with Solr parameters - solr_params = {"fq": ["product:*openshift*", "product_version:*4.16*"]} - tools_with_solr = get_rag_tools(["db1", "db2"], solr_params) - assert isinstance(tools_with_solr, list) - assert tools_with_solr[0]["type"] == "file_search" - assert tools_with_solr[0]["vector_store_ids"] == ["db1", "db2"] - assert tools_with_solr[0]["max_num_results"] == 10 - assert tools_with_solr[0]["solr"] == solr_params - - -def test_get_mcp_tools_with_and_without_token() -> None: - """Test get_mcp_tools with resolved_authorization_headers.""" - # Servers without authorization headers - servers_no_auth = [ - ModelContextProtocolServer(name="fs", url="http://localhost:3000"), - ModelContextProtocolServer(name="git", url="https://git.example.com/mcp"), - ] - - tools_no_auth = get_mcp_tools(servers_no_auth, token=None) - assert len(tools_no_auth) == 2 - assert tools_no_auth[0]["type"] == "mcp" - assert tools_no_auth[0]["server_label"] == "fs" - assert tools_no_auth[0]["server_url"] == "http://localhost:3000" - assert "headers" not in tools_no_auth[0] - - # Servers with kubernetes auth - servers_k8s = [ - ModelContextProtocolServer( - name="k8s-server", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - tools_k8s = get_mcp_tools(servers_k8s, token="user-k8s-token") - assert len(tools_k8s) == 1 - assert tools_k8s[0]["headers"] == {"Authorization": "Bearer user-k8s-token"} - - -def test_get_mcp_tools_with_mcp_headers() -> None: - """Test get_mcp_tools with client-provided headers.""" - # Server with client auth - servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "client", "X-Custom": "client"}, - ), - ] - - # Test with mcp_headers provided - mcp_headers = { - "fs": { - "Authorization": "client-provided-token", - "X-Custom": "custom-value", - } - } - tools = get_mcp_tools(servers, token=None, mcp_headers=mcp_headers) - assert len(tools) == 1 - assert tools[0]["headers"] == { - "Authorization": "client-provided-token", - "X-Custom": "custom-value", - } - # Test with mcp_headers=None (server should be skipped since auth is required but unavailable) tools_no_headers = get_mcp_tools(servers, token=None, mcp_headers=None) assert len(tools_no_headers) == 0 # Server skipped due to missing required auth @@ -143,897 +68,653 @@ def test_get_mcp_tools_with_static_headers(tmp_path: Path) -> None: def test_get_mcp_tools_with_mixed_headers(tmp_path: Path) -> None: - """Test get_mcp_tools with mixed header types.""" + """Test get_mcp_tools with mixed header types. # Create a secret file secret_file = tmp_path / "api-key.txt" secret_file.write_text("secret-api-key") - servers = [ - ModelContextProtocolServer( - name="mixed-server", - url="http://localhost:3000", - authorization_headers={ - "Authorization": "kubernetes", - "X-API-Key": str(secret_file), - "X-Custom": "client", - }, - ), - ] + Create a reusable application configuration tailored for unit tests. 
- mcp_headers = { - "mixed-server": { - "X-Custom": "client-custom-value", - } - } + The returned AppConfig is initialized from a fixed dictionary that sets: + - a lightweight service configuration (localhost, port 8080, minimal workers, logging enabled), + - a test Llama Stack configuration (test API key and URL, not used as a library client), + - user data collection with transcripts disabled, + - an empty MCP servers list, + - a noop conversation cache. - tools = get_mcp_tools(servers, token="k8s-token", mcp_headers=mcp_headers) - assert len(tools) == 1 - assert tools[0]["headers"] == { - "Authorization": "Bearer k8s-token", - "X-API-Key": "secret-api-key", - "X-Custom": "client-custom-value", + Returns: + AppConfig: an initialized configuration instance suitable for test fixtures. + """ + config_dict: dict[Any, Any] = { + "name": "test", + "service": { + "host": "localhost", + "port": 8080, + "auth_enabled": False, + "workers": 1, + "color_log": True, + "access_log": True, + }, + "llama_stack": { + "api_key": "test-key", + "url": "http://test.com:1234", + "use_as_library_client": False, + }, + "user_data_collection": { + "transcripts_enabled": False, + }, + "mcp_servers": [], + "customization": None, + "conversation_cache": { + "type": "noop", + }, } -def test_get_mcp_tools_skips_server_with_missing_auth() -> None: - """Test that servers with required but unavailable auth headers are skipped.""" - servers = [ - # Server with kubernetes auth but no token provided - ModelContextProtocolServer( - name="missing-k8s-auth", - url="http://localhost:3001", - authorization_headers={"Authorization": "kubernetes"}, - ), - # Server with client auth but no MCP-HEADERS provided - ModelContextProtocolServer( - name="missing-client-auth", - url="http://localhost:3002", - authorization_headers={"X-Token": "client"}, - ), - # Server with partial auth (2 headers required, only 1 available) - ModelContextProtocolServer( - name="partial-auth", - url="http://localhost:3003", - authorization_headers={ - "Authorization": "kubernetes", - "X-Custom": "client", - }, - ), - ] +class TestQueryEndpointHandler: + """Tests for query_endpoint_handler function.""" + + @pytest.mark.asyncio + async def test_successful_query_no_conversation( + self, + dummy_request: Request, + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test successful query without existing conversation.""" + query_request = QueryRequest( + query="What is Kubernetes?" 
+ ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.query.configuration", setup_configuration) + mocker.patch("app.endpoints.query.check_configuration_loaded") + mocker.patch("app.endpoints.query.check_tokens_available") + mocker.patch("app.endpoints.query.validate_model_provider_override") + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_response_obj = mocker.Mock() + mock_response_obj.output = [] + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response_obj) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + mocker.patch( + "app.endpoints.query.get_topic_summary", + new=mocker.AsyncMock(return_value=None), + ) - # No token, no mcp_headers - tools = get_mcp_tools(servers, token=None, mcp_headers=None) - # All servers should be skipped - assert len(tools) == 0 + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", + } + mocker.patch( + "app.endpoints.query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + mock_turn_summary = TurnSummary() + mock_turn_summary.llm_response = ( + "Kubernetes is a container orchestration platform" + ) -def test_get_mcp_tools_includes_server_without_auth() -> None: - """Test that servers without auth config are always included.""" - servers = [ - # Server with no auth requirements - ModelContextProtocolServer( - name="public-server", - url="http://localhost:3000", - authorization_headers={}, - ), - ] + async def mock_retrieve_response(*_args: Any, **_kwargs: Any) -> TurnSummary: + return mock_turn_summary - # Should work even without token or headers - tools = get_mcp_tools(servers, token=None, mcp_headers=None) - assert len(tools) == 1 - assert tools[0]["server_label"] == "public-server" - assert "headers" not in tools[0] - - -@pytest.mark.asyncio -async def test_retrieve_response_no_tools_bypasses_tools(mocker: MockerFixture) -> None: - """Test that no_tools=True bypasses tool configuration and passes None to responses API.""" - mock_client = mocker.Mock() - # responses.create returns a synthetic OpenAI-like response - response_obj = mocker.Mock() - response_obj.id = "resp-1" - response_obj.output = [] - response_obj.usage = None # No usage info - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # vector_stores.list should not matter when no_tools=True, but keep it valid - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - # Ensure system prompt resolution does not require real config - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", 
mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello", no_tools=True) - summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-x", qr, token="tkn" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "" - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - # tools must be passed as None - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["tools"] is None - assert kwargs["model"] == "model-x" - assert kwargs["instructions"] == "PROMPT" - - -@pytest.mark.asyncio -async def test_retrieve_response_builds_rag_and_mcp_tools( # pylint: disable=too-many-locals - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly builds RAG and MCP tools from configuration.""" - mock_client = mocker.Mock() - response_obj = mocker.Mock() - response_obj.id = "resp-2" - response_obj.output = [] - response_obj.usage = None - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [mocker.Mock(id="dbA")] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_query_response.scores = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - - # Mock shield moderation - mock_moderation_result = mocker.Mock() - mock_moderation_result.blocked = False - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - return_value=mock_moderation_result, - ) - - mock_cfg = mocker.Mock() - mock_cfg.mcp_servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - mocker.patch("app.endpoints.query_v2.configuration", mock_cfg) - - qr = QueryRequest(query="hello") - _summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-y", qr, token="mytoken" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - - kwargs = mock_client.responses.create.call_args.kwargs - tools = kwargs["tools"] - assert isinstance(tools, list) - # Expect only MCP tools since RAG tools are skipped when doing direct vector querying - tool_types = {t.get("type") for t in tools} - assert tool_types == {"mcp"} - mcp_tool = next(t for t in tools if t["type"] == "mcp") - assert mcp_tool["server_label"] == "fs" - assert mcp_tool["headers"] == {"Authorization": "Bearer mytoken"} - - -@pytest.mark.asyncio -async def test_retrieve_response_parses_output_and_tool_calls( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly parses output content and tool calls from 
response.""" - mock_client = mocker.Mock() - - # Build output with content variants and tool calls - part1 = mocker.Mock(text="Hello ") - part1.annotations = [] # Ensure annotations is a list to avoid iteration error - part2 = mocker.Mock(text="world") - part2.annotations = [] - - output_item_1 = mocker.Mock() - output_item_1.type = "message" - output_item_1.role = "assistant" - output_item_1.content = [part1, part2] - - output_item_2 = mocker.Mock() - output_item_2.type = "message" - output_item_2.role = "assistant" - output_item_2.content = "!" - - # Tool call as a separate output item (Responses API format) - tool_call_item = mocker.Mock() - tool_call_item.type = "function_call" - tool_call_item.id = "tc-1" - tool_call_item.call_id = "tc-1" - tool_call_item.name = "do_something" - tool_call_item.arguments = '{"x": 1}' - tool_call_item.status = None # Explicitly set to avoid Mock auto-creation - - response_obj = mocker.Mock() - response_obj.id = "resp-3" - response_obj.output = [output_item_1, output_item_2, tool_call_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, referenced_docs, token_usage = await retrieve_response( - mock_client, "model-z", qr, token="tkn" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Hello world!" 
- assert len(summary.tool_calls) == 1 - assert summary.tool_calls[0].id == "tc-1" - assert summary.tool_calls[0].name == "do_something" - assert summary.tool_calls[0].args == {"x": 1} - assert referenced_docs == [] - assert token_usage.input_tokens == 0 # No usage info, so 0 - assert token_usage.output_tokens == 0 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_usage_info(mocker: MockerFixture) -> None: - """Test that token usage is extracted when provided by the API as an object.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response" - output_item.tool_calls = [] - - # Mock usage information as object - mock_usage = mocker.Mock() - mock_usage.input_tokens = 150 - mock_usage.output_tokens = 75 - - response_obj = mocker.Mock() - response_obj.id = "resp-with-usage" - response_obj.output = [output_item] - response_obj.usage = mock_usage - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-usage", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response" - assert token_usage.input_tokens == 150 - assert token_usage.output_tokens == 75 - assert token_usage.llm_calls == 1 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_usage_dict(mocker: MockerFixture) -> None: - """Test that token usage is extracted when provided by the API as a dict.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response dict" - output_item.tool_calls = [] - - # Mock usage information as dict (like llama stack does) - response_obj = mocker.Mock() - response_obj.id = "resp-with-usage-dict" - response_obj.output = [output_item] - response_obj.usage = {"input_tokens": 200, "output_tokens": 100} - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - 
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-usage-dict", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response dict" - assert token_usage.input_tokens == 200 - assert token_usage.output_tokens == 100 - assert token_usage.llm_calls == 1 - - -@pytest.mark.asyncio -async def test_retrieve_response_with_empty_usage_dict(mocker: MockerFixture) -> None: - """Test that empty usage dict is handled gracefully.""" - mock_client = mocker.Mock() - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Test response empty usage" - output_item.tool_calls = [] - - # Mock usage information as empty dict (tokens are 0 or missing) - response_obj = mocker.Mock() - response_obj.id = "resp-empty-usage" - response_obj.output = [output_item] - response_obj.usage = {} # Empty dict - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, token_usage = await retrieve_response( - mock_client, "model-empty-usage", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Test response empty usage" - assert token_usage.input_tokens == 0 - assert token_usage.output_tokens == 0 - assert token_usage.llm_calls == 1 # Always 1, even when no token usage data - - -@pytest.mark.asyncio -async def test_retrieve_response_validates_attachments(mocker: MockerFixture) -> None: - """Test that retrieve_response validates attachments and includes them in the input string.""" - mock_client = mocker.Mock() - response_obj = mocker.Mock() - response_obj.id = "resp-4" - response_obj.output = [] - response_obj.usage = None - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - 
mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - validate_spy = mocker.patch( - "app.endpoints.query_v2.validate_attachments_metadata", return_value=None - ) - - attachments = [ - Attachment(attachment_type="log", content_type="text/plain", content="x"), - ] + mocker.patch( + "app.endpoints.query.retrieve_response", side_effect=mock_retrieve_response + ) + + mocker.patch( + "app.endpoints.query.normalize_conversation_id", return_value="123" + ) + mocker.patch("app.endpoints.query.store_query_results") + mocker.patch("app.endpoints.query.consume_query_tokens") + mocker.patch("app.endpoints.query.get_available_quotas", return_value={}) - qr = QueryRequest(query="hello", attachments=attachments) - _summary, _cid, _ref_docs, _token_usage = await retrieve_response( - mock_client, "model-a", qr, token="tkn" - ) - - validate_spy.assert_called_once() - # Verify that attachments are included in the input - kwargs = mock_client.responses.create.call_args.kwargs - assert "input" in kwargs - # Input should be a string containing both query and attachment - assert isinstance(kwargs["input"], str) - assert "hello" in kwargs["input"] - assert "[Attachment: log]" in kwargs["input"] - assert "x" in kwargs["input"] - - -@pytest.mark.asyncio -async def test_query_endpoint_handler_v2_success( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test successful query endpoint handler execution with proper response structure.""" - # Mock configuration to avoid configuration not loaded errors - mock_config = mocker.Mock() - mock_config.llama_stack_configuration = mocker.Mock() - mock_config.quota_limiters = [] - mocker.patch("app.endpoints.query_v2.configuration", mock_config) - - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch("app.endpoints.query.evaluate_model_hints", return_value=(None, None)) - mocker.patch( - "app.endpoints.query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - summary = mocker.Mock( - llm_response="ANSWER", tool_calls=[], tool_results=[], rag_chunks=[] - ) - token_usage = mocker.Mock(input_tokens=10, output_tokens=20) - # Use a valid SUID for conversation_id - test_conversation_id = "00000000-0000-0000-0000-000000000001" - mocker.patch( - "app.endpoints.query_v2.retrieve_response", - return_value=(summary, test_conversation_id, [], token_usage), - ) - mocker.patch("app.endpoints.query_v2.get_topic_summary", return_value="Topic") - mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False) - mocker.patch("app.endpoints.query.persist_user_conversation_details") - mocker.patch("utils.endpoints.store_conversation_into_cache") - mocker.patch("app.endpoints.query.get_session") - - # Add missing mocks for quota functions - mocker.patch("utils.quota.check_tokens_available") - mocker.patch("utils.quota.consume_tokens") - mocker.patch("utils.quota.get_available_quotas", return_value={}) - - # Mock the request state - dummy_request.state.authorized_actions = [] - - res = await query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=MOCK_AUTH, - mcp_headers={}, - ) - - assert res.conversation_id == test_conversation_id - assert res.response == "ANSWER" - - -@pytest.mark.asyncio -async def 
test_query_endpoint_handler_v2_api_connection_error( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that query endpoint handler properly handles and reports API connection errors.""" - # Mock configuration to avoid configuration not loaded errors - mock_config = mocker.Mock() - mock_config.llama_stack_configuration = mocker.Mock() - mocker.patch("app.endpoints.query_v2.configuration", mock_config) - - def _raise(*_args: Any, **_kwargs: Any) -> Exception: - """Raises a custom APIConnectionError exception. - - Args: - *_args: Variable length argument list. - **_kwargs: Arbitrary keyword arguments. - - Returns: - None - - Raises: - APIConnectionError: Always raises this exception with a Request object. - """ - request = Request(scope={"type": "http"}) - raise APIConnectionError(request=request) # type: ignore - - mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise) - - fail_metric = mocker.patch("metrics.llm_calls_failures_total") - - with pytest.raises(HTTPException) as exc: - await query_endpoint_handler_v2( + response = await query_endpoint_handler( request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", False, "token-abc"), + query_request=query_request, + auth=MOCK_AUTH, mcp_headers={}, ) - assert exc.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - detail = exc.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Unable to connect to Llama Stack" # type: ignore[index] - fail_metric.inc.assert_called_once() - - -@pytest.mark.asyncio -async def test_query_endpoint_quota_exceeded( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that query endpoint raises HTTP 429 when model quota is exceeded.""" - query_request = QueryRequest( - query="What is OpenStack?", - provider="openai", - model="gpt-4o-mini", - attachments=[], - ) # type: ignore - mock_client = mocker.AsyncMock() - mock_client.models.list = mocker.AsyncMock(return_value=[]) - mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test")) - mock_client.responses.create.side_effect = RateLimitError( - "Rate limit exceeded for model gpt-4o-mini", - response=mock_response, - body=None, - ) - # Mock conversation creation (needed for query_v2) - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mocker.patch( - "app.endpoints.query.select_model_and_provider_id", - return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"), - ) - mocker.patch("app.endpoints.query.validate_model_provider_override") - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", - return_value=mock_client, - ) - mocker.patch("app.endpoints.query.check_tokens_available") - mocker.patch("app.endpoints.query.get_session") - mocker.patch("app.endpoints.query.is_transcripts_enabled", return_value=False) - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - return_value=ShieldModerationResult(blocked=False), - ) - mocker.patch( - "app.endpoints.query_v2.prepare_tools_for_responses_api", return_value=None - ) - - with pytest.raises(HTTPException) as exc_info: - await query_endpoint_handler_v2( - dummy_request, query_request=query_request, auth=MOCK_AUTH + assert isinstance(response, QueryResponse) + assert response.conversation_id == "123" + assert response.response == "Kubernetes is a container orchestration platform" + + @pytest.mark.asyncio + async def 
test_successful_query_with_conversation( + self, + dummy_request: Request, + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test successful query with existing conversation.""" + query_request = QueryRequest( + query="What is Kubernetes?", + conversation_id="123e4567-e89b-12d3-a456-426614174000", + ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.query.configuration", setup_configuration) + mocker.patch("app.endpoints.query.check_configuration_loaded") + mocker.patch("app.endpoints.query.check_tokens_available") + mocker.patch("app.endpoints.query.validate_model_provider_override") + mocker.patch( + "app.endpoints.query.normalize_conversation_id", return_value="123" ) - assert exc_info.value.status_code == status.HTTP_429_TOO_MANY_REQUESTS - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "The quota has been exceeded" # type: ignore - assert "gpt-4o-mini" in detail["cause"] # type: ignore - - -@pytest.mark.asyncio -async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: - """Test that shield moderation runs and passes when content is safe.""" - mock_client = mocker.Mock() - - # Create mock shield with provider_resource_id - mock_shield = mocker.Mock() - mock_shield.identifier = "content-safety-shield" - mock_shield.provider_resource_id = "moderation-model" - mock_client.shields.list = mocker.AsyncMock(return_value=[mock_shield]) - - # Create mock model matching the shield's provider_resource_id - mock_model = mocker.Mock() - mock_model.id = "moderation-model" - mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) - - # Mock moderations.create to return safe (not flagged) content - mock_moderation_result = mocker.Mock() - mock_moderation_result.flagged = False - mock_moderation_response = mocker.Mock() - mock_moderation_response.results = [mock_moderation_result] - mock_client.moderations.create = mocker.AsyncMock( - return_value=mock_moderation_response - ) - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Safe response" - - response_obj = mocker.Mock() - response_obj.id = "resp-shields" - response_obj.output = [output_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-shields", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Safe response" - - # Verify that moderation was called with the user's query - mock_client.moderations.create.assert_called_once_with( - input="hello", model="moderation-model" - ) - # Verify that responses.create was called (moderation passed) - mock_client.responses.create.assert_called_once() - - 
-@pytest.mark.asyncio -async def test_retrieve_response_with_no_shields_available( - mocker: MockerFixture, -) -> None: - """Test that LLM is called when no shields are configured.""" - mock_client = mocker.Mock() - - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - output_item.content = "Response without shields" - - response_obj = mocker.Mock() - response_obj.id = "resp-no-shields" - response_obj.output = [output_item] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="hello") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-no-shields", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Response without shields" - - # Verify that responses.create was called - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_detects_shield_violation( - mocker: MockerFixture, -) -> None: - """Test that shield moderation blocks content and returns early.""" - mock_client = mocker.Mock() - - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_client.conversations.items.create = mocker.AsyncMock(return_value=None) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - # Mock run_shield_moderation to return blocked - mocker.patch( - "app.endpoints.query_v2.run_shield_moderation", - return_value=ShieldModerationResult( - blocked=True, message="Content violates safety policy" - ), - ) - - qr = QueryRequest(query="dangerous query") - summary, conv_id, _referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-violation", qr, token="tkn", provider_id="test-provider" - ) - - assert conv_id == "abc123def456" # Normalized (without conv_ prefix) - assert summary.llm_response == "Content violates safety policy" - - # Verify that responses.create was NOT called (blocked by moderation) - mock_client.responses.create.assert_not_called() - - -def _create_message_output_with_citations(mocker: MockerFixture) -> Any: - """Create mock message output item with content annotations (citations).""" - # 1. 
Output item with message content annotations (citations) - output_item = mocker.Mock() - output_item.type = "message" - output_item.role = "assistant" - - # Mock content with annotations - content_part = mocker.Mock() - content_part.type = "output_text" - content_part.text = "Here is a citation." - - annotation1 = mocker.Mock() - annotation1.type = "url_citation" - annotation1.url = "http://example.com/doc1" - annotation1.title = "Doc 1" - - annotation2 = mocker.Mock() - annotation2.type = "file_citation" - annotation2.filename = "file1.txt" - annotation2.url = None - annotation2.title = None - - content_part.annotations = [annotation1, annotation2] - output_item.content = [content_part] - return output_item - - -def _create_file_search_output(mocker: MockerFixture) -> Any: - """Create mock file search tool call output with results.""" - # 2. Output item with file search tool call results - output_item = mocker.Mock() - output_item.type = "file_search_call" - output_item.id = "file-search-1" - output_item.queries = ( - [] - ) # Ensure queries is a list to avoid iteration error in tool summary - output_item.status = "completed" - # Create mock result objects with proper attributes matching real llama-stack response - result_1 = mocker.Mock() - result_1.filename = "file2.pdf" - result_1.attributes = { - "docs_url": "http://example.com/doc2", - "title": "Title 1", - "document_id": "doc-123", - } - result_1.text = "Sample text from file2.pdf" - result_1.score = 0.95 - result_1.file_id = "file-123" - result_1.model_dump = mocker.Mock( - return_value={ - "filename": "file2.pdf", - "attributes": { - "docs_url": "http://example.com/doc2", - "title": "Title 1", - "document_id": "doc-123", - }, - "text": "Sample text from file2.pdf", - "score": 0.95, - "file_id": "file-123", + mock_validate_conv = mocker.patch( + "app.endpoints.query.validate_and_retrieve_conversation", + return_value=mocker.Mock(spec=UserConversation), + ) + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", } - ) - - result_2 = mocker.Mock() - result_2.filename = "file3.docx" - result_2.attributes = { - "docs_url": "http://example.com/doc3", - "title": "Title 2", - "document_id": "doc-456", - } - result_2.text = "Sample text from file3.docx" - result_2.score = 0.85 - result_2.file_id = "file-456" - result_2.model_dump = mocker.Mock( - return_value={ - "filename": "file3.docx", - "attributes": { - "docs_url": "http://example.com/doc3", - "title": "Title 2", - "document_id": "doc-456", - }, - "text": "Sample text from file3.docx", - "score": 0.85, - "file_id": "file-456", + mocker.patch( + "app.endpoints.query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + mocker.patch( + "app.endpoints.query.retrieve_response", + new=mocker.AsyncMock(return_value=TurnSummary()), + ) + mocker.patch("app.endpoints.query.store_query_results") + mocker.patch("app.endpoints.query.consume_query_tokens") + mocker.patch("app.endpoints.query.get_available_quotas", return_value={}) + + response = await query_endpoint_handler( + 
request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH, + mcp_headers={}, + ) + + assert isinstance(response, QueryResponse) + mock_validate_conv.assert_called_once() + + @pytest.mark.asyncio + async def test_query_with_attachments( + self, + dummy_request: Request, + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test query with attachments validation.""" + query_request = QueryRequest( + query="What is Kubernetes?", + attachments=[ + Attachment( + attachment_type="log", + content_type="text/plain", + content="log content", + ) + ], + ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.query.configuration", setup_configuration) + mocker.patch("app.endpoints.query.check_configuration_loaded") + mocker.patch("app.endpoints.query.check_tokens_available") + mocker.patch("app.endpoints.query.validate_model_provider_override") + mock_validate = mocker.patch( + "app.endpoints.query.validate_attachments_metadata" + ) + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_response_obj = mocker.Mock() + mock_response_obj.output = [] + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response_obj) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + mocker.patch( + "app.endpoints.query.get_topic_summary", + new=mocker.AsyncMock(return_value=None), + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", + } + mocker.patch( + "app.endpoints.query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + async def mock_retrieve_response(*_args: Any, **_kwargs: Any) -> TurnSummary: + return TurnSummary() + + mocker.patch( + "app.endpoints.query.retrieve_response", side_effect=mock_retrieve_response + ) + mocker.patch( + "app.endpoints.query.normalize_conversation_id", return_value="123" + ) + mocker.patch("app.endpoints.query.store_query_results") + mocker.patch("app.endpoints.query.consume_query_tokens") + mocker.patch("app.endpoints.query.get_available_quotas", return_value={}) + + await query_endpoint_handler( + request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH, + mcp_headers={}, + ) + + mock_validate.assert_called_once_with(query_request.attachments) + + @pytest.mark.asyncio + async def test_query_with_topic_summary( + self, + dummy_request: Request, + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test query generates topic summary for new conversation.""" + query_request = QueryRequest( + query="What is Kubernetes?", generate_topic_summary=True + ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.query.configuration", setup_configuration) + mocker.patch("app.endpoints.query.check_configuration_loaded") + mocker.patch("app.endpoints.query.check_tokens_available") + mocker.patch("app.endpoints.query.validate_model_provider_override") + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + + 
mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", + } + mocker.patch( + "app.endpoints.query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + mocker.patch( + "app.endpoints.query.retrieve_response", + new=mocker.AsyncMock(return_value=TurnSummary()), + ) + mock_get_topic_summary = mocker.patch( + "app.endpoints.query.get_topic_summary", + new=mocker.AsyncMock(return_value="Topic: Kubernetes"), + ) + mocker.patch( + "app.endpoints.query.normalize_conversation_id", return_value="123" + ) + mocker.patch("app.endpoints.query.store_query_results") + mocker.patch("app.endpoints.query.consume_query_tokens") + mocker.patch("app.endpoints.query.get_available_quotas", return_value={}) + + await query_endpoint_handler( + request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH, + mcp_headers={}, + ) + + mock_get_topic_summary.assert_called_once() + + @pytest.mark.asyncio + async def test_query_azure_token_refresh( + self, + dummy_request: Request, + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test query refreshes Azure token when needed.""" + query_request = QueryRequest( + query="What is Kubernetes?" + ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.query.configuration", setup_configuration) + mocker.patch("app.endpoints.query.check_configuration_loaded") + mocker.patch("app.endpoints.query.check_tokens_available") + mocker.patch("app.endpoints.query.validate_model_provider_override") + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_response_obj = mocker.Mock() + mock_response_obj.output = [] + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response_obj) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + mocker.patch( + "app.endpoints.query.get_topic_summary", + new=mocker.AsyncMock(return_value=None), + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "azure/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "azure/model1", + } + mocker.patch( + "app.endpoints.query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + mock_azure_manager = mocker.Mock() + mock_azure_manager.is_entra_id_configured = True + mock_azure_manager.is_token_expired = True + mock_azure_manager.refresh_token.return_value = True + mocker.patch( + "app.endpoints.query.AzureEntraIDManager", return_value=mock_azure_manager + ) + + mock_updated_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_response_obj_updated = mocker.Mock() + mock_response_obj_updated.output = [] + mock_updated_client.responses = mocker.Mock() + mock_updated_client.responses.create = mocker.AsyncMock( + return_value=mock_response_obj_updated + ) + mock_update_token = mocker.patch( + "app.endpoints.query.update_azure_token", + new=mocker.AsyncMock(return_value=mock_updated_client), + ) + mocker.patch( + "app.endpoints.query.get_topic_summary", + new=mocker.AsyncMock(return_value=None), + ) + + async def 
mock_retrieve_response(*_args: Any, **_kwargs: Any) -> TurnSummary: + return TurnSummary() + + mocker.patch( + "app.endpoints.query.retrieve_response", side_effect=mock_retrieve_response + ) + mocker.patch( + "app.endpoints.query.normalize_conversation_id", return_value="123" + ) + mocker.patch("app.endpoints.query.store_query_results") + mocker.patch("app.endpoints.query.consume_query_tokens") + mocker.patch("app.endpoints.query.get_available_quotas", return_value={}) + + await query_endpoint_handler( + request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH, + mcp_headers={}, + ) + + mock_update_token.assert_called_once() + mock_updated_client.responses.create.assert_called_once() + + +class TestRetrieveResponse: + """Tests for retrieve_response function.""" + + @pytest.mark.asyncio + async def test_retrieve_response_success(self, mocker: MockerFixture) -> None: + """Test successful response retrieval.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.input = "test query" + mock_responses_params.model = "provider1/model1" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", } - ) - - output_item.results = [result_1, result_2] - return output_item - - -@pytest.mark.asyncio -async def test_retrieve_response_parses_referenced_documents( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly parses referenced documents from response.""" - mock_client = mocker.AsyncMock() - - # Create output items using helper functions - output_item_1 = _create_message_output_with_citations(mocker) - output_item_2 = _create_file_search_output(mocker) - - response_obj = mocker.Mock() - response_obj.id = "resp-docs" - response_obj.output = [output_item_1, output_item_2] - response_obj.usage = None - - mock_client.responses.create = mocker.AsyncMock(return_value=response_obj) - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch("app.endpoints.query_v2.get_system_prompt", return_value="PROMPT") - mocker.patch("app.endpoints.query_v2.configuration", mocker.Mock(mcp_servers=[])) - - qr = QueryRequest(query="query with docs") - _summary, _conv_id, referenced_docs, _token_usage = await retrieve_response( - mock_client, "model-docs", qr, token="tkn", provider_id="test-provider" - ) - - # Referenced documents are now extracted only from file_search_call attributes - assert len(referenced_docs) == 2 - - # Verify Title 1 (File search result with URL) - doc1 = next((d for d in referenced_docs if d.doc_title == "Title 1"), None) - assert doc1 - assert doc1.doc_title == "Title 1" - assert str(doc1.doc_url) == "http://example.com/doc2" - - # Verify Title 2 (File search result with URL) - doc2 = next((d for d in referenced_docs if d.doc_title == "Title 2"), None) - assert doc2 - assert doc2.doc_title == "Title 2" - assert str(doc2.doc_url) == "http://example.com/doc3" - - # Verify RAG chunks were extracted from file_search_call results - assert len(_summary.rag_chunks) == 2 - assert _summary.rag_chunks[0].content == "Sample text from file2.pdf" - assert _summary.rag_chunks[0].source == "file2.pdf" - assert 
_summary.rag_chunks[0].score == 0.95 - assert _summary.rag_chunks[1].content == "Sample text from file3.docx" - assert _summary.rag_chunks[1].source == "file3.docx" - assert _summary.rag_chunks[1].score == 0.85 + + mock_output_item = mocker.Mock() + mock_output_item.type = "message" + mock_output_item.content = "Response text" + + mock_response = mocker.Mock(spec=OpenAIResponseObject) + mock_response.output = [mock_output_item] + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response) + mocker.patch( + "app.endpoints.query.extract_text_from_response_output_item", + return_value="Response text", + ) + mocker.patch( + "app.endpoints.query.build_tool_call_summary", return_value=(None, None) + ) + mocker.patch("app.endpoints.query.parse_referenced_documents", return_value=[]) + mocker.patch( + "app.endpoints.query.extract_token_usage", + return_value=TokenCounter(input_tokens=10, output_tokens=5), + ) + + result = await retrieve_response(mock_client, mock_responses_params) + + assert isinstance(result, TurnSummary) + assert result.llm_response == "Response text" + assert result.token_usage.input_tokens == 10 + assert result.token_usage.output_tokens == 5 + + @pytest.mark.asyncio + async def test_retrieve_response_shield_blocked( + self, mocker: MockerFixture + ) -> None: + """Test response retrieval when shield moderation blocks the request.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mock_moderation_result = mocker.Mock() + mock_moderation_result.blocked = True + mock_moderation_result.message = "Content blocked by moderation" + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mock_moderation_result, + ) + mock_append = mocker.patch("app.endpoints.query.append_turn_to_conversation") + + result = await retrieve_response(mock_client, mock_responses_params) + + assert isinstance(result, TurnSummary) + assert result.llm_response == "Content blocked by moderation" + mock_append.assert_called_once() + + @pytest.mark.asyncio + async def test_retrieve_response_connection_error( + self, mocker: MockerFixture + ) -> None: + """Test response retrieval raises HTTPException on connection error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.input = "test query" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + with pytest.raises(HTTPException) as exc_info: + await retrieve_response(mock_client, mock_responses_params) + + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_retrieve_response_api_status_error( + self, mocker: MockerFixture + ) -> None: + """Test response retrieval raises HTTPException on API status error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = 
mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.input = "test query" + mock_responses_params.model = "provider1/model1" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + ) + mocker.patch( + "app.endpoints.query.handle_known_apistatus_errors", + return_value=mocker.Mock( + model_dump=lambda: { + "status_code": 500, + "detail": {"response": "Error", "cause": "API error"}, + } + ), + ) + + with pytest.raises(HTTPException): + await retrieve_response(mock_client, mock_responses_params) + + @pytest.mark.asyncio + async def test_retrieve_response_runtime_error_context_length( + self, mocker: MockerFixture + ) -> None: + """Test retrieve_response handles RuntimeError with context_length.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock( + side_effect=RuntimeError("context_length exceeded") + ) + + with pytest.raises(HTTPException) as exc_info: + await retrieve_response(mock_client, mock_responses_params) + + assert exc_info.value.status_code == 413 + + @pytest.mark.asyncio + async def test_retrieve_response_runtime_error_other( + self, mocker: MockerFixture + ) -> None: + """Test retrieve_response re-raises RuntimeError without context_length.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock( + side_effect=RuntimeError("Some other error") + ) + + with pytest.raises(RuntimeError): + await retrieve_response(mock_client, mock_responses_params) + + @pytest.mark.asyncio + async def test_retrieve_response_with_tool_calls( + self, mocker: MockerFixture + ) -> None: + """Test response retrieval processes tool calls.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.input = "test query" + mock_responses_params.model = "provider1/model1" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + } + + mock_response = mocker.Mock(spec=OpenAIResponseObject) + mock_response.output = [mocker.Mock(type="message")] + + mocker.patch( + "app.endpoints.query.run_shield_moderation", + return_value=mocker.Mock(blocked=False), + ) + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response) + + mock_tool_call = mocker.Mock() + mock_tool_result = mocker.Mock() + mocker.patch( + "app.endpoints.query.extract_text_from_response_output_item", 
+ return_value="Response text", + ) + mocker.patch( + "app.endpoints.query.build_tool_call_summary", + return_value=(mock_tool_call, mock_tool_result), + ) + mocker.patch("app.endpoints.query.parse_referenced_documents", return_value=[]) + mocker.patch( + "app.endpoints.query.extract_token_usage", + return_value=TokenCounter(input_tokens=10, output_tokens=5), + ) + + result = await retrieve_response(mock_client, mock_responses_params) + + assert len(result.tool_calls) == 1 + assert len(result.tool_results) == 1 diff --git a/tests/unit/app/endpoints/test_query_old.py b/tests/unit/app/endpoints/test_query_old.py deleted file mode 100644 index e9eaa4eaa..000000000 --- a/tests/unit/app/endpoints/test_query_old.py +++ /dev/null @@ -1,486 +0,0 @@ -"""Unit tests for the /query REST API endpoint.""" - -# pylint: disable=redefined-outer-name -# pylint: disable=too-many-lines -# pylint: disable=ungrouped-imports - -from typing import Any - -import pytest -from fastapi import HTTPException, Request, status -from pytest_mock import MockerFixture - -from app.endpoints.query_old import ( - evaluate_model_hints, - is_transcripts_enabled, - select_model_and_provider_id, - validate_attachments_metadata, -) -from configuration import AppConfig -from models.config import Action -from models.database.conversations import UserConversation -from models.requests import Attachment, QueryRequest -from utils.token_counter import TokenCounter - -# User ID must be proper UUID -MOCK_AUTH = ( - "00000001-0001-0001-0001-000000000001", - "mock_username", - False, - "mock_token", -) - - -@pytest.fixture -def dummy_request() -> Request: - """Dummy request fixture for testing. - - Create a minimal FastAPI Request with test-ready authorization state. - - The returned Request has a minimal HTTP scope and a - `state.authorized_actions` attribute initialized to a set containing all - members of the `Action` enum, suitable for use in unit tests that require - an authenticated request context. - - Returns: - req (Request): FastAPI Request with `state.authorized_actions` set to `set(Action)`. - """ - req = Request( - scope={ - "type": "http", - } - ) - - req.state.authorized_actions = set(Action) - return req - - -def mock_metrics(mocker: MockerFixture) -> None: - """Helper function to mock metrics operations for query endpoints. - - Configure the provided pytest-mock `mocker` to stub token metrics and - related metrics counters used by query endpoint tests. - - Patches the token metrics extraction helper and the LLM metrics counters so - tests can run without emitting real metrics. - """ - mocker.patch( - "app.endpoints.query.extract_and_update_token_metrics", - return_value=TokenCounter(), - ) - # Mock the metrics that are called inside extract_and_update_token_metrics - mocker.patch("metrics.llm_token_sent_total") - mocker.patch("metrics.llm_token_received_total") - mocker.patch("metrics.llm_calls_total") - - -def mock_database_operations(mocker: MockerFixture) -> None: - """Helper function to mock database operations for query endpoints. - - Patch common database operations used by query endpoint tests. - - This applies test-time patches so that conversation ownership checks - succeed, persistence of conversation details is stubbed out, and - `get_session` returns a context-manager mock whose - `query(...).filter_by(...).first()` returns `None`. - - Parameters: - mocker (MockerFixture): The pytest-mock fixture used to apply patches. 
- """ - mocker.patch( - "app.endpoints.query.validate_conversation_ownership", return_value=True - ) - mocker.patch("app.endpoints.query.persist_user_conversation_details") - - # Mock the database session and query - mock_session = mocker.Mock() - mock_session.query.return_value.filter_by.return_value.first.return_value = None - mock_session.__enter__ = mocker.Mock(return_value=mock_session) - mock_session.__exit__ = mocker.Mock(return_value=None) - mocker.patch("app.endpoints.query.get_session", return_value=mock_session) - - -@pytest.fixture(name="setup_configuration") -def setup_configuration_fixture() -> AppConfig: - """Set up configuration for tests. - - Create a reusable application configuration tailored for unit tests. - - The returned AppConfig is initialized from a fixed dictionary that sets: - - a lightweight service configuration (localhost, port 8080, minimal workers, logging enabled), - - a test Llama Stack configuration (test API key and URL, not used as a library client), - - user data collection with transcripts disabled, - - an empty MCP servers list, - - a noop conversation cache. - - Returns: - AppConfig: an initialized configuration instance suitable for test fixtures. - """ - config_dict: dict[Any, Any] = { - "name": "test", - "service": { - "host": "localhost", - "port": 8080, - "auth_enabled": False, - "workers": 1, - "color_log": True, - "access_log": True, - }, - "llama_stack": { - "api_key": "test-key", - "url": "http://test.com:1234", - "use_as_library_client": False, - }, - "user_data_collection": { - "transcripts_enabled": False, - }, - "mcp_servers": [], - "customization": None, - "conversation_cache": { - "type": "noop", - }, - } - cfg = AppConfig() - cfg.init_from_dict(config_dict) - return cfg - - -def test_is_transcripts_enabled( - setup_configuration: AppConfig, mocker: MockerFixture -) -> None: - """Test that is_transcripts_enabled returns True when transcripts is not disabled.""" - # Override the transcripts_enabled setting - mocker.patch.object( - setup_configuration.user_data_collection_configuration, - "transcripts_enabled", - True, - ) - mocker.patch("app.endpoints.query.configuration", setup_configuration) - - assert is_transcripts_enabled() is True, "Transcripts should be enabled" - - -def test_is_transcripts_disabled( - setup_configuration: AppConfig, mocker: MockerFixture -) -> None: - """Test that is_transcripts_enabled returns False when transcripts is disabled.""" - # Use default transcripts_enabled=False from setup - mocker.patch("app.endpoints.query.configuration", setup_configuration) - - assert is_transcripts_enabled() is False, "Transcripts should be disabled" - - -def test_select_model_and_provider_id_from_request(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function.""" - mocker.patch( - "metrics.utils.configuration.inference.default_provider", - "default_provider", - ) - mocker.patch( - "metrics.utils.configuration.inference.default_model", - "default_model", - ) - - model_list = [ - mocker.Mock( - id="provider1/model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="provider2/model2", - custom_metadata={"model_type": "llm", "provider_id": "provider2"}, - ), - mocker.Mock( - id="default_provider/default_model", - custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, - ), - ] - - # Create a query request with model and provider specified - query_request = QueryRequest( - query="What is OpenStack?", model="model2", provider="provider2" - ) 
- - # Assert the model and provider from request take precedence from the configuration one - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider - ) - - assert llama_stack_model_id == "provider2/model2" - assert model_id == "model2" - assert provider_id == "provider2" - - -def test_select_model_and_provider_id_from_configuration(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function.""" - mocker.patch( - "metrics.utils.configuration.inference.default_provider", - "default_provider", - ) - mocker.patch( - "metrics.utils.configuration.inference.default_model", - "default_model", - ) - - model_list = [ - mocker.Mock( - id="provider1/model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="default_provider/default_model", - custom_metadata={"model_type": "llm", "provider_id": "default_provider"}, - ), - ] - - # Create a query request without model and provider specified - query_request = QueryRequest( - query="What is OpenStack?", - ) - - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider - ) - - # Assert that the default model and provider from the configuration are returned - assert llama_stack_model_id == "default_provider/default_model" - assert model_id == "default_model" - assert provider_id == "default_provider" - - -def test_select_model_and_provider_id_first_from_list(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function when no model is specified.""" - model_list = [ - mocker.Mock( - id="not_llm_type", - custom_metadata={"model_type": "embedding", "provider_id": "provider1"}, - ), - mocker.Mock( - id="first_model", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - mocker.Mock( - id="second_model", - custom_metadata={"model_type": "llm", "provider_id": "provider2"}, - ), - ] - - query_request = QueryRequest(query="What is OpenStack?") - - llama_stack_model_id, model_id, provider_id = select_model_and_provider_id( - model_list, query_request.model, query_request.provider - ) - - # Assert return the first available LLM model when no model/provider is - # specified in the request or in the configuration - assert llama_stack_model_id == "first_model" - assert model_id == "first_model" - assert provider_id == "provider1" - - -def test_select_model_and_provider_id_invalid_model(mocker: MockerFixture) -> None: - """Test the select_model_and_provider_id function with an invalid model.""" - mock_client = mocker.Mock() - mock_client.models.list.return_value = [ - mocker.Mock( - id="model1", - custom_metadata={"model_type": "llm", "provider_id": "provider1"}, - ), - ] - - query_request = QueryRequest( - query="What is OpenStack?", model="invalid_model", provider="provider1" - ) - - with pytest.raises(HTTPException) as exc_info: - select_model_and_provider_id( - mock_client.models.list(), query_request.model, query_request.provider - ) - - assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Model not found" - assert "invalid_model" in detail["cause"] - - -def test_select_model_and_provider_id_no_available_models( - mocker: MockerFixture, -) -> None: - """Test the select_model_and_provider_id function with no available models.""" - mock_client = mocker.Mock() - # empty list of models - 
mock_client.models.list.return_value = [] - - query_request = QueryRequest(query="What is OpenStack?", model=None, provider=None) - - with pytest.raises(HTTPException) as exc_info: - select_model_and_provider_id( - mock_client.models.list(), query_request.model, query_request.provider - ) - - assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Model not found" - # The cause may vary, but should indicate no model found - assert "Model" in detail["cause"] - - -def test_validate_attachments_metadata() -> None: - """Test the validate_attachments_metadata function.""" - attachments = [ - Attachment( - attachment_type="log", - content_type="text/plain", - content="this is attachment", - ), - Attachment( - attachment_type="configuration", - content_type="application/yaml", - content="kind: Pod\n metadata:\n name: private-reg", - ), - ] - - # If no exception is raised, the test passes - validate_attachments_metadata(attachments) - - -def test_validate_attachments_metadata_invalid_type() -> None: - """Test the validate_attachments_metadata function with invalid attachment type.""" - attachments = [ - Attachment( - attachment_type="invalid_type", - content_type="text/plain", - content="this is attachment", - ), - ] - - with pytest.raises(HTTPException) as exc_info: - validate_attachments_metadata(attachments) - assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT - - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Invalid attribute value" - assert "Invalid attatchment type invalid_type" in detail["cause"] - - -def test_validate_attachments_metadata_invalid_content_type() -> None: - """Test the validate_attachments_metadata function with invalid attachment type.""" - attachments = [ - Attachment( - attachment_type="log", - content_type="text/invalid_content_type", - content="this is attachment", - ), - ] - - with pytest.raises(HTTPException) as exc_info: - validate_attachments_metadata(attachments) - assert exc_info.value.status_code == status.HTTP_422_UNPROCESSABLE_CONTENT - - detail = exc_info.value.detail - assert isinstance(detail, dict) - assert detail["response"] == "Invalid attribute value" - assert ( - "Invalid attatchment content type text/invalid_content_type" in detail["cause"] - ) - - -def test_no_tools_parameter_backward_compatibility() -> None: - """Test that default behavior is unchanged when no_tools parameter is not specified.""" - # This test ensures that existing code that doesn't specify no_tools continues to work - query_request = QueryRequest(query="What is OpenStack?") - - # Verify default value - assert query_request.no_tools is False - - # Test that QueryRequest can be created without no_tools parameter - query_request_minimal = QueryRequest(query="Simple query") - assert query_request_minimal.no_tools is False - - -@pytest.mark.parametrize( - "user_conversation,request_values,expected_values", - [ - # No user conversation, no request values - ( - None, - (None, None), - # Expect no values to be used - (None, None), - ), - # No user conversation, request values provided - ( - None, - ("foo", "bar"), - # Expect request values to be used - ("foo", "bar"), - ), - # User conversation exists, no request values - ( - UserConversation( - id="conv1", - user_id="user1", - last_used_provider="foo", - last_used_model="bar", - message_count=1, - ), - ( - None, - None, - ), - # Expect conversation values to be 
used - ( - "foo", - "bar", - ), - ), - # Request matches user conversation - ( - UserConversation( - id="conv1", - user_id="user1", - last_used_provider="foo", - last_used_model="bar", - message_count=1, - ), - ( - "foo", - "bar", - ), - # Expect request values to be used - ( - "foo", - "bar", - ), - ), - ], - ids=[ - "No user conversation, no request values", - "No user conversation, request values provided", - "User conversation exists, no request values", - "Request matches user conversation", - ], -) -def test_evaluate_model_hints( - user_conversation: list, - request_values: list, - expected_values: list, -) -> None: - """Test evaluate_model_hints function with various scenarios.""" - # Unpack fixtures - request_provider, request_model = request_values - expected_provider, expected_model = expected_values - - query_request = QueryRequest( - query="What is love?", - provider=request_provider, - model=request_model, - ) # pylint: disable=missing-kwoa - - model_id, provider_id = evaluate_model_hints(user_conversation, query_request) - - assert provider_id == expected_provider - assert model_id == expected_model diff --git a/tests/unit/app/endpoints/test_rlsapi_v1.py b/tests/unit/app/endpoints/test_rlsapi_v1.py index 8bada1185..3a633a32a 100644 --- a/tests/unit/app/endpoints/test_rlsapi_v1.py +++ b/tests/unit/app/endpoints/test_rlsapi_v1.py @@ -40,7 +40,7 @@ def _create_mock_request(mocker: MockerFixture, rh_identity: Any = None) -> Any: """Create a mock FastAPI Request with optional RH Identity data.""" mock_request = mocker.Mock() - mock_request.headers = {"User-Agent": "CLA/0.4.0"} + mock_request.headers = {"User-Agent": "CLA/0.4.1"} if rh_identity is not None: mock_request.state = mocker.Mock() @@ -167,6 +167,55 @@ def test_build_instructions( assert not_expected not in result +# --- Test _build_instructions with customization.system_prompt --- + + +@pytest.mark.parametrize( + ("custom_prompt", "expected_prompt"), + [ + pytest.param( + "You are a RHEL expert.", + "You are a RHEL expert.", + id="customization_system_prompt_set", + ), + pytest.param( + None, + constants.DEFAULT_SYSTEM_PROMPT, + id="customization_system_prompt_none", + ), + ], +) +def test_build_instructions_with_customization( + mocker: MockerFixture, + custom_prompt: str | None, + expected_prompt: str, +) -> None: + """Test _build_instructions uses customization.system_prompt when set.""" + mock_customization = mocker.Mock() + mock_customization.system_prompt = custom_prompt + mock_config = mocker.Mock() + mock_config.customization = mock_customization + mocker.patch("app.endpoints.rlsapi_v1.configuration", mock_config) + + systeminfo = RlsapiV1SystemInfo(os="RHEL", version="9.3", arch="x86_64") + result = _build_instructions(systeminfo) + + assert expected_prompt in result + assert "OS: RHEL" in result + + +def test_build_instructions_no_customization(mocker: MockerFixture) -> None: + """Test _build_instructions falls back when customization is None.""" + mock_config = mocker.Mock() + mock_config.customization = None + mocker.patch("app.endpoints.rlsapi_v1.configuration", mock_config) + + systeminfo = RlsapiV1SystemInfo() + result = _build_instructions(systeminfo) + + assert result == constants.DEFAULT_SYSTEM_PROMPT + + # --- Test _get_default_model_id --- @@ -518,3 +567,92 @@ def test_infer_request_question_is_stripped() -> None: """Test that question whitespace is stripped during validation.""" request = RlsapiV1InferRequest(question=" How do I list files? ") assert request.question == "How do I list files?" 
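For orientation, the contract the `_build_instructions` customization tests above pin down can be read back as a small sketch. This is only a hypothetical reading of the asserted behaviour (custom prompt preferred when `configuration.customization.system_prompt` is set, fallback to `constants.DEFAULT_SYSTEM_PROMPT`, system facts such as "OS: RHEL" appended when present), not the actual helper in app/endpoints/rlsapi_v1.py; the exact joining format is an assumption.

# Hypothetical sketch only -- the real implementation lives in
# app/endpoints/rlsapi_v1.py; the joining format below is an assumption.
import constants
from configuration import configuration

def _build_instructions_sketch(systeminfo) -> str:
    customization = configuration.customization
    # Prefer customization.system_prompt when configured, else the default.
    if customization is not None and customization.system_prompt:
        prompt = customization.system_prompt
    else:
        prompt = constants.DEFAULT_SYSTEM_PROMPT
    # Append only the system facts that are actually set, e.g. "OS: RHEL".
    facts = [
        f"{label}: {value}"
        for label, value in (
            ("OS", systeminfo.os),
            ("Version", systeminfo.version),
            ("Arch", systeminfo.arch),
        )
        if value
    ]
    return "\n".join([prompt, *facts]) if facts else prompt

With no customization and an empty RlsapiV1SystemInfo this returns the default prompt verbatim, matching test_build_instructions_no_customization; with a custom prompt and os="RHEL" the result contains both the prompt and "OS: RHEL", matching the parametrized cases.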
+ + +# --- Test MCP tools passthrough --- + + +def _setup_responses_mock_with_capture( + mocker: MockerFixture, response_text: str = "Test response." +) -> Any: + """Set up responses.create mock and return the create mock for assertion. + + Unlike _setup_responses_mock, this returns the mock_create object so + callers can inspect call_args to verify tools were passed correctly. + + Args: + mocker: The pytest mocker fixture. + response_text: Text for the mock LLM response. + + Returns: + The mock create coroutine, whose call_args can be inspected. + """ + mock_response = mocker.Mock() + mock_response.output = [_create_mock_response_output(mocker, response_text)] + + mock_create = mocker.AsyncMock(return_value=mock_response) + _setup_responses_mock(mocker, mock_create) + return mock_create + + +@pytest.mark.asyncio +async def test_retrieve_simple_response_passes_tools( + mocker: MockerFixture, mock_configuration: AppConfig +) -> None: + """Test that retrieve_simple_response forwards tools to responses.create().""" + mock_create = _setup_responses_mock_with_capture(mocker) + tools = [ + { + "type": "mcp", + "server_label": "test-mcp", + "server_url": "http://localhost:9000/sse", + "require_approval": "never", + } + ] + + await retrieve_simple_response("Test question", "Instructions", tools=tools) + + mock_create.assert_called_once() + call_kwargs = mock_create.call_args.kwargs + assert call_kwargs["tools"] == tools + + +@pytest.mark.asyncio +async def test_retrieve_simple_response_defaults_to_empty_tools( + mocker: MockerFixture, mock_configuration: AppConfig +) -> None: + """Test that retrieve_simple_response passes empty list when tools is None.""" + mock_create = _setup_responses_mock_with_capture(mocker) + + await retrieve_simple_response("Test question", "Instructions") + + mock_create.assert_called_once() + call_kwargs = mock_create.call_args.kwargs + assert call_kwargs["tools"] == [] + + +@pytest.mark.asyncio +async def test_infer_endpoint_calls_get_mcp_tools( + mocker: MockerFixture, + mock_configuration: AppConfig, + mock_llm_response: None, + mock_auth_resolvers: None, +) -> None: + """Test that infer_endpoint calls get_mcp_tools with configuration.mcp_servers.""" + mock_get_mcp_tools = mocker.patch( + "app.endpoints.rlsapi_v1.get_mcp_tools", + return_value=[{"type": "mcp", "server_label": "test"}], + ) + + infer_request = RlsapiV1InferRequest(question="How do I list files?") + mock_request = _create_mock_request(mocker) + mock_background_tasks = _create_mock_background_tasks(mocker) + + await infer_endpoint( + infer_request=infer_request, + request=mock_request, + background_tasks=mock_background_tasks, + auth=MOCK_AUTH, + ) + + mock_get_mcp_tools.assert_called_once_with(mock_configuration.mcp_servers) diff --git a/tests/unit/app/endpoints/test_streaming_query.py b/tests/unit/app/endpoints/test_streaming_query.py index 64b226a31..82ff8c503 100644 --- a/tests/unit/app/endpoints/test_streaming_query.py +++ b/tests/unit/app/endpoints/test_streaming_query.py @@ -1,637 +1,2043 @@ # pylint: disable=redefined-outer-name,import-error, too-many-function-args """Unit tests for the /streaming_query (v2) endpoint using Responses API.""" -from typing import Any, AsyncIterator -from unittest.mock import Mock +# pylint: disable=too-many-lines,too-many-function-args +import json +from collections.abc import AsyncIterator +from typing import Any import pytest -from fastapi import Request, status +from fastapi import HTTPException, Request from fastapi.responses import StreamingResponse -import 
httpx -from llama_stack_client import APIConnectionError, RateLimitError +from llama_stack_api.openai_responses import ( + OpenAIResponseObject, + OpenAIResponseObjectStream, + OpenAIResponseObjectStreamResponseCompleted as CompletedChunk, + OpenAIResponseObjectStreamResponseFailed as FailedChunk, + OpenAIResponseObjectStreamResponseIncomplete as IncompleteChunk, + OpenAIResponseObjectStreamResponseMcpCallArgumentsDone as MCPArgsDoneChunk, + OpenAIResponseObjectStreamResponseOutputItemAdded as OutputItemAddedChunk, + OpenAIResponseObjectStreamResponseOutputItemDone as OutputItemDoneChunk, + OpenAIResponseObjectStreamResponseOutputTextDelta as TextDeltaChunk, + OpenAIResponseObjectStreamResponseOutputTextDone as TextDoneChunk, + OpenAIResponseOutputMessageMCPCall as MCPCall, +) +from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient +from pydantic import AnyUrl from pytest_mock import MockerFixture from app.endpoints.streaming_query import ( - retrieve_response, - streaming_query_endpoint_handler_v2, + generate_response, + retrieve_response_generator, + response_generator, + shield_violation_generator, + stream_end_event, + stream_event, + stream_http_error_event, + stream_start_event, + streaming_query_endpoint_handler, +) +from configuration import AppConfig +from constants import ( + LLM_TOKEN_EVENT, + LLM_TOOL_CALL_EVENT, + LLM_TOOL_RESULT_EVENT, + MEDIA_TYPE_JSON, + MEDIA_TYPE_TEXT, +) +from models.config import Action +from models.context import ResponseGeneratorContext +from models.requests import Attachment, QueryRequest +from models.responses import InternalServerErrorResponse +from utils.token_counter import TokenCounter +from utils.types import ReferencedDocument, ResponsesApiParams, TurnSummary + +MOCK_AUTH_STREAMING = ( + "00000001-0001-0001-0001-000000000001", + "mock_username", + False, + "mock_token", ) -from models.config import Action, ModelContextProtocolServer -from models.requests import QueryRequest -from utils.types import ShieldModerationResult -@pytest.fixture -def dummy_request() -> Request: - """Create a dummy FastAPI Request for testing with authorized actions. 
+@pytest.fixture(autouse=True, name="setup_configuration") +def setup_configuration_fixture() -> AppConfig: + """Set up configuration for tests.""" + config_dict = { + "name": "test", + "service": { + "host": "localhost", + "port": 8080, + "auth_enabled": False, + "workers": 1, + "color_log": True, + "access_log": True, + }, + "llama_stack": { + "api_key": "test-key", + "url": "http://test.com:1234", + "use_as_library_client": False, + }, + "user_data_collection": { + "transcripts_enabled": False, + }, + "mcp_servers": [], + "conversation_cache": { + "type": "noop", + }, + } + cfg = AppConfig() + cfg.init_from_dict(config_dict) + return cfg + + +# ============================================================================ +# OLS Compatibility Tests +# ============================================================================ + + +class TestOLSStreamEventFormatting: + """Test the stream_event function for both media types (OLS compatibility).""" + + def test_stream_event_json_token(self) -> None: + """Test token event formatting for JSON media type.""" + data = {"id": 0, "token": "Hello"} + result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) + + expected = 'data: {"event": "token", "data": {"id": 0, "token": "Hello"}}\n\n' + assert result == expected + + def test_stream_event_text_token(self) -> None: + """Test token event formatting for text media type.""" + data = {"id": 0, "token": "Hello"} + result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_TEXT) + + assert result == "Hello" + + def test_stream_event_json_tool_call(self) -> None: + """Test tool call event formatting for JSON media type.""" + data = { + "id": 0, + "token": {"tool_name": "search", "arguments": {"query": "test"}}, + } + result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) + + expected = ( + 'data: {"event": "tool_call", "data": {"id": 0, "token": ' + '{"tool_name": "search", "arguments": {"query": "test"}}}}\n\n' + ) + assert result == expected + + def test_stream_event_text_tool_call(self) -> None: + """Test tool call event formatting for text media type.""" + data = { + "id": 0, + "token": {"tool_name": "search", "arguments": {"query": "test"}}, + } + result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_TEXT) + + expected = ( + '\nTool call: {"id": 0, "token": ' + '{"tool_name": "search", "arguments": {"query": "test"}}}\n' + ) + assert result == expected + + def test_stream_event_json_tool_result(self) -> None: + """Test tool result event formatting for JSON media type.""" + data = { + "id": 0, + "token": {"tool_name": "search", "response": "Found results"}, + } + result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) + + expected = ( + 'data: {"event": "tool_result", "data": {"id": 0, "token": ' + '{"tool_name": "search", "response": "Found results"}}}\n\n' + ) + assert result == expected + + def test_stream_event_text_tool_result(self) -> None: + """Test tool result event formatting for text media type.""" + data = { + "id": 0, + "token": {"tool_name": "search", "response": "Found results"}, + } + result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_TEXT) + + expected = ( + '\nTool result: {"id": 0, "token": ' + '{"tool_name": "search", "response": "Found results"}}\n' + ) + assert result == expected + + def test_stream_event_unknown_type(self) -> None: + """Test handling of unknown event types.""" + data = {"id": 0, "token": "test"} + result = stream_event(data, "unknown_event", MEDIA_TYPE_TEXT) + + assert result == "" + + +class TestOLSStreamEndEvent: + 
"""Test the stream_end_event function for both media types (OLS compatibility).""" + + def test_stream_end_event_json(self) -> None: + """Test end event formatting for JSON media type.""" + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" + ), + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" + ), + ] + result = stream_end_event( + token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_JSON, + ) + + data_part = result.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "end" + assert "referenced_documents" in parsed["data"] + assert len(parsed["data"]["referenced_documents"]) == 2 + assert parsed["data"]["referenced_documents"][0]["doc_title"] == "Test Doc 1" + assert ( + parsed["data"]["referenced_documents"][0]["doc_url"] + == "https://example.com/doc1" + ) + assert "available_quotas" in parsed + + def test_stream_end_event_text(self) -> None: + """Test end event formatting for text media type.""" + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" + ), + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" + ), + ] + result = stream_end_event( + token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_TEXT, + ) + + expected = ( + "\n\n---\n\nTest Doc 1: https://example.com/doc1\n" + "Test Doc 2: https://example.com/doc2" + ) + assert result == expected + + def test_stream_end_event_text_no_docs(self) -> None: + """Test end event formatting for text media type with no documents.""" + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents: list[ReferencedDocument] = [] + result = stream_end_event( + token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_TEXT, + ) + + assert result == "" + + +class TestOLSCompatibilityIntegration: + """Integration tests for OLS compatibility.""" + + def test_media_type_validation(self) -> None: + """Test that media type validation works correctly.""" + valid_request = QueryRequest( + query="test", media_type="application/json" + ) # pyright: ignore[reportCallIssue] + assert valid_request.media_type == "application/json" + + valid_request = QueryRequest( + query="test", media_type="text/plain" + ) # pyright: ignore[reportCallIssue] + assert valid_request.media_type == "text/plain" + + with pytest.raises(ValueError, match="media_type must be either"): + QueryRequest( + query="test", media_type="invalid/type" + ) # pyright: ignore[reportCallIssue] + + def test_ols_end_event_structure(self) -> None: + """Test that end event follows OLS structure.""" + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + available_quotas: dict[str, int] = {} + referenced_documents = [ + ReferencedDocument( + doc_url=AnyUrl("https://example.com/doc"), doc_title="Test Doc" + ), + ] + end_event = stream_end_event( + token_usage, + available_quotas, + referenced_documents, + MEDIA_TYPE_JSON, + ) + data_part = end_event.replace("data: ", "").strip() + parsed = json.loads(data_part) + + assert parsed["event"] == "end" + assert "referenced_documents" in parsed["data"] + assert "truncated" in parsed["data"] + 
assert "input_tokens" in parsed["data"] + assert "output_tokens" in parsed["data"] + assert "available_quotas" in parsed - Create a FastAPI Request configured for tests with permissive RBAC. - Returns: - Request: A FastAPI Request whose `state.authorized_actions` is set to a - set of all `Action` members. - """ +# ============================================================================ +# Endpoint Handler Tests +# ============================================================================ + + +@pytest.fixture(name="dummy_request") +def dummy_request() -> Request: + """Dummy request fixture for testing.""" req = Request(scope={"type": "http"}) - # Provide a permissive authorized_actions set to satisfy RBAC check req.state.authorized_actions = set(Action) return req -@pytest.mark.asyncio -async def test_retrieve_response_builds_rag_and_mcp_tools( - mocker: MockerFixture, -) -> None: - """Test that retrieve_response correctly builds RAG and MCP tools.""" - mock_client = mocker.Mock() - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [mocker.Mock(id="db1")] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - - mock_cfg = mocker.Mock() - mock_cfg.mcp_servers = [ - ModelContextProtocolServer( - name="fs", - url="http://localhost:3000", - authorization_headers={"Authorization": "kubernetes"}, - ), - ] - mocker.patch("app.endpoints.streaming_query_v2.configuration", mock_cfg) - mocker.patch("app.endpoints.query_v2.configuration", mock_cfg) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-z", qr, token="tok") - - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["stream"] is True - tools = kwargs["tools"] - assert isinstance(tools, list) - types = {t.get("type") for t in tools} - # Since we're now skipping RAG tools and doing direct vector querying, - # we should only see MCP tools, not file_search tools - assert types == {"mcp"} - - -@pytest.mark.asyncio -async def test_retrieve_response_no_tools_passes_none(mocker: MockerFixture) -> None: - """Test that retrieve_response passes None for tools when no_tools=True.""" - mock_client = mocker.Mock() - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = 
mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - # Mock vector_io.query for direct vector querying - mock_query_response = mocker.Mock() - mock_query_response.chunks = [] - mock_client.vector_io.query = mocker.AsyncMock(return_value=mock_query_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello", no_tools=True) - await retrieve_response(mock_client, "model-z", qr, token="tok") - - kwargs = mock_client.responses.create.call_args.kwargs - assert kwargs["tools"] is None - assert kwargs["stream"] is True - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_success_yields_events( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming_query_endpoint_handler_v2 yields correct SSE events.""" - # Skip real config checks - patch in streaming_query where the base handler is - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # Replace SSE helpers for deterministic output - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - cleanup_spy = mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Build a fake async stream of chunks - async def fake_stream() -> AsyncIterator[Mock]: - """ - Produce a fake asynchronous stream of response events used for testing streaming endpoints. - - Yields Mock objects that emulate event frames from a - streaming responses API, including: - - a "response.created" event with a conversation id, - - content and text delta events ("response.content_part.added", - "response.output_text.delta"), - - function call events ("response.output_item.done" with completed tool call), - - a final "response.output_text.done" event and a "response.completed" event. - - Returns: - AsyncIterator[Mock]: An async iterator that yields - event-like Mock objects representing the streamed - response frames; the final yielded response contains an `output` - attribute (an empty list) to allow shield violation detection in - tests. 
- """ - yield Mock(type="response.created", response=Mock(id="conv-xyz")) - yield Mock(type="response.content_part.added") - yield Mock(type="response.output_text.delta", delta="Hello ") - yield Mock(type="response.output_text.delta", delta="world") - item_mock = Mock(type="function_call", id="item1", call_id="call1") - item_mock.name = "search" # 'name' is a special Mock param, set explicitly - item_mock.arguments = '{"q":"x"}' - yield Mock(type="response.output_item.done", item=item_mock) - yield Mock(type="response.output_text.done", text="Hello world") - # Include a response object with output attribute for shield violation detection - mock_response = Mock(output=[]) - yield Mock(type="response.completed", response=mock_response) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_stream(), "abc123def456"), - ) - - metric = mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", True, "token-abc"), # skip_userid_check=True - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - metric.labels("p", "m").inc.assert_called_once() - - # Collect emitted events - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Validate event sequence and content - assert events[0] == "START:abc123def456\n" - # content_part.added triggers empty token - assert events[1] == "EV:token:\n" - assert events[2] == "EV:token:Hello \n" - assert events[3] == "EV:token:world\n" - # tool call delta - assert events[4].startswith("EV:tool_call:") - # turn complete and end - assert "EV:turn_complete:Hello world\n" in events - assert events[-1] == "END\n" - - # Verify cleanup function was invoked after streaming - assert cleanup_spy.call_count == 1 - # Verify cleanup was called with correct user_id and conversation_id - call_args = cleanup_spy.call_args - assert call_args.kwargs["user_id"] == "user123" - assert call_args.kwargs["conversation_id"] == "abc123def456" - assert call_args.kwargs["model_id"] == "m" - assert call_args.kwargs["provider_id"] == "p" - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_api_connection_error( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming_query_endpoint_handler_v2 handles API connection errors.""" - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - def _raise(*_a: Any, **_k: Any) -> None: - """ - Always raises an APIConnectionError with its `request` attribute set to None. - - Raises: - APIConnectionError: Raised every time the function is called; the - exception's `request` is None. 
- """ - raise APIConnectionError(request=None) # type: ignore[arg-type] - - mocker.patch("client.AsyncLlamaStackClientHolder.get_client", side_effect=_raise) - - fail_metric = mocker.patch("metrics.llm_calls_failures_total") - - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - - response = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="hi"), - auth=("user123", "", False, "tok"), - mcp_headers={}, - ) - - assert isinstance(response, StreamingResponse) - assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - fail_metric.inc.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_with_shields_available(mocker: MockerFixture) -> None: - """Test that shield moderation runs and passes when content is safe.""" - mock_client = mocker.Mock() - - # Create mock shield with provider_resource_id - mock_shield = mocker.Mock() - mock_shield.identifier = "content-safety-shield" - mock_shield.provider_resource_id = "moderation-model" - mock_client.shields.list = mocker.AsyncMock(return_value=[mock_shield]) - - # Create mock model matching the shield's provider_resource_id - mock_model = mocker.Mock() - mock_model.id = "moderation-model" - mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) - - # Mock moderations.create to return safe (not flagged) content - mock_moderation_result = mocker.Mock() - mock_moderation_result.flagged = False - mock_moderation_response = mocker.Mock() - mock_moderation_response.results = [mock_moderation_result] - mock_client.moderations.create = mocker.AsyncMock( - return_value=mock_moderation_response - ) - - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-shields", qr, token="tok") - - # Verify that moderation was called with the user's query - mock_client.moderations.create.assert_called_once_with( - input="hello", model="moderation-model" - ) - # Verify that responses.create was called (moderation passed) - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_retrieve_response_with_no_shields_available( - mocker: MockerFixture, -) -> None: - """Test that LLM is called when no shields are configured.""" - mock_client = mocker.Mock() - - # Mock shields.list and models.list for run_shield_moderation - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mock_vector_stores = mocker.Mock() - mock_vector_stores.data = [] - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mock_vector_stores) - mock_client.responses.create = mocker.AsyncMock(return_value=mocker.Mock()) - # Mock conversations.create for new conversation creation - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123def456" - 
mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.configuration", mocker.Mock(mcp_servers=[]) - ) - - qr = QueryRequest(query="hello") - await retrieve_response(mock_client, "model-no-shields", qr, token="tok") - - # Verify that responses.create was called - mock_client.responses.create.assert_called_once() - - -@pytest.mark.asyncio -async def test_streaming_response_blocked_by_shield_moderation( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that when shield moderation blocks, a violation stream is returned.""" - # Skip real config checks - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # SSE helpers - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Build a fake async stream for violation response - async def fake_violation_stream() -> AsyncIterator[Mock]: - """Produce an async iterator simulating a shield violation response.""" - yield Mock( - type="response.content_part.added", - response_id="resp_shield", - item_id="msg_shield", - ) - yield Mock( - type="response.output_text.delta", delta="Content violates safety policy" - ) - violation_item = Mock( - type="message", - role="assistant", - content="Content violates safety policy", - refusal=None, - ) - yield Mock( - type="response.completed", - response=Mock(id="resp_shield", output=[violation_item]), - ) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_violation_stream(), "conv123"), - ) - - mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="dangerous query"), - auth=("user123", "", True, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - - # Collect emitted events to trigger the generator - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Verify that the stream contains the violation message - all_events = "".join(events) - assert "Content violates safety policy" in all_events - - -@pytest.mark.asyncio -async def test_streaming_response_no_shield_violation( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that no metric is incremented when there's no shield violation in streaming.""" - # Skip 
real config checks - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - # Model selection plumbing - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("llama/m", "m", "p"), - ) - - # SSE helpers - mocker.patch( - "app.endpoints.streaming_query_v2.stream_start_event", - lambda conv_id: f"START:{conv_id}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_event", - lambda data, event_type, media_type: f"EV:{event_type}:{data.get('token','')}\n", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.stream_end_event", - lambda _m, _t, _aq, _rd, _media: "END\n", - ) - - # Mock the cleanup function that handles all post-streaming database/cache work - mocker.patch( - "app.endpoints.streaming_query_v2.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - # Mock the validation error metric - validation_metric = mocker.patch("metrics.llm_calls_validation_errors_total") - - # Build a fake async stream without violation - async def fake_stream_without_violation() -> AsyncIterator[Mock]: - """ - Produce a deterministic sequence of streaming response events that end with a message. - - Yields four events in order: - - `response.created` with a response id, - - `response.output_text.delta` with a text fragment, - - `response.output_text.done` with the final text, - - `response.completed` whose `response.output` contains an assistant - message where `refusal` is `None`. - - Returns: - An iterator yielding Mock objects representing the - streaming events of a successful response with no refusal. 
- """ - yield Mock(type="response.created", response=Mock(id="conv-safe")) - yield Mock(type="response.output_text.delta", delta="Safe ") - yield Mock(type="response.output_text.done", text="Safe response") - # Response completed without refusal - safe_item = Mock(type="message", role="assistant", refusal=None) - response_safe = Mock(id="conv-safe", output=[safe_item]) - yield Mock(type="response.completed", response=response_safe) - - mocker.patch( - "app.endpoints.streaming_query_v2.retrieve_response", - return_value=(fake_stream_without_violation(), ""), - ) - - mocker.patch("metrics.llm_calls_total") - - resp = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="safe query"), - auth=("user123", "", True, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(resp, StreamingResponse) - - # Collect emitted events to trigger the generator - events: list[str] = [] - async for chunk in resp.body_iterator: - s = chunk.decode() if isinstance(chunk, (bytes, bytearray)) else str(chunk) - events.append(s) - - # Verify that the validation error metric was NOT incremented - validation_metric.inc.assert_not_called() - - -@pytest.mark.asyncio -async def test_streaming_query_endpoint_handler_v2_quota_exceeded( - mocker: MockerFixture, dummy_request: Request -) -> None: - """Test that streaming query endpoint v2 streams HTTP 429 when model quota is exceeded.""" - mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") - - mock_client = mocker.Mock() - mock_client.models.list = mocker.AsyncMock(return_value=[mocker.Mock()]) - mock_response = httpx.Response(429, request=httpx.Request("POST", "http://test")) - mock_client.responses.create.side_effect = RateLimitError( - "Rate limit exceeded for model gpt-4o-mini", - response=mock_response, - body=None, - ) - # Mock conversation creation (needed for query_v2) - mock_conversation = mocker.Mock() - mock_conversation.id = "conv_abc123" - mock_client.conversations.create = mocker.AsyncMock(return_value=mock_conversation) - mock_client.vector_stores.list = mocker.AsyncMock(return_value=mocker.Mock(data=[])) - mock_client.shields.list = mocker.AsyncMock(return_value=[]) - mock_client.models.list = mocker.AsyncMock(return_value=[]) - - mocker.patch( - "client.AsyncLlamaStackClientHolder.get_client", return_value=mock_client - ) - mocker.patch( - "app.endpoints.streaming_query.evaluate_model_hints", - return_value=(None, None), - ) - mocker.patch( - "app.endpoints.streaming_query.select_model_and_provider_id", - return_value=("openai/gpt-4o-mini", "gpt-4o-mini", "openai"), - ) - mocker.patch("app.endpoints.streaming_query.validate_model_provider_override") - mocker.patch( - "app.endpoints.streaming_query_v2.run_shield_moderation", - return_value=ShieldModerationResult(blocked=False), - ) - mocker.patch( - "app.endpoints.streaming_query_v2.prepare_tools_for_responses_api", - return_value=None, - ) - mocker.patch( - "app.endpoints.streaming_query_v2.get_system_prompt", return_value="PROMPT" - ) - mocker.patch( - "app.endpoints.streaming_query_v2.to_llama_stack_conversation_id", - return_value="conv_abc123", - ) - mocker.patch( - "app.endpoints.streaming_query_v2.normalize_conversation_id", - return_value="abc123", - ) - - response = await streaming_query_endpoint_handler_v2( - request=dummy_request, - query_request=QueryRequest(query="What is OpenStack?"), - auth=("user123", "", False, "token-abc"), - mcp_headers={}, - ) - - assert isinstance(response, StreamingResponse) - assert 
response.status_code == status.HTTP_429_TOO_MANY_REQUESTS - - # Read the streamed error response (SSE format) - content = b"" - async for chunk in response.body_iterator: - if isinstance(chunk, bytes): - content += chunk - elif isinstance(chunk, str): - content += chunk.encode() - else: - # Handle memoryview or other types - content += bytes(chunk) - - content_str = content.decode() - # The error is formatted as SSE: data: {"event":"error","response":"...","cause":"..."}\n\n - # Check for the error message in the content - assert "The quota has been exceeded" in content_str - assert "gpt-4o-mini" in content_str +class TestStreamingQueryEndpointHandler: + """Tests for streaming_query_endpoint_handler function.""" + + @pytest.mark.asyncio + async def test_successful_streaming_query( + self, + dummy_request: Request, # pylint: disable=redefined-outer-name + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test successful streaming query.""" + query_request = QueryRequest( + query="What is Kubernetes?" + ) # pyright: ignore[reportCallIssue] + + mocker.patch("app.endpoints.streaming_query.configuration", setup_configuration) + mocker.patch("app.endpoints.streaming_query.check_configuration_loaded") + mocker.patch("app.endpoints.streaming_query.check_tokens_available") + mocker.patch("app.endpoints.streaming_query.validate_model_provider_override") + + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_client_holder = mocker.Mock() + mock_client_holder.get_client.return_value = mock_client + mocker.patch( + "app.endpoints.streaming_query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", + } + mocker.patch( + "app.endpoints.streaming_query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + mocker.patch("app.endpoints.streaming_query.AzureEntraIDManager") + mocker.patch( + "app.endpoints.streaming_query.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("app.endpoints.streaming_query.metrics.llm_calls_total") + + async def mock_generator() -> AsyncIterator[str]: + yield "data: test\n\n" + + mock_turn_summary = TurnSummary() + mocker.patch( + "app.endpoints.streaming_query.retrieve_response_generator", + return_value=(mock_generator(), mock_turn_summary), + ) + + async def mock_generate_response( + *_args: Any, **_kwargs: Any + ) -> AsyncIterator[str]: + async for item in mock_generator(): + yield item + + mocker.patch( + "app.endpoints.streaming_query.generate_response", + side_effect=mock_generate_response, + ) + mocker.patch( + "app.endpoints.streaming_query.normalize_conversation_id", + return_value="123", + ) + + response = await streaming_query_endpoint_handler( + request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH_STREAMING, + mcp_headers={}, + ) + + assert isinstance(response, StreamingResponse) + + @pytest.mark.asyncio + async def test_streaming_query_with_conversation( + self, + dummy_request: Request, # pylint: disable=redefined-outer-name + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test streaming query with existing conversation.""" + query_request = QueryRequest( + query="What is Kubernetes?", + 
conversation_id="123e4567-e89b-12d3-a456-426614174000",
+ ) # pyright: ignore[reportCallIssue]
+
+ mock_conversation = mocker.Mock()
+
+ mocker.patch("app.endpoints.streaming_query.configuration", setup_configuration)
+ mocker.patch("app.endpoints.streaming_query.check_configuration_loaded")
+ mocker.patch("app.endpoints.streaming_query.check_tokens_available")
+ mocker.patch("app.endpoints.streaming_query.validate_model_provider_override")
+ mocker.patch(
+ "app.endpoints.streaming_query.normalize_conversation_id",
+ return_value="normalized_123",
+ )
+ mock_validate_conv = mocker.patch(
+ "app.endpoints.streaming_query.validate_and_retrieve_conversation",
+ return_value=mock_conversation,
+ )
+
+ mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient)
+ mock_client_holder = mocker.Mock()
+ mock_client_holder.get_client.return_value = mock_client
+ mocker.patch(
+ "app.endpoints.streaming_query.AsyncLlamaStackClientHolder",
+ return_value=mock_client_holder,
+ )
+
+ mock_responses_params = mocker.Mock(spec=ResponsesApiParams)
+ mock_responses_params.model = "provider1/model1"
+ mock_responses_params.conversation = "conv_123"
+ mock_responses_params.model_dump.return_value = {
+ "input": "test",
+ "model": "provider1/model1",
+ }
+ mocker.patch(
+ "app.endpoints.streaming_query.prepare_responses_params",
+ new=mocker.AsyncMock(return_value=mock_responses_params),
+ )
+
+ mocker.patch("app.endpoints.streaming_query.AzureEntraIDManager")
+ mocker.patch(
+ "app.endpoints.streaming_query.extract_provider_and_model_from_model_id",
+ return_value=("provider1", "model1"),
+ )
+ mocker.patch("app.endpoints.streaming_query.metrics.llm_calls_total")
+
+ async def mock_generator() -> AsyncIterator[str]:
+ yield "data: test\n\n"
+
+ mock_turn_summary = TurnSummary()
+ mocker.patch(
+ "app.endpoints.streaming_query.retrieve_response_generator",
+ return_value=(mock_generator(), mock_turn_summary),
+ )
+
+ async def mock_generate_response(
+ *_args: Any, **_kwargs: Any
+ ) -> AsyncIterator[str]:
+ async for item in mock_generator():
+ yield item
+
+ mocker.patch(
+ "app.endpoints.streaming_query.generate_response",
+ side_effect=mock_generate_response,
+ )
+
+ await streaming_query_endpoint_handler(
+ request=dummy_request,
+ query_request=query_request,
+ auth=MOCK_AUTH_STREAMING,
+ mcp_headers={},
+ )
+
+ mock_validate_conv.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_streaming_query_with_attachments(
+ self,
+ dummy_request: Request, # pylint: disable=redefined-outer-name
+ setup_configuration: AppConfig,
+ mocker: MockerFixture,
+ ) -> None:
+ """Test streaming query with attachments validation."""
+ query_request = QueryRequest(
+ query="What is Kubernetes?",
+ attachments=[
+ Attachment(
+ attachment_type="log",
+ content_type="text/plain",
+ content="log content",
+ )
+ ],
+ ) # pyright: ignore[reportCallIssue]
+
+ mocker.patch("app.endpoints.streaming_query.configuration", setup_configuration)
+ mocker.patch("app.endpoints.streaming_query.check_configuration_loaded")
+ mocker.patch("app.endpoints.streaming_query.check_tokens_available")
+ mocker.patch("app.endpoints.streaming_query.validate_model_provider_override")
+ mock_validate = mocker.patch(
+ "app.endpoints.streaming_query.validate_attachments_metadata"
+ )
+
+ mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient)
+ mock_client_holder = mocker.Mock()
+ mock_client_holder.get_client.return_value = mock_client
+ 
mocker.patch( + "app.endpoints.streaming_query.AsyncLlamaStackClientHolder", + return_value=mock_client_holder, + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test", + "model": "provider1/model1", + } + mocker.patch( + "app.endpoints.streaming_query.prepare_responses_params", + new=mocker.AsyncMock(return_value=mock_responses_params), + ) + + mocker.patch("app.endpoints.streaming_query.AzureEntraIDManager") + mocker.patch( + "app.endpoints.streaming_query.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("app.endpoints.streaming_query.metrics.llm_calls_total") + + async def mock_generator() -> AsyncIterator[str]: + yield "data: test\n\n" + + mock_turn_summary = TurnSummary() + mocker.patch( + "app.endpoints.streaming_query.retrieve_response_generator", + return_value=(mock_generator(), mock_turn_summary), + ) + + async def mock_generate_response( + *_args: Any, **_kwargs: Any + ) -> AsyncIterator[str]: + async for item in mock_generator(): + yield item + + mocker.patch( + "app.endpoints.streaming_query.generate_response", + side_effect=mock_generate_response, + ) + mocker.patch( + "app.endpoints.streaming_query.normalize_conversation_id", + return_value="123", + ) + + await streaming_query_endpoint_handler( + request=dummy_request, + query_request=query_request, + auth=MOCK_AUTH_STREAMING, + mcp_headers={}, + ) + + mock_validate.assert_called_once_with(query_request.attachments) + + @pytest.mark.asyncio + async def test_streaming_query_azure_token_refresh( + self, + dummy_request: Request, # pylint: disable=redefined-outer-name + setup_configuration: AppConfig, + mocker: MockerFixture, + ) -> None: + """Test streaming query refreshes Azure token when needed.""" + query_request = QueryRequest( + query="What is Kubernetes?" 
+ ) # pyright: ignore[reportCallIssue]
+
+ mocker.patch("app.endpoints.streaming_query.configuration", setup_configuration)
+ mocker.patch("app.endpoints.streaming_query.check_configuration_loaded")
+ mocker.patch("app.endpoints.streaming_query.check_tokens_available")
+ mocker.patch("app.endpoints.streaming_query.validate_model_provider_override")
+
+ mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient)
+ mock_client_holder = mocker.Mock()
+ mock_client_holder.get_client.return_value = mock_client
+ mocker.patch(
+ "app.endpoints.streaming_query.AsyncLlamaStackClientHolder",
+ return_value=mock_client_holder,
+ )
+
+ mock_responses_params = mocker.Mock(spec=ResponsesApiParams)
+ mock_responses_params.model = "azure/model1"
+ mock_responses_params.conversation = "conv_123"
+ mock_responses_params.model_dump.return_value = {
+ "input": "test",
+ "model": "azure/model1",
+ }
+ mocker.patch(
+ "app.endpoints.streaming_query.prepare_responses_params",
+ new=mocker.AsyncMock(return_value=mock_responses_params),
+ )
+
+ mock_azure_manager = mocker.Mock()
+ mock_azure_manager.is_entra_id_configured = True
+ mock_azure_manager.is_token_expired = True
+ mock_azure_manager.refresh_token.return_value = True
+ mocker.patch(
+ "app.endpoints.streaming_query.AzureEntraIDManager",
+ return_value=mock_azure_manager,
+ )
+
+ mock_updated_client = mocker.AsyncMock(spec=AsyncLlamaStackClient)
+ mock_update_token = mocker.patch(
+ "app.endpoints.streaming_query.update_azure_token",
+ new=mocker.AsyncMock(return_value=mock_updated_client),
+ )
+
+ mocker.patch(
+ "app.endpoints.streaming_query.extract_provider_and_model_from_model_id",
+ return_value=("azure", "model1"),
+ )
+ mocker.patch("app.endpoints.streaming_query.metrics.llm_calls_total")
+
+ async def mock_generator() -> AsyncIterator[str]:
+ yield "data: test\n\n"
+
+ mock_turn_summary = TurnSummary()
+ mocker.patch(
+ "app.endpoints.streaming_query.retrieve_response_generator",
+ return_value=(mock_generator(), mock_turn_summary),
+ )
+
+ async def mock_generate_response(
+ *_args: Any, **_kwargs: Any
+ ) -> AsyncIterator[str]:
+ async for item in mock_generator():
+ yield item
+
+ mocker.patch(
+ "app.endpoints.streaming_query.generate_response",
+ side_effect=mock_generate_response,
+ )
+ mocker.patch(
+ "app.endpoints.streaming_query.normalize_conversation_id",
+ return_value="123",
+ )
+
+ await streaming_query_endpoint_handler(
+ request=dummy_request,
+ query_request=query_request,
+ auth=MOCK_AUTH_STREAMING,
+ mcp_headers={},
+ )
+
+ mock_update_token.assert_called_once()
+
+
+class TestRetrieveResponseGenerator:
+ """Tests for retrieve_response_generator function."""
+
+ @pytest.mark.asyncio
+ async def test_retrieve_response_generator_success(
+ self, mocker: MockerFixture
+ ) -> None:
+ """Test successful response generator creation."""
+ mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient)
+
+ mock_responses_params = mocker.Mock(spec=ResponsesApiParams)
+ mock_responses_params.model = "provider1/model1"
+ mock_responses_params.input = "test query"
+ mock_responses_params.conversation = "conv_123"
+ mock_responses_params.model_dump.return_value = {
+ "input": "test query",
+ "model": "provider1/model1",
+ }
+
+ mock_context = mocker.Mock(spec=ResponseGeneratorContext)
+ mock_context.client = mock_client
+ mock_context.query_request = QueryRequest(
+ query="test"
+ ) # pyright: ignore[reportCallIssue]
+
+ async def mock_response_gen() -> AsyncIterator[str]:
+ yield "test"
+
+ mocker.patch(
+ 
"app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mocker.Mock(blocked=False)), + ) + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock( + return_value=mock_response_gen() + ) + + async def mock_response_generator( + *_args: Any, **_kwargs: Any + ) -> AsyncIterator[str]: + async for item in mock_response_gen(): + yield item + + mocker.patch( + "app.endpoints.streaming_query.response_generator", + side_effect=mock_response_generator, + ) + + generator, turn_summary = await retrieve_response_generator( + mock_responses_params, mock_context + ) + + assert isinstance(turn_summary, TurnSummary) + assert hasattr(generator, "__aiter__") + + @pytest.mark.asyncio + async def test_retrieve_response_generator_shield_blocked( + self, mocker: MockerFixture + ) -> None: + """Test response generator creation when shield blocks.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.client = mock_client + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_TEXT + ) # pyright: ignore[reportCallIssue] + + mock_moderation_result = mocker.Mock() + mock_moderation_result.blocked = True + mock_moderation_result.message = "Content blocked" + mocker.patch( + "app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mock_moderation_result), + ) + mocker.patch( + "app.endpoints.streaming_query.append_turn_to_conversation", + new=mocker.AsyncMock(), + ) + + _generator, turn_summary = await retrieve_response_generator( + mock_responses_params, mock_context + ) + + assert isinstance(turn_summary, TurnSummary) + assert turn_summary.llm_response == "Content blocked" + + @pytest.mark.asyncio + async def test_retrieve_response_generator_connection_error( + self, mocker: MockerFixture + ) -> None: + """Test response generator creation raises HTTPException on connection error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + "conversation": "conv_123", + } + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.client = mock_client + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + + mocker.patch( + "app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mocker.Mock(blocked=False)), + ) + mock_request_obj = mocker.Mock() + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mock_request_obj + ) + ) + + mock_error_response = mocker.Mock() + mock_error_response.model_dump.return_value = { + "status_code": 503, + "detail": { + "response": "Unable to connect to Llama Stack", + "cause": "Connection failed", + }, + } + mocker.patch( + "app.endpoints.streaming_query.ServiceUnavailableResponse", + return_value=mock_error_response, + ) + + with 
pytest.raises(HTTPException) as exc_info: + await retrieve_response_generator(mock_responses_params, mock_context) + + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_retrieve_response_generator_api_status_error( + self, mocker: MockerFixture + ) -> None: + """Test response generator creation raises HTTPException on API status error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + "conversation": "conv_123", + } + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.client = mock_client + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + + mocker.patch( + "app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mocker.Mock(blocked=False)), + ) + mock_request_obj = mocker.Mock() + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIStatusError( + message="API error", response=mock_request_obj, body=None + ) + ) + + mock_error_response = mocker.Mock() + mock_error_response.model_dump.return_value = { + "status_code": 500, + "detail": {"response": "Error", "cause": "API error"}, + } + mocker.patch( + "app.endpoints.streaming_query.handle_known_apistatus_errors", + return_value=mock_error_response, + ) + + with pytest.raises(HTTPException) as exc_info: + await retrieve_response_generator(mock_responses_params, mock_context) + + assert exc_info.value.status_code == 500 + + @pytest.mark.asyncio + async def test_retrieve_response_generator_runtime_error_context_length( + self, mocker: MockerFixture + ) -> None: + """Test response generator raises HTTPException on RuntimeError with context_length.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + "conversation": "conv_123", + } + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.client = mock_client + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + + mocker.patch( + "app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mocker.Mock(blocked=False)), + ) + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock( + side_effect=RuntimeError("context_length exceeded") + ) + + mock_error_response = mocker.Mock() + mock_error_response.model_dump.return_value = { + "status_code": 413, + "detail": {"response": "Prompt too long", "model": "provider1/model1"}, + } + mocker.patch( + "app.endpoints.streaming_query.PromptTooLongResponse", + return_value=mock_error_response, + ) + + with pytest.raises(HTTPException) as exc_info: + await retrieve_response_generator(mock_responses_params, mock_context) + + assert exc_info.value.status_code == 413 + + @pytest.mark.asyncio + async def test_retrieve_response_generator_runtime_error_other( + self, mocker: MockerFixture + ) -> 
None: + """Test response generator creation re-raises RuntimeError without context_length.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + mock_responses_params.input = "test query" + mock_responses_params.conversation = "conv_123" + mock_responses_params.model_dump.return_value = { + "input": "test query", + "model": "provider1/model1", + "conversation": "conv_123", + } + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.client = mock_client + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + + mocker.patch( + "app.endpoints.streaming_query.run_shield_moderation", + new=mocker.AsyncMock(return_value=mocker.Mock(blocked=False)), + ) + mock_client.responses = mocker.Mock() + mock_client.responses.create = mocker.AsyncMock( + side_effect=RuntimeError("Some other error") + ) + + with pytest.raises(RuntimeError): + await retrieve_response_generator(mock_responses_params, mock_context) + + +class TestGenerateResponse: + """Tests for generate_response function.""" + + @pytest.mark.asyncio + async def test_generate_response_success(self, mocker: MockerFixture) -> None: + """Test successful response generation.""" + + async def mock_generator() -> AsyncIterator[str]: + yield "data: token\n\n" + yield "data: end\n\n" + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.user_id = "user_123" + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + mock_context.started_at = "2024-01-01T00:00:00Z" + mock_context.skip_userid_check = False + + mock_response_obj = mocker.Mock() + mock_response_obj.output = [] + mock_context.client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_context.client.responses = mocker.Mock() + mock_context.client.responses.create = mocker.AsyncMock( + return_value=mock_response_obj + ) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + mock_turn_summary.token_usage = TokenCounter(input_tokens=10, output_tokens=5) + + mock_config = mocker.Mock() + mock_config.quota_limiters = [] + mocker.patch("app.endpoints.streaming_query.configuration", mock_config) + mocker.patch("app.endpoints.streaming_query.consume_query_tokens") + mocker.patch( + "app.endpoints.streaming_query.get_available_quotas", return_value={} + ) + mocker.patch("app.endpoints.streaming_query.store_query_results") + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("start" in item for item in result) + assert any("end" in item for item in result) + + @pytest.mark.asyncio + async def test_generate_response_with_topic_summary( + self, mocker: MockerFixture + ) -> None: + """Test response generation with topic summary.""" + + async def mock_generator() -> AsyncIterator[str]: + yield "data: token\n\n" + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.user_id = "user_123" + mock_context.query_request = QueryRequest( + query="test", generate_topic_summary=True + ) # pyright: ignore[reportCallIssue] + mock_context.started_at = "2024-01-01T00:00:00Z" + mock_context.skip_userid_check = False 
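+ # The context still carries a client mock: the topic-summary path reads
+ # the client from the context, while get_topic_summary itself is patched
+ # below, so no real Llama Stack call should be made here.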
+ mock_context.client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + mock_turn_summary.token_usage = TokenCounter(input_tokens=10, output_tokens=5) + + mock_config = mocker.Mock() + mock_config.quota_limiters = [] + mocker.patch("app.endpoints.streaming_query.configuration", mock_config) + mocker.patch("app.endpoints.streaming_query.consume_query_tokens") + mocker.patch( + "app.endpoints.streaming_query.get_available_quotas", return_value={} + ) + mocker.patch( + "app.endpoints.streaming_query.get_topic_summary", + new=mocker.AsyncMock(return_value="Topic summary"), + ) + mocker.patch("app.endpoints.streaming_query.store_query_results") + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + + @pytest.mark.asyncio + async def test_generate_response_connection_error( + self, mocker: MockerFixture + ) -> None: + """Test response generation handles connection error.""" + + async def mock_generator() -> AsyncIterator[str]: + yield "data: token\n\n" + raise APIConnectionError(message="Connection failed", request=mocker.Mock()) + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + mock_context.started_at = "2024-01-01T00:00:00Z" + mock_context.skip_userid_check = False + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + @pytest.mark.asyncio + async def test_generate_response_api_status_error( + self, mocker: MockerFixture + ) -> None: + """Test response generation handles API status error.""" + mock_request_obj = mocker.Mock() + + async def mock_generator() -> AsyncIterator[str]: + yield "data: token\n\n" + raise APIStatusError( + message="API error", response=mock_request_obj, body=None + ) + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.query_request = QueryRequest( + query="test" + ) # pyright: ignore[reportCallIssue] + mock_context.started_at = "2024-01-01T00:00:00Z" + mock_context.skip_userid_check = False + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_error_response = InternalServerErrorResponse.query_failed("API error") + mocker.patch( + "app.endpoints.streaming_query.handle_known_apistatus_errors", + return_value=mock_error_response, + ) + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + @pytest.mark.asyncio + async def test_generate_response_runtime_error_context_length( + self, mocker: MockerFixture + ) -> None: + """Test generate_response handles RuntimeError with context_length.""" + + async def mock_generator() -> AsyncIterator[str]: + yield "data: 
start\n\n" + raise RuntimeError("context_length exceeded") + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_error_response = mocker.Mock() + mock_error_response.status_code = 413 + mock_error_response.detail = mocker.Mock() + mock_error_response.detail.response = "Prompt too long" + mock_error_response.detail.cause = None + mocker.patch( + "app.endpoints.streaming_query.PromptTooLongResponse", + return_value=mock_error_response, + ) + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + @pytest.mark.asyncio + async def test_generate_response_runtime_error_other( + self, mocker: MockerFixture + ) -> None: + """Test generate_response handles RuntimeError without context_length.""" + + async def mock_generator() -> AsyncIterator[str]: + yield "data: start\n\n" + raise RuntimeError("Some other error") + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.conversation_id = "conv_123" + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + + mock_responses_params = mocker.Mock(spec=ResponsesApiParams) + mock_responses_params.model = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_error_response = mocker.Mock() + mock_error_response.status_code = 500 + mock_error_response.detail = mocker.Mock() + mock_error_response.detail.response = "Internal server error" + mock_error_response.detail.cause = None + mocker.patch( + "app.endpoints.streaming_query.InternalServerErrorResponse.generic", + return_value=mock_error_response, + ) + + result = [] + async for item in generate_response( + mock_generator(), mock_context, mock_responses_params, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + +class TestResponseGenerator: + """Tests for response_generator function.""" + + @pytest.mark.asyncio + async def test_response_generator_text_delta(self, mocker: MockerFixture) -> None: + """Test response generator processes text delta events.""" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=TextDeltaChunk) + chunk.type = "response.output_text.delta" + chunk.delta = "Hello" + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + + @pytest.mark.asyncio + async def test_response_generator_content_part_added( + self, mocker: MockerFixture + ) -> None: + """Test response generator processes content part added events.""" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock() + chunk.type = "response.content_part.added" 
+ yield chunk
+
+ mock_context = mocker.Mock(spec=ResponseGeneratorContext)
+ mock_context.query_request = QueryRequest(
+ query="test", media_type=MEDIA_TYPE_JSON
+ ) # pyright: ignore[reportCallIssue]
+ mock_context.model_id = "provider1/model1"
+
+ mock_turn_summary = TurnSummary()
+
+ result = []
+ async for item in response_generator(
+ mock_turn_response(), mock_context, mock_turn_summary
+ ):
+ result.append(item)
+
+ assert len(result) > 0
+
+ @pytest.mark.asyncio
+ async def test_response_generator_output_text_done(
+ self, mocker: MockerFixture
+ ) -> None:
+ """Test response generator processes output text done events."""
+
+ async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]:
+ chunk = mocker.Mock(spec=TextDoneChunk)
+ chunk.type = "response.output_text.done"
+ chunk.text = "Complete response"
+ yield chunk
+
+ mock_context = mocker.Mock(spec=ResponseGeneratorContext)
+ mock_context.query_request = QueryRequest(
+ query="test", media_type=MEDIA_TYPE_JSON
+ ) # pyright: ignore[reportCallIssue]
+ mock_context.model_id = "provider1/model1"
+
+ mock_turn_summary = TurnSummary()
+
+ mocker.patch(
+ "app.endpoints.streaming_query.extract_token_usage",
+ return_value=TokenCounter(input_tokens=0, output_tokens=0),
+ )
+ mocker.patch(
+ "app.endpoints.streaming_query.parse_referenced_documents", return_value=[]
+ )
+
+ async for _ in response_generator(
+ mock_turn_response(), mock_context, mock_turn_summary
+ ):
+ pass
+
+ assert mock_turn_summary.llm_response == "Complete response"
+
+ @pytest.mark.asyncio
+ async def test_response_generator_output_item_done_message_type(
+ self, mocker: MockerFixture
+ ) -> None:
+ """Test response generator skips message type items."""
+ mock_output_item = mocker.Mock()
+ mock_output_item.type = "message"
+
+ async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]:
+ chunk = mocker.Mock(spec=OutputItemDoneChunk)
+ chunk.type = "response.output_item.done"
+ chunk.item = mock_output_item
+ chunk.output_index = 0
+ yield chunk
+
+ mock_context = mocker.Mock(spec=ResponseGeneratorContext)
+ mock_context.query_request = QueryRequest(
+ query="test", media_type=MEDIA_TYPE_JSON
+ ) # pyright: ignore[reportCallIssue]
+ mock_context.model_id = "provider1/model1"
+
+ mock_turn_summary = TurnSummary()
+
+ mocker.patch(
+ "app.endpoints.streaming_query.extract_token_usage",
+ return_value=TokenCounter(input_tokens=0, output_tokens=0),
+ )
+ mocker.patch(
+ "app.endpoints.streaming_query.parse_referenced_documents", return_value=[]
+ )
+
+ result = []
+ async for item in response_generator(
+ mock_turn_response(), mock_context, mock_turn_summary
+ ):
+ result.append(item)
+
+ # Message items are skipped, so no tool call may be recorded.
+ assert not mock_turn_summary.tool_calls
+
+ @pytest.mark.asyncio
+ async def test_response_generator_output_item_done(
+ self, mocker: MockerFixture
+ ) -> None:
+ """Test response generator processes output item done events."""
+ mock_output_item = mocker.Mock()
+ mock_output_item.type = "tool_call"
+
+ async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]:
+ chunk = mocker.Mock(spec=OutputItemDoneChunk)
+ chunk.type = "response.output_item.done"
+ chunk.item = mock_output_item
+ chunk.output_index = 0
+ yield chunk
+
+ mock_context = mocker.Mock(spec=ResponseGeneratorContext)
+ mock_context.query_request = QueryRequest(
+ query="test", media_type=MEDIA_TYPE_JSON
+ ) # pyright: ignore[reportCallIssue]
+ mock_context.model_id = "provider1/model1"
+
+ mock_turn_summary = TurnSummary()
+
+ mock_tool_call = mocker.Mock()
+ 
mock_tool_call.model_dump.return_value = {"tool": "test"} + mocker.patch( + "app.endpoints.streaming_query.build_tool_call_summary", + return_value=(mock_tool_call, None), + ) + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + + @pytest.mark.asyncio + async def test_response_generator_output_item_done_with_tool_result( + self, mocker: MockerFixture + ) -> None: + """Test response generator processes output item done events with tool result.""" + mock_output_item = mocker.Mock() + mock_output_item.type = "tool_call" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=OutputItemDoneChunk) + chunk.type = "response.output_item.done" + chunk.item = mock_output_item + chunk.output_index = 0 + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_tool_call = mocker.Mock() + mock_tool_call.model_dump.return_value = {"tool": "test"} + mock_tool_result = mocker.Mock() + mock_tool_result.model_dump.return_value = {"result": "test_result"} + mocker.patch( + "app.endpoints.streaming_query.build_tool_call_summary", + return_value=(mock_tool_call, mock_tool_result), + ) + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert len(mock_turn_summary.tool_results) == 1 + + @pytest.mark.asyncio + async def test_response_generator_response_completed( + self, mocker: MockerFixture + ) -> None: + """Test response generator processes response completed events.""" + mock_response_obj = mocker.Mock(spec=OpenAIResponseObject) + mock_response_obj.usage = mocker.Mock(input_tokens=10, output_tokens=5) + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=CompletedChunk) + chunk.type = "response.completed" + chunk.response = mock_response_obj + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + mock_turn_summary.llm_response = "Response" + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=10, output_tokens=5), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + async for _ in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + pass + + assert mock_turn_summary.token_usage.input_tokens == 10 + + @pytest.mark.asyncio + async def test_response_generator_response_completed_uses_text_parts( + self, mocker: 
MockerFixture + ) -> None: + """Test response generator uses text_parts when llm_response is empty.""" + mock_response_obj = mocker.Mock(spec=OpenAIResponseObject) + mock_response_obj.usage = mocker.Mock(input_tokens=10, output_tokens=5) + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + # Add text delta first + delta_chunk = mocker.Mock(spec=TextDeltaChunk) + delta_chunk.type = "response.output_text.delta" + delta_chunk.delta = "Hello" + yield delta_chunk + + # Then completed (without output_text.done, so llm_response is empty) + completed_chunk = mocker.Mock(spec=CompletedChunk) + completed_chunk.type = "response.completed" + completed_chunk.response = mock_response_obj + yield completed_chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=10, output_tokens=5), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + # Should use text_parts for turn_complete event + assert len(result) > 0 + assert any("turn_complete" in item for item in result) + + @pytest.mark.asyncio + async def test_response_generator_response_incomplete( + self, mocker: MockerFixture + ) -> None: + """Test response generator processes incomplete response events.""" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=IncompleteChunk) + chunk.type = "response.incomplete" + mock_response = mocker.Mock() + mock_response.output = [] + # Create a simple object with message attribute as a string + mock_error = type("Error", (), {"message": "context_length exceeded"})() + mock_response.error = mock_error + chunk.response = mock_response + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + @pytest.mark.asyncio + async def test_response_generator_response_failed( + self, mocker: MockerFixture + ) -> None: + """Test response generator processes failed response events.""" + mock_error = mocker.Mock() + mock_error.message = "Error message" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=FailedChunk) + chunk.type = "response.failed" + mock_response = mocker.Mock() + mock_response.output = [] + mock_response.error = mock_error + chunk.response = mock_response + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", 
media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + @pytest.mark.asyncio + async def test_response_generator_response_failed_no_error( + self, mocker: MockerFixture + ) -> None: + """Test response generator handles failed response with no error object.""" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=FailedChunk) + chunk.type = "response.failed" + mock_response = mocker.Mock() + mock_response.output = [] + mock_response.error = None + chunk.response = mock_response + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + + @pytest.mark.asyncio + async def test_response_generator_response_failed_context_length( + self, mocker: MockerFixture + ) -> None: + """Test response generator handles failed response with context_length error.""" + mock_error = mocker.Mock() + mock_error.message = "context_length exceeded" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=FailedChunk) + chunk.type = "response.failed" + mock_response = mocker.Mock() + mock_response.output = [] + mock_response.error = mock_error + chunk.response = mock_response + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + + @pytest.mark.asyncio + async def test_response_generator_response_incomplete_no_error( + self, mocker: MockerFixture + ) -> None: + """Test response generator handles incomplete response with no error object.""" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=IncompleteChunk) + chunk.type = "response.incomplete" + mock_response = mocker.Mock() + mock_response.output = [] + mock_response.error = None + chunk.response = mock_response + yield 
chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + assert len(result) > 0 + assert any("error" in item for item in result) + + +class TestStreamHttpErrorEvent: + """Tests for stream_http_error_event function.""" + + def test_stream_http_error_event_json(self, mocker: MockerFixture) -> None: + """Test HTTP error event formatting for JSON media type.""" + error = InternalServerErrorResponse.query_failed("Test error") + mocker.patch("app.endpoints.streaming_query.logger") + + result = stream_http_error_event(error, MEDIA_TYPE_JSON) + + assert "error" in result + assert "Test error" in result + + def test_stream_http_error_event_text(self, mocker: MockerFixture) -> None: + """Test HTTP error event formatting for text media type.""" + error = InternalServerErrorResponse.query_failed("Test error") + mocker.patch("app.endpoints.streaming_query.logger") + + result = stream_http_error_event(error, MEDIA_TYPE_TEXT) + + assert "Status:" in result + assert "500" in result + assert "Test error" in result + + def test_stream_http_error_event_default(self, mocker: MockerFixture) -> None: + """Test HTTP error event formatting with default media type.""" + error = InternalServerErrorResponse.query_failed("Test error") + mocker.patch("app.endpoints.streaming_query.logger") + + result = stream_http_error_event(error) + + assert "error" in result + assert "500" in result or "status_code" in result + + +class TestStreamStartEvent: # pylint: disable=too-few-public-methods + """Tests for stream_start_event function.""" + + def test_stream_start_event(self) -> None: + """Test start event formatting.""" + result = stream_start_event("conv_123") + + assert "start" in result + assert "conv_123" in result + + +class TestShieldViolationGenerator: + """Tests for shield_violation_generator function.""" + + @pytest.mark.asyncio + async def test_shield_violation_generator_json(self) -> None: + """Test shield violation generator for JSON media type.""" + result = [] + async for item in shield_violation_generator( + "Violation message", MEDIA_TYPE_JSON + ): + result.append(item) + + assert len(result) > 0 + assert any("Violation message" in item for item in result) + + @pytest.mark.asyncio + async def test_shield_violation_generator_text(self) -> None: + """Test shield violation generator for text media type.""" + result = [] + async for item in shield_violation_generator( + "Violation message", MEDIA_TYPE_TEXT + ): + result.append(item) + + assert len(result) > 0 + + +class TestResponseGeneratorMCPCalls: + """Tests for MCP call specific event handling in response_generator.""" + + @pytest.mark.asyncio + async def test_response_generator_mcp_call_output_item_added( + self, mocker: MockerFixture + ) -> None: + """Test response generator stores MCP call item info when output_item.added.""" + mock_mcp_item = mocker.Mock(spec=MCPCall) + mock_mcp_item.type = "mcp_call" + mock_mcp_item.id = "mcp_call_123" + mock_mcp_item.name = 
"test_mcp_tool" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + chunk = mocker.Mock(spec=OutputItemAddedChunk) + chunk.type = "response.output_item.added" + chunk.item = mock_mcp_item + chunk.output_index = 0 + yield chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + # Should process without error + assert True + + @pytest.mark.asyncio + async def test_response_generator_mcp_call_arguments_done( + self, mocker: MockerFixture + ) -> None: + """Test response generator emits tool call when MCP arguments.done.""" + mock_mcp_item = mocker.Mock(spec=MCPCall) + mock_mcp_item.type = "mcp_call" + mock_mcp_item.id = "mcp_call_123" + mock_mcp_item.name = "test_mcp_tool" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + # First, output_item.added + added_chunk = mocker.Mock(spec=OutputItemAddedChunk) + added_chunk.type = "response.output_item.added" + added_chunk.item = mock_mcp_item + added_chunk.output_index = 0 + yield added_chunk + + # Then, arguments.done + args_chunk = mocker.Mock(spec=MCPArgsDoneChunk) + args_chunk.type = "response.mcp_call.arguments.done" + args_chunk.output_index = 0 + args_chunk.arguments = '{"param": "value"}' + yield args_chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_tool_call = mocker.Mock() + mock_tool_call.model_dump.return_value = { + "id": "mcp_call_123", + "name": "test_mcp_tool", + } + mocker.patch( + "app.endpoints.streaming_query.build_mcp_tool_call_from_arguments_done", + return_value=mock_tool_call, + ) + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + # Should emit tool call event + assert len(result) > 0 + assert len(mock_turn_summary.tool_calls) == 1 + + @pytest.mark.asyncio + async def test_response_generator_mcp_call_output_item_done_with_arguments_done( + self, mocker: MockerFixture + ) -> None: + """Test response generator emits only result when MCP output_item.done after arguments.""" + mock_mcp_item = mocker.Mock(spec=MCPCall) + mock_mcp_item.type = "mcp_call" + mock_mcp_item.id = "mcp_call_123" + mock_mcp_item.name = "test_mcp_tool" + mock_mcp_item.error = None + mock_mcp_item.output = "Result output" + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + # First, output_item.added + added_chunk = mocker.Mock(spec=OutputItemAddedChunk) + added_chunk.type = "response.output_item.added" + added_chunk.item 
= mock_mcp_item + added_chunk.output_index = 0 + yield added_chunk + + # Then, arguments.done (removes from mcp_calls dict) + args_chunk = mocker.Mock(spec=MCPArgsDoneChunk) + args_chunk.type = "response.mcp_call.arguments.done" + args_chunk.output_index = 0 + args_chunk.arguments = '{"param": "value"}' + yield args_chunk + + # Finally, output_item.done (should only emit result) + done_chunk = mocker.Mock(spec=OutputItemDoneChunk) + done_chunk.type = "response.output_item.done" + done_chunk.item = mock_mcp_item + done_chunk.output_index = 0 + yield done_chunk + + mock_context = mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_tool_call = mocker.Mock() + mock_tool_call.model_dump.return_value = {"id": "mcp_call_123"} + + # Use side_effect to actually remove item from mcp_calls dict + def build_mcp_tool_call_side_effect( + output_index: int, + arguments: str, + mcp_call_items: dict[int, tuple[str, str]], + ) -> Any: + # Remove item from dict to simulate real behavior + # arguments parameter is required by function signature but unused here + _ = arguments # noqa: F841 + if output_index in mcp_call_items: + del mcp_call_items[output_index] + return mock_tool_call + + mocker.patch( + "app.endpoints.streaming_query.build_mcp_tool_call_from_arguments_done", + side_effect=build_mcp_tool_call_side_effect, + ) + + mock_tool_result = mocker.Mock() + mock_tool_result.model_dump.return_value = { + "id": "mcp_call_123", + "status": "success", + } + mocker.patch( + "app.endpoints.streaming_query.build_tool_result_from_mcp_output_item_done", + return_value=mock_tool_result, + ) + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + # Should have one tool call (from arguments.done) and one tool result + assert len(mock_turn_summary.tool_calls) == 1 + assert len(mock_turn_summary.tool_results) == 1 + + @pytest.mark.asyncio + async def test_response_generator_mcp_call_output_item_done_without_arguments_done( + self, mocker: MockerFixture + ) -> None: + """Test response generator emits both call and result when MCP output_item.done.""" + mock_mcp_item = mocker.Mock(spec=MCPCall) + mock_mcp_item.type = "mcp_call" + mock_mcp_item.id = "mcp_call_123" + mock_mcp_item.name = "test_mcp_tool" + mock_mcp_item.error = None + mock_mcp_item.output = "Result output" + mock_mcp_item.arguments = '{"param": "value"}' + mock_mcp_item.server_label = None + + async def mock_turn_response() -> AsyncIterator[OpenAIResponseObjectStream]: + # Only output_item.added (arguments.done was missed) + added_chunk = mocker.Mock(spec=OutputItemAddedChunk) + added_chunk.type = "response.output_item.added" + added_chunk.item = mock_mcp_item + added_chunk.output_index = 0 + yield added_chunk + + # output_item.done (should emit both call and result since arguments.done didn't happen) + done_chunk = mocker.Mock(spec=OutputItemDoneChunk) + done_chunk.type = "response.output_item.done" + done_chunk.item = mock_mcp_item + done_chunk.output_index = 0 + yield done_chunk + + mock_context = 
mocker.Mock(spec=ResponseGeneratorContext) + mock_context.query_request = QueryRequest( + query="test", media_type=MEDIA_TYPE_JSON + ) # pyright: ignore[reportCallIssue] + mock_context.model_id = "provider1/model1" + + mock_turn_summary = TurnSummary() + + mock_tool_call = mocker.Mock() + mock_tool_call.model_dump.return_value = {"id": "mcp_call_123"} + mock_tool_result = mocker.Mock() + mock_tool_result.model_dump.return_value = { + "id": "mcp_call_123", + "status": "success", + } + mocker.patch( + "app.endpoints.streaming_query.build_tool_call_summary", + return_value=(mock_tool_call, mock_tool_result), + ) + + mocker.patch( + "app.endpoints.streaming_query.extract_token_usage", + return_value=TokenCounter(input_tokens=0, output_tokens=0), + ) + mocker.patch( + "app.endpoints.streaming_query.parse_referenced_documents", return_value=[] + ) + + result = [] + async for item in response_generator( + mock_turn_response(), mock_context, mock_turn_summary + ): + result.append(item) + + # Should have both tool call and result (fallback behavior) + assert len(mock_turn_summary.tool_calls) == 1 + assert len(mock_turn_summary.tool_results) == 1 diff --git a/tests/unit/app/endpoints/test_streaming_query_old.py b/tests/unit/app/endpoints/test_streaming_query_old.py deleted file mode 100644 index 9552d2885..000000000 --- a/tests/unit/app/endpoints/test_streaming_query_old.py +++ /dev/null @@ -1,654 +0,0 @@ -"""Unit tests for the /streaming-query REST API endpoint.""" - -# pylint: disable=too-many-lines,too-many-function-args -import json -from typing import Any - -import pytest -from pydantic import AnyUrl -from pytest_mock import MockerFixture - -from app.endpoints.streaming_query_old import ( - LLM_TOKEN_EVENT, - LLM_TOOL_CALL_EVENT, - LLM_TOOL_RESULT_EVENT, - generic_llm_error, - prompt_too_long_error, - stream_end_event, - stream_event, -) -from configuration import AppConfig -from constants import MEDIA_TYPE_JSON, MEDIA_TYPE_TEXT -from models.requests import QueryRequest -from models.responses import ReferencedDocument -from utils.token_counter import TokenCounter - -# Note: content_delta module doesn't exist in llama-stack-client 0.3.x -# These are mock classes for backward compatibility with Agent API tests -# pylint: disable=too-few-public-methods,redefined-builtin - - -class TextDelta: - """Mock TextDelta for Agent API tests.""" - - def __init__(self, text: str, type: str = "text"): # noqa: A002 - """ - Initialize the object with textual content and a chunk type. - - Parameters: - text (str): The textual content for this instance. - type (str): The content type or category (for example, "text"). Defaults to "text". - """ - self.text = text - self.type = type - - -class ToolCallDelta: - """Mock ToolCallDelta for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -# Note: Agent API types don't exist in llama-stack-client 0.3.x -# These are mock classes for backward compatibility with Agent API tests - - -class TurnResponseEvent: - """Mock TurnResponseEvent for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. 
- - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseStreamChunk: - """Mock AgentTurnResponseStreamChunk for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseStepCompletePayload: - """Mock AgentTurnResponseStepCompletePayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseStepProgressPayload: - """Mock AgentTurnResponseStepProgressPayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseTurnAwaitingInputPayload: - """Mock AgentTurnResponseTurnAwaitingInputPayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseTurnCompletePayload: - """Mock AgentTurnResponseTurnCompletePayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class AgentTurnResponseTurnStartPayload: - """Mock AgentTurnResponseTurnStartPayload for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class ToolExecutionStep: - """Mock ToolExecutionStep for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. 
- - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -class ToolResponse: - """Mock ToolResponse for Agent API tests.""" - - def __init__(self, **kwargs: Any): - """ - Initialize the instance by setting attributes from the provided keyword arguments. - - Parameters: - **kwargs: Any - Attribute names and values to assign to the instance. Each key in - `kwargs` becomes an attribute on the created object with the - corresponding value. - """ - for key, value in kwargs.items(): - setattr(self, key, value) - - -# pylint: enable=too-few-public-methods,redefined-builtin - -MOCK_AUTH = ( - "017adfa4-7cc6-46e4-b663-3653e1ae69df", - "mock_username", - False, - "mock_token", -) - - -def mock_database_operations(mocker: MockerFixture) -> None: - """Helper function to mock database operations for streaming query endpoints. - - Configure test mocks for conversation ownership validation and post-stream - cleanup used by streaming-query tests. - - Parameters: - mocker (MockerFixture): Pytest-mock fixture used to patch functions. - After calling this helper, `validate_conversation_ownership` is patched - to return `True` and `cleanup_after_streaming` is patched to an async - no-op. - """ - mocker.patch( - "app.endpoints.streaming_query.validate_conversation_ownership", - return_value=True, - ) - # Mock the cleanup function that handles all post-streaming database/cache work - mocker.patch( - "app.endpoints.streaming_query.cleanup_after_streaming", - mocker.AsyncMock(return_value=None), - ) - - -def mock_metrics(mocker: MockerFixture) -> None: - """Helper function to mock metrics operations for streaming query endpoints.""" - # Mock the metrics that are used in the streaming query endpoints - mocker.patch("metrics.llm_token_sent_total") - mocker.patch("metrics.llm_token_received_total") - mocker.patch("metrics.llm_calls_total") - - -SAMPLE_KNOWLEDGE_SEARCH_RESULTS = [ - """knowledge_search tool found 2 chunks: -BEGIN of knowledge_search tool results. -""", - """Result 1 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc1', 'title': 'Doc1', 'document_id': 'doc-1', \ -'source': None} -""", - """Result 2 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc2', 'title': 'Doc2', 'document_id': 'doc-2', \ -'source': None} -""", - """END of knowledge_search tool results. -""", - # Following metadata contains an intentionally incorrect keyword "Title" (instead of "title") - # and it is not picked as a referenced document. - """Result 3 -Content: ABC -Metadata: {'docs_url': 'https://example.com/doc3', 'Title': 'Doc3', 'document_id': 'doc-3', \ -'source': None} -""", - """The above results were retrieved to help answer the user\'s query: "Sample Query". -Use them as supporting information only in answering this query. -""", -] - - -@pytest.fixture(autouse=True, name="setup_configuration") -def setup_configuration_fixture() -> AppConfig: - """Set up configuration for tests. - - Create and initialize an AppConfig instance preconfigured for unit tests. - - The configuration uses a local service (localhost:8080), a test Llama Stack - API key and URL, disables user transcript collection, and sets a noop - conversation cache and empty MCP servers to avoid external dependencies. - - Returns: - AppConfig: An initialized AppConfig populated with the test settings. 
- """ - config_dict = { - "name": "test", - "service": { - "host": "localhost", - "port": 8080, - "auth_enabled": False, - "workers": 1, - "color_log": True, - "access_log": True, - }, - "llama_stack": { - "api_key": "test-key", - "url": "http://test.com:1234", - "use_as_library_client": False, - }, - "user_data_collection": { - "transcripts_enabled": False, - }, - "mcp_servers": [], - "conversation_cache": { - "type": "noop", - }, - } - cfg = AppConfig() - cfg.init_from_dict(config_dict) - return cfg - - -# ============================================================================ -# OLS Compatibility Tests -# ============================================================================ - - -class TestOLSStreamEventFormatting: - """Test the stream_event function for both media types (OLS compatibility).""" - - def test_stream_event_json_token(self) -> None: - """Test token event formatting for JSON media type.""" - data = {"id": 0, "token": "Hello"} - result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) - - expected = 'data: {"event": "token", "data": {"id": 0, "token": "Hello"}}\n\n' - assert result == expected - - def test_stream_event_text_token(self) -> None: - """Test token event formatting for text media type.""" - - data = {"id": 0, "token": "Hello"} - result = stream_event(data, LLM_TOKEN_EVENT, MEDIA_TYPE_TEXT) - - assert result == "Hello" - - def test_stream_event_json_tool_call(self) -> None: - """Test tool call event formatting for JSON media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) - - expected = ( - 'data: {"event": "tool_call", "data": {"id": 0, "token": ' - '{"tool_name": "search", "arguments": {"query": "test"}}}}\n\n' - ) - assert result == expected - - def test_stream_event_text_tool_call(self) -> None: - """Test tool call event formatting for text media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - result = stream_event(data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_TEXT) - - expected = ( - '\nTool call: {"id": 0, "token": ' - '{"tool_name": "search", "arguments": {"query": "test"}}}\n' - ) - assert result == expected - - def test_stream_event_json_tool_result(self) -> None: - """Test tool result event formatting for JSON media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) - - expected = ( - 'data: {"event": "tool_result", "data": {"id": 0, "token": ' - '{"tool_name": "search", "response": "Found results"}}}\n\n' - ) - assert result == expected - - def test_stream_event_text_tool_result(self) -> None: - """Test tool result event formatting for text media type.""" - - data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result = stream_event(data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_TEXT) - - expected = ( - '\nTool result: {"id": 0, "token": ' - '{"tool_name": "search", "response": "Found results"}}\n' - ) - assert result == expected - - def test_stream_event_unknown_type(self) -> None: - """Test handling of unknown event types.""" - - data = {"id": 0, "token": "test"} - result = stream_event(data, "unknown_event", MEDIA_TYPE_TEXT) - - assert result == "" - - -class TestOLSStreamEndEvent: - """Test the stream_end_event function for both media types (OLS compatibility).""" - - def test_stream_end_event_json(self) -> None: 
- """Test end event formatting for JSON media type.""" - - metadata_map = { - "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, - "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" - ), - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" - ), - ] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_JSON, - ) - - # Parse the result to verify structure - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "end" - assert "referenced_documents" in parsed["data"] - assert len(parsed["data"]["referenced_documents"]) == 2 - assert parsed["data"]["referenced_documents"][0]["doc_title"] == "Test Doc 1" - assert ( - parsed["data"]["referenced_documents"][0]["doc_url"] - == "https://example.com/doc1" - ) - assert "available_quotas" in parsed - - def test_stream_end_event_text(self) -> None: - """Test end event formatting for text media type.""" - - metadata_map = { - "doc1": {"title": "Test Doc 1", "docs_url": "https://example.com/doc1"}, - "doc2": {"title": "Test Doc 2", "docs_url": "https://example.com/doc2"}, - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc1"), doc_title="Test Doc 1" - ), - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc2"), doc_title="Test Doc 2" - ), - ] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_TEXT, - ) - - expected = ( - "\n\n---\n\nTest Doc 1: https://example.com/doc1\n" - "Test Doc 2: https://example.com/doc2" - ) - assert result == expected - - def test_stream_end_event_text_no_docs(self) -> None: - """Test end event formatting for text media type with no documents.""" - - metadata_map: dict = {} - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents: list[ReferencedDocument] = [] - result = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_TEXT, - ) - - assert result == "" - - -class TestOLSErrorHandling: - """Test error handling functions (OLS compatibility).""" - - def test_prompt_too_long_error_json(self) -> None: - """Test prompt too long error for JSON media type.""" - - error = Exception("Prompt exceeds maximum length") - result = prompt_too_long_error(error, MEDIA_TYPE_JSON) - - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - assert parsed["event"] == "error" - assert parsed["data"]["status_code"] == 413 - assert parsed["data"]["response"] == "Prompt is too long" - assert parsed["data"]["cause"] == "Prompt exceeds maximum length" - - def test_prompt_too_long_error_text(self) -> None: - """Test prompt too long error for text media type.""" - - error = Exception("Prompt exceeds maximum length") - result = prompt_too_long_error(error, MEDIA_TYPE_TEXT) - - assert result == "Prompt is too long: Prompt 
exceeds maximum length" - - def test_generic_llm_error_json(self) -> None: - """Test generic LLM error for JSON media type.""" - - error = Exception("Connection failed") - result = generic_llm_error(error, MEDIA_TYPE_JSON) - - data_part = result.replace("data: ", "").strip() - parsed = json.loads(data_part) - assert parsed["event"] == "error" - assert parsed["data"]["response"] == "Internal server error" - assert parsed["data"]["cause"] == "Connection failed" - - def test_generic_llm_error_text(self) -> None: - """Test generic LLM error for text media type.""" - - error = Exception("Connection failed") - result = generic_llm_error(error, MEDIA_TYPE_TEXT) - - assert result == "Error: Connection failed" - - -class TestOLSCompatibilityIntegration: - """Integration tests for OLS compatibility.""" - - def test_media_type_validation(self) -> None: - """Test that media type validation works correctly.""" - - # Valid media types - valid_request = QueryRequest(query="test", media_type="application/json") - assert valid_request.media_type == "application/json" - - valid_request = QueryRequest(query="test", media_type="text/plain") - assert valid_request.media_type == "text/plain" - - # Invalid media type should raise error - with pytest.raises(ValueError, match="media_type must be either"): - QueryRequest(query="test", media_type="invalid/type") - - def test_ols_event_structure(self) -> None: - """Test that events follow OLS structure.""" - - # Test token event structure - token_data = {"id": 0, "token": "Hello"} - token_event = stream_event(token_data, LLM_TOKEN_EVENT, MEDIA_TYPE_JSON) - - data_part = token_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "token" - assert "id" in parsed["data"] - assert "token" in parsed["data"] - assert "role" not in parsed["data"] # Role field is not included - - # Test tool call event structure - tool_data = { - "id": 0, - "token": {"tool_name": "search", "arguments": {"query": "test"}}, - } - tool_event = stream_event(tool_data, LLM_TOOL_CALL_EVENT, MEDIA_TYPE_JSON) - - data_part = tool_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "tool_call" - assert "id" in parsed["data"] - assert "role" not in parsed["data"] - assert "token" in parsed["data"] - - # Test tool result event structure - result_data = { - "id": 0, - "token": {"tool_name": "search", "response": "Found results"}, - } - result_event = stream_event(result_data, LLM_TOOL_RESULT_EVENT, MEDIA_TYPE_JSON) - - data_part = result_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "tool_result" - assert "id" in parsed["data"] - assert "role" not in parsed["data"] - assert "token" in parsed["data"] - - def test_ols_end_event_structure(self) -> None: - """Test that end event follows OLS structure.""" - - metadata_map = { - "doc1": {"title": "Test Doc", "docs_url": "https://example.com/doc"} - } - # Create mock objects for the test - mock_token_usage = TokenCounter(input_tokens=100, output_tokens=50) - available_quotas: dict[str, int] = {} - referenced_documents = [ - ReferencedDocument( - doc_url=AnyUrl("https://example.com/doc"), doc_title="Test Doc" - ), - ] - end_event = stream_end_event( - metadata_map, - mock_token_usage, - available_quotas, - referenced_documents, - MEDIA_TYPE_JSON, - ) - data_part = end_event.replace("data: ", "").strip() - parsed = json.loads(data_part) - - assert parsed["event"] == "end" - assert "referenced_documents" in 
parsed["data"] - assert "truncated" in parsed["data"] - assert "input_tokens" in parsed["data"] - assert "output_tokens" in parsed["data"] - assert "available_quotas" in parsed # At root level, not inside data diff --git a/tests/unit/app/test_routers.py b/tests/unit/app/test_routers.py index 6ef18c5a5..776aa0472 100644 --- a/tests/unit/app/test_routers.py +++ b/tests/unit/app/test_routers.py @@ -8,8 +8,7 @@ from app.endpoints import ( conversations_v2, - conversations_v3, - query, + conversations_v1, root, info, models, @@ -26,6 +25,7 @@ mcp_auth, rlsapi_v1, a2a, + query, ) # noqa:E402 @@ -114,9 +114,7 @@ def test_include_routers() -> None: assert mcp_auth.router in app.get_routers() assert shields.router in app.get_routers() assert providers.router in app.get_routers() - # assert query.router in app.get_routers() assert query.router in app.get_routers() - # assert streaming_query.router in app.get_routers() assert streaming_query.router in app.get_routers() assert config.router in app.get_routers() assert feedback.router in app.get_routers() @@ -124,7 +122,7 @@ def test_include_routers() -> None: assert authorized.router in app.get_routers() # assert conversations.router in app.get_routers() assert conversations_v2.router in app.get_routers() - assert conversations_v3.router in app.get_routers() + assert conversations_v1.router in app.get_routers() assert metrics.router in app.get_routers() assert rlsapi_v1.router in app.get_routers() assert a2a.router in app.get_routers() @@ -153,8 +151,6 @@ def test_check_prefixes() -> None: assert app.get_router_prefix(shields.router) == "/v1" assert app.get_router_prefix(providers.router) == "/v1" assert app.get_router_prefix(rags.router) == "/v1" - # assert app.get_router_prefix(query.router) == "/v1" - # assert app.get_router_prefix(streaming_query.router) == "/v1" assert app.get_router_prefix(query.router) == "/v1" assert app.get_router_prefix(streaming_query.router) == "/v1" assert app.get_router_prefix(config.router) == "/v1" @@ -163,7 +159,7 @@ def test_check_prefixes() -> None: assert app.get_router_prefix(authorized.router) == "" # assert app.get_router_prefix(conversations.router) == "/v1" assert app.get_router_prefix(conversations_v2.router) == "/v2" - assert app.get_router_prefix(conversations_v3.router) == "/v1" + assert app.get_router_prefix(conversations_v1.router) == "/v1" assert app.get_router_prefix(metrics.router) == "" assert app.get_router_prefix(rlsapi_v1.router) == "/v1" assert app.get_router_prefix(a2a.router) == "" diff --git a/tests/unit/authentication/test_noop.py b/tests/unit/authentication/test_noop.py index e651ff673..d179f33e7 100644 --- a/tests/unit/authentication/test_noop.py +++ b/tests/unit/authentication/test_noop.py @@ -1,6 +1,8 @@ """Unit tests for functions defined in authentication/noop.py""" -from fastapi import Request +from fastapi import Request, HTTPException +import pytest + from authentication.noop import NoopAuthDependency from constants import DEFAULT_USER_NAME, DEFAULT_USER_UID, NO_USER_TOKEN @@ -37,3 +39,18 @@ async def test_noop_auth_dependency_custom_user_id() -> None: assert username == DEFAULT_USER_NAME assert skip_userid_check is True assert user_token == NO_USER_TOKEN + + +async def test_noop_auth_dependency_empty_user_id() -> None: + """Test that NoopAuthDependency rejects empty user_id with HTTP 400.""" + dependency = NoopAuthDependency() + + # Create a mock request with empty user_id + request = Request(scope={"type": "http", "query_string": b"user_id="}) + + # Assert that an HTTPException is 
raised for empty user_id + with pytest.raises(HTTPException) as exc_info: + await dependency(request) + + assert exc_info.value.status_code == 400 + assert exc_info.value.detail == "user_id cannot be empty" diff --git a/tests/unit/authentication/test_noop_with_token.py b/tests/unit/authentication/test_noop_with_token.py index 9d08a87a3..d120f072b 100644 --- a/tests/unit/authentication/test_noop_with_token.py +++ b/tests/unit/authentication/test_noop_with_token.py @@ -117,3 +117,26 @@ async def test_noop_with_token_auth_dependency_no_bearer() -> None: detail = cast(dict[str, str], exc_info.value.detail) assert detail["response"] == ("Missing or invalid credentials provided by client") assert detail["cause"] == "No token found in Authorization header" + + +async def test_noop_with_token_auth_dependency_empty_user_id() -> None: + """Test that NoopWithTokenAuthDependency rejects empty user_id with HTTP 400.""" + dependency = NoopWithTokenAuthDependency() + + # Create a mock request with empty user_id but valid token + request = Request( + scope={ + "type": "http", + "query_string": b"user_id=", + "headers": [ + (b"authorization", b"Bearer spongebob-token"), + ], + }, + ) + + # Assert that an HTTPException is raised for empty user_id + with pytest.raises(HTTPException) as exc_info: + await dependency(request) + + assert exc_info.value.status_code == 400 + assert exc_info.value.detail == "user_id cannot be empty" diff --git a/tests/unit/cache/test_postgres_cache.py b/tests/unit/cache/test_postgres_cache.py index 0b4a3011c..42d54dd54 100644 --- a/tests/unit/cache/test_postgres_cache.py +++ b/tests/unit/cache/test_postgres_cache.py @@ -12,9 +12,9 @@ from cache.postgres_cache import PostgresCache from models.cache_entry import CacheEntry from models.config import PostgreSQLDatabaseConfiguration -from models.responses import ConversationData, ReferencedDocument +from models.responses import ConversationData from utils import suid -from utils.types import ToolCallSummary, ToolResultSummary +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary USER_ID_1 = suid.get_suid() USER_ID_2 = suid.get_suid() diff --git a/tests/unit/cache/test_sqlite_cache.py b/tests/unit/cache/test_sqlite_cache.py index a62195db7..748835af6 100644 --- a/tests/unit/cache/test_sqlite_cache.py +++ b/tests/unit/cache/test_sqlite_cache.py @@ -11,9 +11,9 @@ from models.config import SQLiteDatabaseConfiguration from models.cache_entry import CacheEntry -from models.responses import ConversationData, ReferencedDocument +from models.responses import ConversationData from utils import suid -from utils.types import ToolCallSummary, ToolResultSummary +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary from cache.cache_error import CacheError from cache.sqlite_cache import SQLiteCache diff --git a/tests/unit/models/config/test_authentication_configuration.py b/tests/unit/models/config/test_authentication_configuration.py index 5e99f3aa2..7575751a0 100644 --- a/tests/unit/models/config/test_authentication_configuration.py +++ b/tests/unit/models/config/test_authentication_configuration.py @@ -307,7 +307,7 @@ def test_authentication_configuration_in_config_noop() -> None: llama_stack=LlamaStackConfiguration( use_as_library_client=True, library_client_config_path="tests/configuration/run.yaml", - url="localhost", + url="http://localhost", api_key=SecretStr(""), timeout=60, ), @@ -346,7 +346,7 @@ def test_authentication_configuration_skip_readiness_probe() -> None: 
llama_stack=LlamaStackConfiguration( use_as_library_client=True, library_client_config_path="tests/configuration/run.yaml", - url="localhost", + url="http://localhost", api_key=SecretStr(""), timeout=60, ), @@ -393,7 +393,7 @@ def test_authentication_configuration_in_config_k8s() -> None: llama_stack=LlamaStackConfiguration( use_as_library_client=True, library_client_config_path="tests/configuration/run.yaml", - url="localhost", + url="http://localhost", api_key=SecretStr(""), timeout=60, ), @@ -450,7 +450,7 @@ def test_authentication_configuration_in_config_rh_identity() -> None: llama_stack=LlamaStackConfiguration( use_as_library_client=True, library_client_config_path="tests/configuration/run.yaml", - url="localhost", + url="http://localhost", api_key=SecretStr(""), timeout=60, ), @@ -497,7 +497,7 @@ def test_authentication_configuration_in_config_jwktoken() -> None: llama_stack=LlamaStackConfiguration( use_as_library_client=True, library_client_config_path="tests/configuration/run.yaml", - url="localhost", + url="http://localhost", api_key=SecretStr(""), timeout=60, ), diff --git a/tests/unit/models/config/test_dump_configuration.py b/tests/unit/models/config/test_dump_configuration.py index c293c4a7d..436053c0e 100644 --- a/tests/unit/models/config/test_dump_configuration.py +++ b/tests/unit/models/config/test_dump_configuration.py @@ -119,6 +119,7 @@ def test_dump_configuration(tmp_path: Path) -> None: "tls_key_password": "tests/configuration/password", "tls_key_path": "tests/configuration/server.key", }, + "root_path": "", "cors": { "allow_credentials": False, "allow_headers": [ @@ -442,6 +443,7 @@ def test_dump_configuration_with_quota_limiters(tmp_path: Path) -> None: "tls_key_password": "tests/configuration/password", "tls_key_path": "tests/configuration/server.key", }, + "root_path": "", "cors": { "allow_credentials": False, "allow_headers": [ @@ -662,6 +664,7 @@ def test_dump_configuration_with_quota_limiters_different_values( "tls_key_password": "tests/configuration/password", "tls_key_path": "tests/configuration/server.key", }, + "root_path": "", "cors": { "allow_credentials": False, "allow_headers": [ @@ -862,6 +865,7 @@ def test_dump_configuration_byok(tmp_path: Path) -> None: "tls_key_password": "tests/configuration/password", "tls_key_path": "tests/configuration/server.key", }, + "root_path": "", "cors": { "allow_credentials": False, "allow_headers": [ @@ -1051,6 +1055,7 @@ def test_dump_configuration_pg_namespace(tmp_path: Path) -> None: "tls_key_password": "tests/configuration/password", "tls_key_path": "tests/configuration/server.key", }, + "root_path": "", "cors": { "allow_credentials": False, "allow_headers": [ diff --git a/tests/unit/models/config/test_llama_stack_configuration.py b/tests/unit/models/config/test_llama_stack_configuration.py index 4d8465b4c..cc2db8236 100644 --- a/tests/unit/models/config/test_llama_stack_configuration.py +++ b/tests/unit/models/config/test_llama_stack_configuration.py @@ -1,6 +1,7 @@ """Unit tests for LlamaStackConfiguration model.""" import pytest +from pydantic import ValidationError from utils.checks import InvalidConfigurationError @@ -89,3 +90,37 @@ def test_llama_stack_wrong_configuration_no_config_file() -> None: LlamaStackConfiguration( use_as_library_client=True ) # pyright: ignore[reportCallIssue] + + +def test_llama_stack_configuration_valid_http_url() -> None: + """Test that valid HTTP URLs are accepted.""" + config = LlamaStackConfiguration( + url="http://localhost:8321" + ) # pyright: ignore[reportCallIssue] + assert 
config is not None
+    assert str(config.url) == "http://localhost:8321/"
+
+
+def test_llama_stack_configuration_valid_https_url() -> None:
+    """Test that valid HTTPS URLs are accepted."""
+    config = LlamaStackConfiguration(
+        url="https://llama-stack.example.com:8321"
+    )  # pyright: ignore[reportCallIssue]
+    assert config is not None
+    assert str(config.url) == "https://llama-stack.example.com:8321/"
+
+
+def test_llama_stack_configuration_malformed_url_rejected() -> None:
+    """Test that malformed URLs are rejected with a ValidationError."""
+    with pytest.raises(ValidationError, match="Input should be a valid URL"):
+        LlamaStackConfiguration(
+            url="not-a-valid-url"
+        )  # pyright: ignore[reportCallIssue]
+
+
+def test_llama_stack_configuration_invalid_scheme_rejected() -> None:
+    """Test that URLs without http/https scheme are rejected."""
+    with pytest.raises(ValidationError, match="URL scheme should be 'http' or 'https'"):
+        LlamaStackConfiguration(
+            url="ftp://localhost:8321"
+        )  # pyright: ignore[reportCallIssue]
diff --git a/tests/unit/models/config/test_service_configuration.py b/tests/unit/models/config/test_service_configuration.py
index 986424e48..a3ce865db 100644
--- a/tests/unit/models/config/test_service_configuration.py
+++ b/tests/unit/models/config/test_service_configuration.py
@@ -19,6 +19,7 @@ def test_service_configuration_constructor() -> None:
     assert s.port == 8080
     assert s.auth_enabled is False
     assert s.workers == 1
+    assert s.root_path == ""
     assert s.color_log is True
     assert s.access_log is True
     assert s.tls_config == TLSConfiguration()  # pyright: ignore[reportCallIssue]
@@ -33,6 +34,27 @@ def test_service_configuration_port_value() -> None:
         ServiceConfiguration(port=100000)  # pyright: ignore[reportCallIssue]
 
 
+def test_service_configuration_root_path() -> None:
+    """Test the ServiceConfiguration root_path field."""
+    s = ServiceConfiguration(
+        root_path="/api/lightspeed"
+    )  # pyright: ignore[reportCallIssue]
+    assert s.root_path == "/api/lightspeed"
+
+
+def test_service_configuration_root_path_validation() -> None:
+    """Test root_path validation rejects invalid formats."""
+    with pytest.raises(ValidationError, match="root_path must start with '/'"):
+        ServiceConfiguration(
+            root_path="api/lightspeed"
+        )  # pyright: ignore[reportCallIssue]
+
+    with pytest.raises(ValidationError, match="root_path must not end with '/'"):
+        ServiceConfiguration(
+            root_path="/api/lightspeed/"
+        )  # pyright: ignore[reportCallIssue]
+
+
 def test_service_configuration_workers_value() -> None:
     """Test the ServiceConfiguration workers value validation."""
     with pytest.raises(ValidationError, match="Input should be greater than 0"):
diff --git a/tests/unit/models/responses/README.md b/tests/unit/models/responses/README.md
index dd662770d..b76b1b253 100644
--- a/tests/unit/models/responses/README.md
+++ b/tests/unit/models/responses/README.md
@@ -15,6 +15,10 @@ Unit tests for QueryResponse model.
 ## [test_rag_chunk.py](test_rag_chunk.py)
 Unit tests for RAGChunk model.
 
+## [test_response_types.py](test_response_types.py)
+Unit tests for response-related type models defined in models/responses.py.
+
 ## [test_successful_responses.py](test_successful_responses.py)
 Unit tests for all successful response models.
+ diff --git a/tests/unit/models/responses/test_query_response.py b/tests/unit/models/responses/test_query_response.py index a6c846f06..f27e02da3 100644 --- a/tests/unit/models/responses/test_query_response.py +++ b/tests/unit/models/responses/test_query_response.py @@ -2,8 +2,8 @@ from pydantic import AnyUrl -from models.responses import QueryResponse, ReferencedDocument -from utils.types import ToolCallSummary, ToolResultSummary +from models.responses import QueryResponse +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary class TestQueryResponse: diff --git a/tests/unit/models/responses/test_response_types.py b/tests/unit/models/responses/test_response_types.py new file mode 100644 index 000000000..69743e1db --- /dev/null +++ b/tests/unit/models/responses/test_response_types.py @@ -0,0 +1,83 @@ +"""Unit tests for response-related type models defined in models/responses.py.""" + +import pytest +from pydantic import ValidationError + +from models.responses import ConversationData, ConversationDetails, ProviderHealthStatus + + +class TestConversationDetails: + """Test cases for ConversationDetails type.""" + + def test_constructor(self) -> None: + """Test ConversationDetails with all fields.""" + details = ConversationDetails( + conversation_id="123e4567-e89b-12d3-a456-426614174000", + created_at="2024-01-01T00:00:00Z", + last_message_at="2024-01-01T00:05:00Z", + message_count=5, + last_used_model="gpt-4", + last_used_provider="openai", + topic_summary="Test topic", + ) + assert details.conversation_id == "123e4567-e89b-12d3-a456-426614174000" + assert details.created_at == "2024-01-01T00:00:00Z" + assert details.last_message_at == "2024-01-01T00:05:00Z" + assert details.message_count == 5 + assert details.last_used_model == "gpt-4" + assert details.last_used_provider == "openai" + assert details.topic_summary == "Test topic" + + def test_missing_required_fields(self) -> None: + """Test ConversationDetails raises ValidationError when required fields are missing.""" + with pytest.raises(ValidationError): + ConversationDetails() # type: ignore[call-arg] + + +class TestConversationData: + """Test cases for ConversationData type.""" + + def test_constructor(self) -> None: + """Test ConversationData with all fields.""" + data = ConversationData( + conversation_id="123e4567-e89b-12d3-a456-426614174000", + topic_summary="Test topic", + last_message_timestamp=1704067200.0, + ) + assert data.conversation_id == "123e4567-e89b-12d3-a456-426614174000" + assert data.topic_summary == "Test topic" + assert data.last_message_timestamp == 1704067200.0 + + def test_topic_summary_none(self) -> None: + """Test ConversationData with None topic_summary.""" + data = ConversationData( + conversation_id="conv-123", + topic_summary=None, + last_message_timestamp=1704067200.0, + ) + assert data.topic_summary is None + + def test_missing_required_fields(self) -> None: + """Test ConversationData raises ValidationError when required fields are missing.""" + with pytest.raises(ValidationError): + ConversationData() # type: ignore[call-arg] + + +class TestProviderHealthStatus: + """Test cases for ProviderHealthStatus type.""" + + def test_constructor(self) -> None: + """Test ProviderHealthStatus with all fields.""" + status_obj = ProviderHealthStatus( + provider_id="provider1", + status="healthy", + message="All systems operational", + ) + assert status_obj.provider_id == "provider1" + assert status_obj.status == "healthy" + assert status_obj.message == "All systems operational" + + def 
test_missing_required_fields(self) -> None: + """Test ProviderHealthStatus raises ValidationError when required fields are missing.""" + with pytest.raises(ValidationError): + ProviderHealthStatus() # type: ignore[call-arg] diff --git a/tests/unit/models/responses/test_successful_responses.py b/tests/unit/models/responses/test_successful_responses.py index 996b98f46..408ce8725 100644 --- a/tests/unit/models/responses/test_successful_responses.py +++ b/tests/unit/models/responses/test_successful_responses.py @@ -1,4 +1,4 @@ -# pylint: disable=unsupported-membership-test,unsubscriptable-object, too-many-lines +# pylint: disable=unsupported-membership-test,unsubscriptable-object,too-many-lines """Unit tests for all successful response models.""" @@ -16,9 +16,7 @@ AbstractSuccessfulResponse, AuthorizedResponse, ConfigurationResponse, - ConversationData, ConversationDeleteResponse, - ConversationDetails, ConversationResponse, ConversationsListResponse, ConversationsListResponseV2, @@ -27,19 +25,22 @@ FeedbackStatusUpdateResponse, InfoResponse, LivenessResponse, + MCPClientAuthOptionsResponse, + MCPServerAuthInfo, ModelsResponse, - ProviderHealthStatus, ProviderResponse, ProvidersListResponse, QueryResponse, + RAGInfoResponse, + RAGListResponse, ReadinessResponse, - ReferencedDocument, ShieldsResponse, StatusResponse, StreamingQueryResponse, ToolsResponse, ) -from utils.types import ToolCallSummary, ToolResultSummary +from models.responses import ConversationData, ConversationDetails, ProviderHealthStatus +from utils.types import ReferencedDocument, ToolCallSummary, ToolResultSummary class TestModelsResponse: @@ -592,6 +593,10 @@ def test_constructor(self) -> None: {"content": "Hello", "type": "user"}, {"content": "Hi there!", "type": "assistant"}, ], + "tool_calls": [], + "tool_results": [], + "provider": "google", + "model": "gemini-2.0-flash-exp", "started_at": "2024-01-01T00:01:00Z", "completed_at": "2024-01-01T00:01:05Z", } @@ -602,7 +607,9 @@ def test_constructor(self) -> None: ) assert isinstance(response, AbstractSuccessfulResponse) assert response.conversation_id == "123e4567-e89b-12d3-a456-426614174000" - assert response.chat_history == chat_history + # Convert ConversationTurn objects to dicts for comparison + actual_history = [turn.model_dump() for turn in response.chat_history] + assert actual_history == chat_history def test_empty_chat_history(self) -> None: """Test ConversationResponse with empty chat_history.""" @@ -651,11 +658,17 @@ def test_constructor_not_deleted(self) -> None: assert response.response == "Conversation cannot be deleted" def test_missing_required_parameters(self) -> None: - """Test ConversationDeleteResponse raises ValidationError when required fields missing.""" + """Test ConversationDeleteResponse raises TypeError when required fields missing.""" with pytest.raises(TypeError): ConversationDeleteResponse() # pylint: disable=missing-kwoa # pyright: ignore with pytest.raises(TypeError): - ConversationDeleteResponse(deleted=True) # pylint: disable=missing-kwoa + ConversationDeleteResponse( # pylint: disable=missing-kwoa # pyright: ignore[reportCallIssue] + deleted=True + ) + with pytest.raises(TypeError): + ConversationDeleteResponse( # pylint: disable=missing-kwoa # pyright: ignore[reportCallIssue] + conversation_id="conv-123" + ) def test_openapi_response(self) -> None: """Test ConversationDeleteResponse.openapi_response() method.""" @@ -1011,6 +1024,121 @@ def test_model_json_schema_has_examples(self) -> None: assert isinstance(schema["examples"][0], str) 
+class TestMCPClientAuthOptionsResponse: + """Test cases for MCPClientAuthOptionsResponse.""" + + def test_constructor(self) -> None: + """Test MCPClientAuthOptionsResponse with servers list.""" + servers = [ + MCPServerAuthInfo(name="github", client_auth_headers=["Authorization"]), + MCPServerAuthInfo( + name="gitlab", client_auth_headers=["Authorization", "X-API-Key"] + ), + ] + response = MCPClientAuthOptionsResponse(servers=servers) + assert isinstance(response, AbstractSuccessfulResponse) + assert len(response.servers) == 2 + assert response.servers[0].name == "github" + + def test_empty_servers_list(self) -> None: + """Test MCPClientAuthOptionsResponse with empty servers list.""" + response = MCPClientAuthOptionsResponse(servers=[]) + assert response.servers == [] + + def test_openapi_response(self) -> None: + """Test MCPClientAuthOptionsResponse.openapi_response() method.""" + schema = MCPClientAuthOptionsResponse.model_json_schema() + model_examples = schema.get("examples", []) + expected_count = len(model_examples) + + result = MCPClientAuthOptionsResponse.openapi_response() + assert result["description"] == "Successful response" + assert result["model"] == MCPClientAuthOptionsResponse + assert "example" in result["content"]["application/json"] + + assert expected_count == 1 + + +class TestRAGInfoResponse: + """Test cases for RAGInfoResponse.""" + + def test_constructor(self) -> None: + """Test RAGInfoResponse with all fields.""" + response = RAGInfoResponse( + id="vs_7b52a8cf-0fa3-489c-beab-27e061d102f3", + name="Test RAG", + created_at=1763391371, + last_active_at=1763391371, + usage_bytes=1024000, + expires_at=None, + object="vector_store", + status="completed", + ) + assert isinstance(response, AbstractSuccessfulResponse) + assert response.id == "vs_7b52a8cf-0fa3-489c-beab-27e061d102f3" + assert response.name == "Test RAG" + assert response.created_at == 1763391371 + assert response.usage_bytes == 1024000 + assert response.object == "vector_store" + assert response.status == "completed" + + def test_missing_required_parameters(self) -> None: + """Test RAGInfoResponse raises ValidationError when required fields are missing.""" + with pytest.raises(ValidationError): + RAGInfoResponse() # type: ignore[call-arg] + + def test_openapi_response(self) -> None: + """Test RAGInfoResponse.openapi_response() method.""" + schema = RAGInfoResponse.model_json_schema() + model_examples = schema.get("examples", []) + expected_count = len(model_examples) + + result = RAGInfoResponse.openapi_response() + assert result["description"] == "Successful response" + assert result["model"] == RAGInfoResponse + assert "example" in result["content"]["application/json"] + + assert expected_count == 1 + + +class TestRAGListResponse: + """Test cases for RAGListResponse.""" + + def test_constructor(self) -> None: + """Test RAGListResponse with RAG list.""" + rags = [ + "vs_00000000-cafe-babe-0000-000000000000", + "vs_7b52a8cf-0fa3-489c-beab-27e061d102f3", + ] + response = RAGListResponse(rags=rags) + assert isinstance(response, AbstractSuccessfulResponse) + assert len(response.rags) == 2 + assert response.rags[0] == "vs_00000000-cafe-babe-0000-000000000000" + + def test_empty_rags_list(self) -> None: + """Test RAGListResponse with empty rags list.""" + response = RAGListResponse(rags=[]) + assert response.rags == [] + + def test_missing_required_parameter(self) -> None: + """Test RAGListResponse raises ValidationError when rags is missing.""" + with pytest.raises(ValidationError): + RAGListResponse() # type: 
ignore[call-arg]
+
+    def test_openapi_response(self) -> None:
+        """Test RAGListResponse.openapi_response() method."""
+        schema = RAGListResponse.model_json_schema()
+        model_examples = schema.get("examples", [])
+        expected_count = len(model_examples)
+
+        result = RAGListResponse.openapi_response()
+        assert result["description"] == "Successful response"
+        assert result["model"] == RAGListResponse
+        assert "example" in result["content"]["application/json"]
+
+        assert expected_count == 1
+
+
 class TestAbstractSuccessfulResponseOpenAPI:
     """Test cases for AbstractSuccessfulResponse.openapi_response() edge cases."""
diff --git a/tests/unit/observability/formats/test_rlsapi.py b/tests/unit/observability/formats/test_rlsapi.py
index e8c7fa82f..608b2a71a 100644
--- a/tests/unit/observability/formats/test_rlsapi.py
+++ b/tests/unit/observability/formats/test_rlsapi.py
@@ -18,7 +18,7 @@ def sample_event_data_fixture() -> InferenceEventData:
         org_id="12345678",
         system_id="abc-def-123",
         request_id="req_xyz789",
-        cla_version="CLA/0.4.0",
+        cla_version="CLA/0.4.1",
         system_os="RHEL",
         system_version="9.3",
         system_arch="x86_64",
@@ -39,7 +39,7 @@ def test_builds_event_with_all_fields(sample_event_data: InferenceEventData) ->
     assert event["org_id"] == "12345678"
     assert event["system_id"] == "abc-def-123"
     assert event["request_id"] == "req_xyz789"
-    assert event["cla_version"] == "CLA/0.4.0"
+    assert event["cla_version"] == "CLA/0.4.1"
     assert event["system_os"] == "RHEL"
     assert event["system_version"] == "9.3"
     assert event["system_arch"] == "x86_64"
diff --git a/tests/unit/runners/test_uvicorn_runner.py b/tests/unit/runners/test_uvicorn_runner.py
index d1586fda4..d75e677d2 100644
--- a/tests/unit/runners/test_uvicorn_runner.py
+++ b/tests/unit/runners/test_uvicorn_runner.py
@@ -22,6 +22,7 @@ def test_start_uvicorn(mocker: MockerFixture) -> None:
         host="localhost",
         port=8080,
         workers=1,
+        root_path="",
         log_level=20,
         ssl_certfile=None,
         ssl_keyfile=None,
@@ -45,6 +46,7 @@ def test_start_uvicorn_different_host_port(mocker: MockerFixture) -> None:
         host="x.y.com",
         port=1234,
         workers=10,
+        root_path="",
         log_level=20,
         ssl_certfile=None,
         ssl_keyfile=None,
@@ -69,6 +71,7 @@ def test_start_uvicorn_empty_tls_configuration(mocker: MockerFixture) -> None:
         host="x.y.com",
         port=1234,
         workers=10,
+        root_path="",
         log_level=20,
         ssl_certfile=None,
         ssl_keyfile=None,
@@ -97,6 +100,7 @@ def test_start_uvicorn_tls_configuration(mocker: MockerFixture) -> None:
         host="x.y.com",
         port=1234,
         workers=10,
+        root_path="",
         log_level=20,
         ssl_certfile=Path("tests/configuration/server.crt"),
         ssl_keyfile=Path("tests/configuration/server.key"),
@@ -104,3 +108,27 @@
         use_colors=True,
         access_log=True,
     )
+
+
+def test_start_uvicorn_with_root_path(mocker: MockerFixture) -> None:
+    """Test the function to start Uvicorn server with a custom root path."""
+    configuration = ServiceConfiguration(
+        host="localhost", port=8080, workers=1, root_path="/api/lightspeed"
+    )  # pyright: ignore[reportCallIssue]
+
+    # don't start real Uvicorn server
+    mocked_run = mocker.patch("uvicorn.run")
+    start_uvicorn(configuration)
+    mocked_run.assert_called_once_with(
+        "app.main:app",
+        host="localhost",
+        port=8080,
+        workers=1,
+        root_path="/api/lightspeed",
+        log_level=20,
+        ssl_certfile=None,
+        ssl_keyfile=None,
+        ssl_keyfile_password="",
+        use_colors=True,
+        access_log=True,
+    )
diff --git a/tests/unit/test_configuration.py b/tests/unit/test_configuration.py
index 3edd3999a..6783aa485 100644
--- a/tests/unit/test_configuration.py
+++ b/tests/unit/test_configuration.py
@@ -35,64 +35,84 @@ def test_default_configuration() -> None:
     assert cfg is not None
 
     # configuration is not loaded
-    with pytest.raises(Exception, match="logic error: configuration is not loaded"):
+    with pytest.raises(LogicError, match="logic error: configuration is not loaded"):
         # try to read property
         _ = 
cfg.configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.service_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.llama_stack_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = ( cfg.user_data_collection_configuration ) # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.mcp_servers # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.authentication_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.customization # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.authorization_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.inference # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.database_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.conversation_cache_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.quota_handlers_configuration # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.conversation_cache # pylint: disable=pointless-statement - with pytest.raises(Exception, match="logic error: configuration is not loaded"): + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): # try to read property _ = cfg.quota_limiters # pylint: disable=pointless-statement + with pytest.raises(LogicError, match="logic error: configuration is not loaded"): + # try to read property + _ = cfg.a2a_state # 
pylint: disable=pointless-statement
+
+    with pytest.raises(LogicError, match="logic error: configuration is not loaded"):
+        # try to read property
+        _ = cfg.token_usage_history  # pylint: disable=pointless-statement
+
+    with pytest.raises(LogicError, match="logic error: configuration is not loaded"):
+        # try to read property
+        _ = cfg.azure_entra_id  # pylint: disable=pointless-statement
+
+    with pytest.raises(LogicError, match="logic error: configuration is not loaded"):
+        # try to read property
+        _ = cfg.splunk  # pylint: disable=pointless-statement
+
+    with pytest.raises(LogicError, match="logic error: configuration is not loaded"):
+        # try to read property
+        _ = cfg.deployment_environment  # pylint: disable=pointless-statement
+
 
 def test_configuration_is_singleton() -> None:
     """Test that configuration is singleton."""
@@ -126,6 +146,19 @@ def test_init_from_dict() -> None:
         "authentication": {
             "module": "noop",
         },
+        "a2a_state": {
+            "sqlite": None,
+            "postgres": None,
+        },
+        "splunk": {
+            "enabled": False,
+            "url": "foo.bar.baz",
+            "index": "index",
+            "source": "source",
+            "timeout": 10,
+            "verify_ssl": False,
+        },
+        "deployment_environment": "foo",
     }
     cfg = AppConfig()
     cfg.init_from_dict(config_dict)
@@ -142,7 +175,7 @@ def test_init_from_dict() -> None:
     # check for llama_stack_configuration subsection
     assert cfg.llama_stack_configuration.api_key is not None
     assert cfg.llama_stack_configuration.api_key.get_secret_value() == "xyzzy"
-    assert cfg.llama_stack_configuration.url == "http://x.y.com:1234"
+    assert str(cfg.llama_stack_configuration.url) == "http://x.y.com:1234/"
     assert cfg.llama_stack_configuration.use_as_library_client is False
 
     # check for service_configuration subsection
@@ -172,6 +205,26 @@ def test_init_from_dict() -> None:
     # check conversation cache
     assert cfg.conversation_cache_configuration is not None
 
+    # check a2a state
+    assert cfg.a2a_state is not None
+    assert cfg.a2a_state.sqlite is None
+    assert cfg.a2a_state.postgres is None
+
+    # check Splunk
+    assert cfg.splunk is not None
+    assert cfg.splunk.enabled is False
+    assert cfg.splunk.url == "foo.bar.baz"
+    assert cfg.splunk.index == "index"
+    assert cfg.splunk.source == "source"
+    assert cfg.splunk.timeout == 10
+    assert cfg.splunk.verify_ssl is False
+
+    # check deployment_environment
+    assert cfg.deployment_environment is not None
+
+    # check token usage history
+    assert cfg.token_usage_history is None
+
 
 def test_init_from_dict_with_mcp_servers() -> None:
     """Test initialization with MCP servers configuration."""
@@ -724,6 +777,44 @@ def test_configuration_with_quota_handlers_no_storage(tmpdir: Path) -> None:
     assert cfg.quota_handlers_configuration.scheduler.period == 1
 
 
+def test_configuration_with_token_history_no_storage(tmpdir: Path) -> None:
+    """Test loading configuration from YAML file with token usage history enabled."""
+    cfg_filename = tmpdir / "config.yaml"
+    with open(cfg_filename, "w", encoding="utf-8") as fout:
+        fout.write("""
+name: test service
+service:
+  host: localhost
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  use_as_library_client: false
+  url: http://localhost:8321
+  api_key: test-key
+user_data_collection:
+  feedback_enabled: false
+quota_handlers:
+  scheduler:
+    # scheduler ticks in seconds
+    period: 1
+  enable_token_history: true
+    """)
+
+    cfg = AppConfig()
+    cfg.load_configuration(str(cfg_filename))
+
+    assert cfg.quota_handlers_configuration is not None
+    assert cfg.quota_handlers_configuration.sqlite is None
+    assert cfg.quota_handlers_configuration.postgres is None
+    assert cfg.quota_handlers_configuration.scheduler is not None
+
+    # check the token usage history
+    assert cfg.token_usage_history is not None
+
+
 def test_configuration_with_quota_handlers(tmpdir: Path) -> None:
     """Test loading configuration from YAML file with quota handlers configuration."""
     cfg_filename = tmpdir / "config.yaml"
diff --git a/tests/unit/utils/README.md b/tests/unit/utils/README.md
index 65afa7b24..22e9d2899 100644
--- a/tests/unit/utils/README.md
+++ b/tests/unit/utils/README.md
@@ -15,6 +15,9 @@ Test module for utils/common.py.
 ## [test_connection_decorator.py](test_connection_decorator.py)
 Unit tests for the connection decorator.
 
+## [test_conversations.py](test_conversations.py)
+Unit tests for conversation utility functions.
+
 ## [test_endpoints.py](test_endpoints.py)
 Unit tests for endpoints utility functions.
 
@@ -27,6 +30,12 @@ Unit tests for MCP authorization headers utilities.
 ## [test_mcp_headers.py](test_mcp_headers.py)
 Unit tests for MCP headers utility functions.
 
+## [test_prompts.py](test_prompts.py)
+Unit tests for prompts utility functions.
+
+## [test_query.py](test_query.py)
+Unit tests for utils/query.py functions.
+
 ## [test_responses.py](test_responses.py)
 Unit tests for utils/responses.py functions.
 
@@ -40,5 +49,5 @@ Unit tests for functions defined in utils.suid module.
 Unit tests for functions defined in utils.transcripts module.
 
 ## [test_types.py](test_types.py)
-Unit tests for functions defined in utils/types.py.
+Unit tests for functions and types defined in utils/types.py.
 
diff --git a/tests/unit/utils/test_checks.py b/tests/unit/utils/test_checks.py
index 64893f63a..1c5dd20ed 100644
--- a/tests/unit/utils/test_checks.py
+++ b/tests/unit/utils/test_checks.py
@@ -146,6 +146,41 @@ def test_import_python_module_success() -> None:
     assert isinstance(result, ModuleType)
 
 
+def test_import_python_module_custom_name() -> None:
+    """Test importing a Python module under a custom module name."""
+    module_path = "tests/profiles/test/profile.py"
+    module_name = "profileX"
+    result = checks.import_python_module(module_name, module_path)
+
+    assert isinstance(result, ModuleType)
+
+
+def test_import_python_empty_file() -> None:
+    """Test importing a Python module from an empty file."""
+    module_path = "tests/profiles/empty.py"
+    module_name = "profile"
+    result = checks.import_python_module(module_name, module_path)
+
+    assert isinstance(result, ModuleType)
+
+
+def test_import_python_syntax_error() -> None:
+    """Test importing a Python module that contains a syntax error."""
+    module_path = "tests/profiles/syntax_error.py"
+    module_name = "profile"
+    result = checks.import_python_module(module_name, module_path)
+
+    assert result is None
+
+
+def test_import_python_non_existing_path() -> None:
+    """Test importing a Python module from a non-existing path."""
+    module_path = "foo.py"
+    module_name = "profile"
+    with pytest.raises(FileNotFoundError, match="No such file"):
+        checks.import_python_module(module_name, module_path)
+
+
 def test_import_python_module_error() -> None:
     """Test importing a Python module that is a .txt file."""
     module_path = "tests/profiles/test_two/test.txt"
@@ -177,3 +212,28 @@ def test_invalid_profile() -> None:
     result = checks.is_valid_profile(fetched_module)
 
     assert result is False
+
+
+def test_no_profile() -> None:
+    """Test if an imported profile is valid (expect invalid)."""
+    module_path = "tests/profiles/empty.py"
+    module_name = "profile"
+    fetched_module = checks.import_python_module(module_name, module_path)
result = False + if fetched_module: + result = checks.is_valid_profile(fetched_module) + + assert result is False + + +def test_no_system_prompts_profile() -> None: + """Test if an imported profile contains system prompt if config.""" + module_path = "tests/profiles/test_four/profile.py" + module_name = "profile" + fetched_module = checks.import_python_module(module_name, module_path) + + result = False + if fetched_module: + result = checks.is_valid_profile(fetched_module) + + assert result is False diff --git a/tests/unit/utils/test_conversations.py b/tests/unit/utils/test_conversations.py new file mode 100644 index 000000000..e4120f145 --- /dev/null +++ b/tests/unit/utils/test_conversations.py @@ -0,0 +1,722 @@ +"""Unit tests for conversation utility functions.""" + +from datetime import datetime, UTC +from typing import Any + +import pytest +from pytest_mock import MockerFixture + +from constants import DEFAULT_RAG_TOOL +from models.database.conversations import UserTurn +from utils.conversations import ( + _build_tool_call_summary_from_item, + _extract_text_from_content, + build_conversation_turns_from_items, +) +from utils.types import ToolCallSummary + +# Default conversation start time for tests +DEFAULT_CONVERSATION_START_TIME = datetime.fromisoformat( + "2024-01-01T00:00:00Z" +).replace(tzinfo=UTC) + + +@pytest.fixture(name="create_mock_user_turn") +def create_mock_user_turn_fixture(mocker: MockerFixture) -> Any: + """Factory fixture to create mock UserTurn objects. + + Args: + mocker: Mocker fixture + + Returns: + Function that creates a mock UserTurn with specified attributes + """ + + def _create( + turn_number: int = 1, + started_at: str = "2024-01-01T00:01:00Z", + completed_at: str = "2024-01-01T00:01:05Z", + provider: str = "google", + model: str = "gemini-2.0-flash-exp", + ) -> Any: + mock_turn = mocker.Mock(spec=UserTurn) + mock_turn.turn_number = turn_number + mock_turn.started_at = datetime.fromisoformat(started_at).replace(tzinfo=UTC) + mock_turn.completed_at = datetime.fromisoformat(completed_at).replace( + tzinfo=UTC + ) + mock_turn.provider = provider + mock_turn.model = model + return mock_turn + + return _create + + +class TestExtractTextFromContent: + """Test cases for _extract_text_from_content function.""" + + def test_string_input(self) -> None: + """Test extracting text from string input.""" + content = "Simple text message" + result = _extract_text_from_content(content) + + assert result == "Simple text message" + + def test_composed_input(self) -> None: + """Test extracting text from composed (list) input.""" + + # Create simple objects with text and refusal attributes + class TextPart: # pylint: disable=too-few-public-methods + """Helper class for testing text extraction.""" + + def __init__(self, text: str) -> None: + self.text = text + + class RefusalPart: # pylint: disable=too-few-public-methods + """Helper class for testing refusal extraction.""" + + def __init__(self, refusal: str) -> None: + self.refusal = refusal + + # Create composed content with various types + content = [ + "String part", + TextPart("First part"), + RefusalPart("Refusal message"), + {"text": "Dict text"}, + {"refusal": "Dict refusal"}, + ] + + result = _extract_text_from_content(content) + + assert result == "String partFirst partRefusal messageDict textDict refusal" + + +class TestBuildToolCallSummaryFromItem: + """Test cases for _build_tool_call_summary_from_item function.""" + + def test_function_call_item(self, mocker: MockerFixture) -> None: + """Test parsing a function_call 
item.""" + mock_item = mocker.Mock() + mock_item.type = "function_call" + mock_item.call_id = "call_123" + mock_item.name = "test_function" + mock_item.arguments = '{"arg1": "value1"}' + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert isinstance(tool_call, ToolCallSummary) + assert tool_call.id == "call_123" + assert tool_call.name == "test_function" + assert tool_call.type == "function_call" + assert tool_result is None + + def test_file_search_call_with_results(self, mocker: MockerFixture) -> None: + """Test parsing a file_search_call item with results.""" + mock_result = mocker.Mock() + mock_result.model_dump.return_value = {"file": "test.txt", "content": "test"} + + mock_item = mocker.Mock() + mock_item.type = "file_search_call" + mock_item.id = "file_search_123" + mock_item.queries = ["query1", "query2"] + mock_item.status = "success" + mock_item.results = [mock_result] + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_call.id == "file_search_123" + assert tool_call.name == DEFAULT_RAG_TOOL + assert tool_call.type == "file_search_call" + assert tool_call.args == {"queries": ["query1", "query2"]} + + assert tool_result is not None + assert tool_result.id == "file_search_123" + assert tool_result.status == "success" + assert tool_result.type == "file_search_call" + assert tool_result.round == 1 + assert "results" in tool_result.content + + def test_file_search_call_without_results(self, mocker: MockerFixture) -> None: + """Test parsing a file_search_call item without results.""" + mock_item = mocker.Mock() + mock_item.type = "file_search_call" + mock_item.id = "file_search_123" + mock_item.queries = ["query1"] + mock_item.status = "success" + mock_item.results = None + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_result is not None + assert tool_result.content == "" + + def test_web_search_call(self, mocker: MockerFixture) -> None: + """Test parsing a web_search_call item.""" + mock_item = mocker.Mock() + mock_item.type = "web_search_call" + mock_item.id = "web_search_123" + mock_item.status = "success" + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_call.id == "web_search_123" + assert tool_call.name == "web_search" + assert tool_call.type == "web_search_call" + assert tool_call.args == {} + + assert tool_result is not None + assert tool_result.id == "web_search_123" + assert tool_result.status == "success" + assert tool_result.type == "web_search_call" + assert tool_result.content == "" + assert tool_result.round == 1 + + def test_mcp_call_with_error(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_call item with error.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_call" + mock_item.id = "mcp_123" + mock_item.name = "test_mcp_tool" + mock_item.arguments = '{"param": "value"}' + mock_item.server_label = "test_server" + mock_item.error = "Error occurred" + mock_item.output = None + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_call.id == "mcp_123" + assert tool_call.name == "test_mcp_tool" + assert tool_call.type == "mcp_call" + assert "server_label" in tool_call.args + assert tool_call.args["server_label"] == "test_server" + + assert tool_result is not None + assert tool_result.status == "failure" + assert 
tool_result.content == "Error occurred" + + def test_mcp_call_with_output(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_call item with output.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_call" + mock_item.id = "mcp_123" + mock_item.name = "test_mcp_tool" + mock_item.arguments = '{"param": "value"}' + mock_item.server_label = "test_server" + mock_item.error = None + mock_item.output = "Success output" + + _, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_result is not None + assert tool_result.status == "success" + assert tool_result.content == "Success output" + + def test_mcp_call_without_server_label(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_call item without server_label.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_call" + mock_item.id = "mcp_123" + mock_item.name = "test_mcp_tool" + mock_item.arguments = '{"param": "value"}' + mock_item.server_label = None + mock_item.error = None + mock_item.output = "output" + + tool_call, _ = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert "server_label" not in tool_call.args + + def test_mcp_list_tools(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_list_tools item.""" + mock_tool = mocker.Mock() + mock_tool.name = "tool1" + mock_tool.description = "Description" + mock_tool.input_schema = {"type": "object"} + + mock_item = mocker.Mock() + mock_item.type = "mcp_list_tools" + mock_item.id = "list_tools_123" + mock_item.server_label = "test_server" + mock_item.tools = [mock_tool] + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_call.id == "list_tools_123" + assert tool_call.name == "mcp_list_tools" + assert tool_call.type == "mcp_list_tools" + assert tool_call.args == {"server_label": "test_server"} + + assert tool_result is not None + assert tool_result.status == "success" + assert "tools" in tool_result.content + assert "test_server" in tool_result.content + + def test_mcp_approval_request(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_approval_request item.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_approval_request" + mock_item.id = "approval_123" + mock_item.name = "approve_action" + mock_item.arguments = '{"action": "delete"}' + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is not None + assert tool_call.id == "approval_123" + assert tool_call.name == "approve_action" + assert tool_call.type == "tool_call" + assert tool_result is None + + def test_mcp_approval_response_approved(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_approval_response item with approval.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_approval_response" + mock_item.approval_request_id = "approval_123" + mock_item.approve = True + mock_item.reason = "Looks good" + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is None + assert tool_result is not None + assert tool_result.id == "approval_123" + assert tool_result.status == "success" + assert tool_result.type == "mcp_approval_response" + assert "reason" in tool_result.content + + def test_mcp_approval_response_denied(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_approval_response item with denial.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_approval_response" + mock_item.approval_request_id = "approval_123" + mock_item.approve = False + 
mock_item.reason = "Not allowed" + + _, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_result is not None + assert tool_result.status == "denied" + + def test_mcp_approval_response_without_reason(self, mocker: MockerFixture) -> None: + """Test parsing an mcp_approval_response item without reason.""" + mock_item = mocker.Mock() + mock_item.type = "mcp_approval_response" + mock_item.approval_request_id = "approval_123" + mock_item.approve = True + mock_item.reason = None + + _, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_result is not None + assert tool_result.content == "{}" + + def test_function_call_output(self, mocker: MockerFixture) -> None: + """Test parsing a function_call_output item.""" + mock_item = mocker.Mock() + mock_item.type = "function_call_output" + mock_item.call_id = "call_123" + mock_item.status = "success" + mock_item.output = "Function result" + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is None + assert tool_result is not None + assert tool_result.id == "call_123" + assert tool_result.status == "success" + assert tool_result.content == "Function result" + assert tool_result.type == "function_call_output" + assert tool_result.round == 1 + + def test_function_call_output_without_status(self, mocker: MockerFixture) -> None: + """Test parsing a function_call_output item without status.""" + mock_item = mocker.Mock() + mock_item.type = "function_call_output" + mock_item.call_id = "call_123" + mock_item.status = None + mock_item.output = "Function result" + + _, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_result is not None + assert tool_result.status == "success" # Defaults to "success" + + def test_unknown_item_type(self, mocker: MockerFixture) -> None: + """Test parsing an unknown item type.""" + mock_item = mocker.Mock() + mock_item.type = "unknown_type" + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is None + assert tool_result is None + + def test_item_without_type_attribute(self, mocker: MockerFixture) -> None: + """Test parsing an item without type attribute.""" + mock_item = mocker.Mock(spec=[]) + # Don't set type attribute + + tool_call, tool_result = _build_tool_call_summary_from_item(mock_item) + + assert tool_call is None + assert tool_result is None + + +class TestBuildConversationTurnsFromItems: + """Test cases for build_conversation_turns_from_items function.""" + + def test_empty_items(self) -> None: + """Test with empty items list.""" + result = build_conversation_turns_from_items( + [], [], DEFAULT_CONVERSATION_START_TIME + ) + + assert not result + + def test_single_turn_user_and_assistant( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a single turn with user and assistant messages.""" + mock_user_msg = mocker.Mock() + mock_user_msg.type = "message" + mock_user_msg.role = "user" + mock_user_msg.content = "Hello" + + mock_assistant_msg = mocker.Mock() + mock_assistant_msg.type = "message" + mock_assistant_msg.role = "assistant" + mock_assistant_msg.content = "Hi there!" 
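+        # Assumption verified by the assertions below: the turn builder
+        # groups a user message and the assistant reply that follows it
+        # into a single turn.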
+ + items = [mock_user_msg, mock_assistant_msg] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + turn = result[0] + assert len(turn.messages) == 2 + assert turn.messages[0].type == "user" + assert turn.messages[0].content == "Hello" + assert turn.messages[1].type == "assistant" + assert turn.messages[1].content == "Hi there!" + assert turn.tool_calls == [] + assert turn.tool_results == [] + + def test_multiple_turns( + self, mocker: MockerFixture, create_mock_user_turn: Any + ) -> None: + """Test building multiple turns.""" + items = [ + mocker.Mock(type="message", role="user", content="Question 1"), + mocker.Mock(type="message", role="assistant", content="Answer 1"), + mocker.Mock(type="message", role="user", content="Question 2"), + mocker.Mock(type="message", role="assistant", content="Answer 2"), + ] + turns_metadata = [ + create_mock_user_turn(turn_number=1), + create_mock_user_turn(turn_number=2), + ] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 2 + assert result[0].messages[0].content == "Question 1" + assert result[0].messages[1].content == "Answer 1" + assert result[1].messages[0].content == "Question 2" + assert result[1].messages[1].content == "Answer 2" + + def test_turn_with_tool_calls( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with tool calls.""" + mock_function_call = mocker.Mock() + mock_function_call.type = "function_call" + mock_function_call.call_id = "call_1" + mock_function_call.name = "test_tool" + mock_function_call.arguments = '{"arg": "value"}' + + items = [ + mocker.Mock(type="message", role="user", content="Use tool"), + mock_function_call, + mocker.Mock(type="message", role="assistant", content="Done"), + ] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 1 + assert result[0].tool_calls[0].name == "test_tool" + + def test_turn_with_tool_results( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with tool results.""" + mock_function_output = mocker.Mock() + mock_function_output.type = "function_call_output" + mock_function_output.call_id = "call_1" + mock_function_output.status = "success" + mock_function_output.output = "Result" + + items = [ + mocker.Mock(type="message", role="user", content="Use tool"), + mock_function_output, + mocker.Mock(type="message", role="assistant", content="Done"), + ] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + assert len(result[0].tool_results) == 1 + assert result[0].tool_results[0].status == "success" + + def test_turn_with_both_tool_calls_and_results( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with both tool calls and results.""" + mock_function_call = mocker.Mock() + mock_function_call.type = "function_call" + mock_function_call.call_id = "call_1" + mock_function_call.name = "test_tool" + mock_function_call.arguments = "{}" + + mock_function_output = mocker.Mock() + 
mock_function_output.type = "function_call_output" + mock_function_output.call_id = "call_1" + mock_function_output.status = "success" + mock_function_output.output = "Result" + + items = [ + mocker.Mock(type="message", role="user", content="Use tool"), + mock_function_call, + mock_function_output, + mocker.Mock(type="message", role="assistant", content="Done"), + ] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 1 + assert len(result[0].tool_results) == 1 + + def test_turn_with_file_search_tool( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with file_search_call tool.""" + mock_file_search = mocker.Mock() + mock_file_search.type = "file_search_call" + mock_file_search.id = "file_1" + mock_file_search.queries = ["query1"] + mock_file_search.status = "success" + mock_file_search.results = None + + items = [ + mocker.Mock(type="message", role="user", content="Search files"), + mock_file_search, + mocker.Mock(type="message", role="assistant", content="Found files"), + ] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 1 + assert len(result[0].tool_results) == 1 + assert result[0].tool_calls[0].name == DEFAULT_RAG_TOOL + + def test_turn_with_multiple_assistant_messages( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with multiple assistant messages.""" + items = [ + mocker.Mock(type="message", role="user", content="Question"), + mocker.Mock(type="message", role="assistant", content="Part 1"), + mocker.Mock(type="message", role="assistant", content="Part 2"), + ] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + assert len(result[0].messages) == 3 + assert result[0].messages[0].type == "user" + assert result[0].messages[1].type == "assistant" + assert result[0].messages[2].type == "assistant" + + def test_turn_metadata_used_correctly( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test that turn metadata (provider, model, timestamps) is used correctly.""" + items = [ + mocker.Mock(type="message", role="user", content="Test"), + mocker.Mock(type="message", role="assistant", content="Response"), + ] + turns_metadata = [ + create_mock_user_turn( + turn_number=1, + provider="openai", + model="gpt-4", + started_at="2024-01-01T10:00:00Z", + completed_at="2024-01-01T10:00:05Z", + ) + ] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 1 + turn = result[0] + assert turn.provider == "openai" + assert turn.model == "gpt-4" + assert turn.started_at == "2024-01-01T10:00:00Z" + assert turn.completed_at == "2024-01-01T10:00:05Z" + + def test_turn_with_only_tool_items_no_messages( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building a turn with only tool items (no messages).""" + mock_function_call = mocker.Mock() + mock_function_call.type = "function_call" + mock_function_call.call_id = "call_1" + mock_function_call.name = 
"test_tool" + mock_function_call.arguments = "{}" + + items = [mock_function_call] + turns_metadata = [create_mock_user_turn(turn_number=1)] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + # Should still create a turn if there are tool calls/results + assert len(result) == 1 + assert len(result[0].messages) == 0 + assert len(result[0].tool_calls) == 1 + + def test_multiple_turns_with_tools( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test building multiple turns where some have tools.""" + mock_function_call = mocker.Mock() + mock_function_call.type = "function_call" + mock_function_call.call_id = "call_1" + mock_function_call.name = "test_tool" + mock_function_call.arguments = "{}" + + items = [ + mocker.Mock(type="message", role="user", content="Question 1"), + mocker.Mock(type="message", role="assistant", content="Answer 1"), + mocker.Mock(type="message", role="user", content="Question 2"), + mock_function_call, + mocker.Mock(type="message", role="assistant", content="Answer 2"), + ] + turns_metadata = [ + create_mock_user_turn(turn_number=1), + create_mock_user_turn(turn_number=2), + ] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 2 + assert len(result[0].tool_calls) == 0 + assert len(result[1].tool_calls) == 1 + + def test_turn_indexing_with_metadata( + self, + mocker: MockerFixture, + create_mock_user_turn: Any, + ) -> None: + """Test that turn metadata is correctly indexed by turn number.""" + items = [ + mocker.Mock(type="message", role="user", content="Q1"), + mocker.Mock(type="message", role="assistant", content="A1"), + mocker.Mock(type="message", role="user", content="Q2"), + mocker.Mock(type="message", role="assistant", content="A2"), + mocker.Mock(type="message", role="user", content="Q3"), + mocker.Mock(type="message", role="assistant", content="A3"), + ] + turns_metadata = [ + create_mock_user_turn(turn_number=1, provider="provider1"), + create_mock_user_turn(turn_number=2, provider="provider2"), + create_mock_user_turn(turn_number=3, provider="provider3"), + ] + + result = build_conversation_turns_from_items( + items, turns_metadata, DEFAULT_CONVERSATION_START_TIME + ) + + assert len(result) == 3 + assert result[0].provider == "provider1" + assert result[1].provider == "provider2" + assert result[2].provider == "provider3" + + def test_legacy_conversation_without_metadata(self, mocker: MockerFixture) -> None: + """Test building turns for legacy conversation without stored turn metadata.""" + # Legacy conversations have items but no turns_metadata + items = [ + mocker.Mock(type="message", role="user", content="Question"), + mocker.Mock(type="message", role="assistant", content="Answer"), + ] + turns_metadata: list[UserTurn] = [] # Empty metadata for legacy conversation + conversation_start_time = datetime.fromisoformat( + "2024-01-01T10:00:00Z" + ).replace(tzinfo=UTC) + + result = build_conversation_turns_from_items( + items, turns_metadata, conversation_start_time + ) + + assert len(result) == 1 + turn = result[0] + assert len(turn.messages) == 2 + # Legacy conversations should use dummy metadata with N/A values + assert turn.provider == "N/A" + assert turn.model == "N/A" + # Timestamps should match conversation start time + assert turn.started_at == "2024-01-01T10:00:00Z" + assert turn.completed_at == "2024-01-01T10:00:00Z" diff --git a/tests/unit/utils/test_endpoints.py 
b/tests/unit/utils/test_endpoints.py index 70fc3fa70..cc092a1ed 100644 --- a/tests/unit/utils/test_endpoints.py +++ b/tests/unit/utils/test_endpoints.py @@ -9,16 +9,11 @@ from fastapi import HTTPException from pydantic import AnyUrl from pytest_mock import MockerFixture +from sqlalchemy.exc import SQLAlchemyError -import constants -from configuration import AppConfig -from models.config import Action, CustomProfile -from models.requests import QueryRequest -from models.responses import ReferencedDocument -from tests.unit import config_dict +from models.database.conversations import UserConversation from utils import endpoints - -CONFIGURED_SYSTEM_PROMPT = "This is a configured system prompt" +from utils.types import ReferencedDocument @pytest.fixture(name="input_file") @@ -30,324 +25,6 @@ def input_file_fixture(tmp_path: Path) -> str: return filename -@pytest.fixture(name="config_without_system_prompt") -def config_without_system_prompt_fixture() -> AppConfig: - """Configuration w/o custom system prompt set.""" - test_config = config_dict.copy() - - # no customization provided - test_config["customization"] = None - - cfg = AppConfig() - cfg.init_from_dict(test_config) - - return cfg - - -@pytest.fixture(name="config_with_custom_system_prompt") -def config_with_custom_system_prompt_fixture() -> AppConfig: - """Configuration with custom system prompt set.""" - test_config = config_dict.copy() - - # system prompt is customized - test_config["customization"] = { - "system_prompt": CONFIGURED_SYSTEM_PROMPT, - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - return cfg - - -@pytest.fixture(name="config_with_custom_system_prompt_and_disable_query_system_prompt") -def config_with_custom_system_prompt_and_disable_query_system_prompt_fixture() -> ( - AppConfig -): - """Configuration with custom system prompt and disabled query system prompt set.""" - test_config = config_dict.copy() - - # system prompt is customized and query system prompt is disabled - test_config["customization"] = { - "system_prompt": CONFIGURED_SYSTEM_PROMPT, - "disable_query_system_prompt": True, - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - return cfg - - -@pytest.fixture( - name="config_with_custom_profile_prompt_and_enabled_query_system_prompt" -) -def config_with_custom_profile_prompt_and_enabled_query_system_prompt_fixture() -> ( - AppConfig -): - """Configuration with custom profile loaded for prompt and disabled query system prompt set.""" - test_config = config_dict.copy() - - test_config["customization"] = { - "profile_path": "tests/profiles/test/profile.py", - "system_prompt": CONFIGURED_SYSTEM_PROMPT, - "disable_query_system_prompt": False, - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - return cfg - - -@pytest.fixture( - name="config_with_custom_profile_prompt_and_disable_query_system_prompt" -) -def config_with_custom_profile_prompt_and_disable_query_system_prompt_fixture() -> ( - AppConfig -): - """Configuration with custom profile loaded for prompt and disabled query system prompt set.""" - test_config = config_dict.copy() - - test_config["customization"] = { - "profile_path": "tests/profiles/test/profile.py", - "system_prompt": CONFIGURED_SYSTEM_PROMPT, - "disable_query_system_prompt": True, - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - return cfg - - -@pytest.fixture(name="query_request_without_system_prompt") -def query_request_without_system_prompt_fixture() -> QueryRequest: - """Fixture for query request without system prompt.""" - return 
QueryRequest( - query="query", system_prompt=None - ) # pyright: ignore[reportCallIssue] - - -@pytest.fixture(name="query_request_with_system_prompt") -def query_request_with_system_prompt_fixture() -> QueryRequest: - """Fixture for query request with system prompt.""" - return QueryRequest( - query="query", system_prompt="System prompt defined in query" - ) # pyright: ignore[reportCallIssue] - - -@pytest.fixture(name="setup_configuration") -def setup_configuration_fixture() -> AppConfig: - """Set up configuration for tests.""" - test_config_dict = { - "name": "test", - "service": { - "host": "localhost", - "port": 8080, - "auth_enabled": False, - "workers": 1, - "color_log": True, - "access_log": True, - }, - "llama_stack": { - "api_key": "test-key", - "url": "http://test.com:1234", - "use_as_library_client": False, - }, - "user_data_collection": { - "transcripts_enabled": False, - }, - "mcp_servers": [], - } - cfg = AppConfig() - cfg.init_from_dict(test_config_dict) - return cfg - - -def test_get_default_system_prompt( - config_without_system_prompt: AppConfig, - query_request_without_system_prompt: QueryRequest, -) -> None: - """Test that default system prompt is returned when other prompts are not provided.""" - system_prompt = endpoints.get_system_prompt( - query_request_without_system_prompt, config_without_system_prompt - ) - assert system_prompt == constants.DEFAULT_SYSTEM_PROMPT - - -def test_get_customized_system_prompt( - config_with_custom_system_prompt: AppConfig, - query_request_without_system_prompt: QueryRequest, -) -> None: - """Test that customized system prompt is used when system prompt is not provided in query.""" - system_prompt = endpoints.get_system_prompt( - query_request_without_system_prompt, config_with_custom_system_prompt - ) - assert system_prompt == CONFIGURED_SYSTEM_PROMPT - - -def test_get_query_system_prompt( - config_without_system_prompt: AppConfig, - query_request_with_system_prompt: QueryRequest, -) -> None: - """Test that system prompt from query is returned.""" - system_prompt = endpoints.get_system_prompt( - query_request_with_system_prompt, config_without_system_prompt - ) - assert system_prompt == query_request_with_system_prompt.system_prompt - - -def test_get_query_system_prompt_not_customized_one( - config_with_custom_system_prompt: AppConfig, - query_request_with_system_prompt: QueryRequest, -) -> None: - """Test that system prompt from query is returned even when customized one is specified.""" - system_prompt = endpoints.get_system_prompt( - query_request_with_system_prompt, config_with_custom_system_prompt - ) - assert system_prompt == query_request_with_system_prompt.system_prompt - - -def test_get_system_prompt_with_disable_query_system_prompt( - config_with_custom_system_prompt_and_disable_query_system_prompt: AppConfig, - query_request_with_system_prompt: QueryRequest, -) -> None: - """Test that query system prompt is disallowed when disable_query_system_prompt is True.""" - with pytest.raises(HTTPException) as exc_info: - endpoints.get_system_prompt( - query_request_with_system_prompt, - config_with_custom_system_prompt_and_disable_query_system_prompt, - ) - assert exc_info.value.status_code == 422 - - -def test_get_system_prompt_with_disable_query_system_prompt_and_non_system_prompt_query( - config_with_custom_system_prompt_and_disable_query_system_prompt: AppConfig, - query_request_without_system_prompt: QueryRequest, -) -> None: - """Test that query without system prompt is allowed when disable_query_system_prompt is True.""" - 
system_prompt = endpoints.get_system_prompt( - query_request_without_system_prompt, - config_with_custom_system_prompt_and_disable_query_system_prompt, - ) - assert system_prompt == CONFIGURED_SYSTEM_PROMPT - - -def test_get_profile_prompt_with_disable_query_system_prompt( - config_with_custom_profile_prompt_and_disable_query_system_prompt: AppConfig, - query_request_without_system_prompt: QueryRequest, -) -> None: - """Test that system prompt is set if profile enabled and query system prompt disabled.""" - custom_profile = CustomProfile(path="tests/profiles/test/profile.py") - prompts = custom_profile.get_prompts() - system_prompt = endpoints.get_system_prompt( - query_request_without_system_prompt, - config_with_custom_profile_prompt_and_disable_query_system_prompt, - ) - assert system_prompt == prompts.get("default") - - -def test_get_profile_prompt_with_enabled_query_system_prompt( - config_with_custom_profile_prompt_and_enabled_query_system_prompt: AppConfig, - query_request_with_system_prompt: QueryRequest, -) -> None: - """Test that profile system prompt is overridden by query system prompt enabled.""" - system_prompt = endpoints.get_system_prompt( - query_request_with_system_prompt, - config_with_custom_profile_prompt_and_enabled_query_system_prompt, - ) - assert system_prompt == query_request_with_system_prompt.system_prompt - - -def test_validate_model_provider_override_allowed_with_action() -> None: - """Ensure no exception when caller has MODEL_OVERRIDE and request includes model/provider.""" - query_request = QueryRequest( - query="q", model="m", provider="p" - ) # pyright: ignore[reportCallIssue] - authorized_actions = {Action.MODEL_OVERRIDE} - endpoints.validate_model_provider_override(query_request, authorized_actions) - - -def test_validate_model_provider_override_rejected_without_action() -> None: - """Ensure HTTP 403 when request includes model/provider and caller lacks permission.""" - query_request = QueryRequest( - query="q", model="m", provider="p" - ) # pyright: ignore[reportCallIssue] - authorized_actions: set[Action] = set() - with pytest.raises(HTTPException) as exc_info: - endpoints.validate_model_provider_override(query_request, authorized_actions) - assert exc_info.value.status_code == 403 - - -def test_validate_model_provider_override_no_override_without_action() -> None: - """No exception when request does not include model/provider regardless of permission.""" - query_request = QueryRequest(query="q") # pyright:ignore[reportCallIssue] - endpoints.validate_model_provider_override(query_request, set()) - - -def test_get_topic_summary_system_prompt_default( - setup_configuration: AppConfig, -) -> None: - """Test that default topic summary system prompt is returned when no custom - profile is configured. 
- """ - topic_summary_prompt = endpoints.get_topic_summary_system_prompt( - setup_configuration - ) - assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT - - -def test_get_topic_summary_system_prompt_with_custom_profile() -> None: - """Test that custom profile topic summary prompt is returned when available.""" - test_config = config_dict.copy() - test_config["customization"] = { - "profile_path": "tests/profiles/test/profile.py", - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - # Mock the custom profile to return a topic_summary prompt - custom_profile = CustomProfile(path="tests/profiles/test/profile.py") - prompts = custom_profile.get_prompts() - - topic_summary_prompt = endpoints.get_topic_summary_system_prompt(cfg) - assert topic_summary_prompt == prompts.get("topic_summary") - - -def test_get_topic_summary_system_prompt_with_custom_profile_no_topic_summary( - mocker: MockerFixture, -) -> None: - """Test that default topic summary prompt is returned when custom profile has - no topic_summary prompt. - """ - test_config = config_dict.copy() - test_config["customization"] = { - "profile_path": "tests/profiles/test/profile.py", - } - cfg = AppConfig() - cfg.init_from_dict(test_config) - - # Mock the custom profile to return None for topic_summary prompt - mock_profile = mocker.Mock() - mock_profile.get_prompts.return_value = { - "default": "some prompt" - } # No topic_summary key - - # Patch the custom_profile property to return our mock - mocker.patch.object(cfg.customization, "custom_profile", mock_profile) - - topic_summary_prompt = endpoints.get_topic_summary_system_prompt(cfg) - assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT - - -def test_get_topic_summary_system_prompt_no_customization() -> None: - """Test that default topic summary prompt is returned when customization is None.""" - test_config = config_dict.copy() - test_config["customization"] = None - cfg = AppConfig() - cfg.init_from_dict(test_config) - - topic_summary_prompt = endpoints.get_topic_summary_system_prompt(cfg) - assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT - - # Tests for unified create_referenced_documents function class TestCreateReferencedDocuments: """Test cases for the unified create_referenced_documents function.""" @@ -492,109 +169,285 @@ def test_create_referenced_documents_invalid_urls(self) -> None: assert result[1].doc_title == "doc1" -@pytest.mark.asyncio -async def test_cleanup_after_streaming_generate_topic_summary_default_true( - mocker: MockerFixture, -) -> None: - """Test that topic summary is generated by default for new conversations.""" - mock_is_transcripts_enabled = mocker.Mock(return_value=False) - mock_get_topic_summary = mocker.AsyncMock(return_value="Generated topic") - mock_store_transcript = mocker.Mock() - mock_persist_conversation = mocker.Mock() - mock_client = mocker.AsyncMock() - mock_config = mocker.Mock() - - mock_session = mocker.Mock() - mock_session.query.return_value.filter_by.return_value.first.return_value = None - mock_session.__enter__ = mocker.Mock(return_value=mock_session) - mock_session.__exit__ = mocker.Mock(return_value=None) - mocker.patch("utils.endpoints.get_session", return_value=mock_session) - - mocker.patch( - "utils.endpoints.create_referenced_documents_with_metadata", return_value=[] - ) - mocker.patch("utils.endpoints.store_conversation_into_cache") - - query_request = QueryRequest(query="test query") # pyright: ignore[reportCallIssue] - - await 
endpoints.cleanup_after_streaming( - user_id="test_user", - conversation_id="test_conv_id", - model_id="test_model", - provider_id="test_provider", - llama_stack_model_id="test_llama_model", - query_request=query_request, - summary=mocker.Mock( - llm_response="test response", tool_calls=[], tool_results=[] - ), - metadata_map={}, - started_at="2024-01-01T00:00:00Z", - client=mock_client, - config=mock_config, - skip_userid_check=False, - get_topic_summary_func=mock_get_topic_summary, - is_transcripts_enabled_func=mock_is_transcripts_enabled, - store_transcript_func=mock_store_transcript, - persist_user_conversation_details_func=mock_persist_conversation, - ) - - mock_get_topic_summary.assert_called_once_with( - "test query", mock_client, "test_llama_model" - ) - - mock_persist_conversation.assert_called_once() - assert mock_persist_conversation.call_args[1]["topic_summary"] == "Generated topic" - - -@pytest.mark.asyncio -async def test_cleanup_after_streaming_generate_topic_summary_explicit_false( - mocker: MockerFixture, -) -> None: - """Test that topic summary is NOT generated when explicitly set to False.""" - mock_is_transcripts_enabled = mocker.Mock(return_value=False) - mock_get_topic_summary = mocker.AsyncMock(return_value="Generated topic") - mock_store_transcript = mocker.Mock() - mock_persist_conversation = mocker.Mock() - mock_client = mocker.AsyncMock() - mock_config = mocker.Mock() - - mock_session = mocker.Mock() - mock_session.query.return_value.filter_by.return_value.first.return_value = None - mock_session.__enter__ = mocker.Mock(return_value=mock_session) - mock_session.__exit__ = mocker.Mock(return_value=None) - mocker.patch("utils.endpoints.get_session", return_value=mock_session) - - mocker.patch( - "utils.endpoints.create_referenced_documents_with_metadata", return_value=[] - ) - mocker.patch("utils.endpoints.store_conversation_into_cache") - - query_request = QueryRequest( - query="test query", generate_topic_summary=False - ) # pyright: ignore[reportCallIssue] - - await endpoints.cleanup_after_streaming( - user_id="test_user", - conversation_id="test_conv_id", - model_id="test_model", - provider_id="test_provider", - llama_stack_model_id="test_llama_model", - query_request=query_request, - summary=mocker.Mock( - llm_response="test response", tool_calls=[], tool_results=[] - ), - metadata_map={}, - started_at="2024-01-01T00:00:00Z", - client=mock_client, - config=mock_config, - skip_userid_check=False, - get_topic_summary_func=mock_get_topic_summary, - is_transcripts_enabled_func=mock_is_transcripts_enabled, - store_transcript_func=mock_store_transcript, - persist_user_conversation_details_func=mock_persist_conversation, - ) - - mock_get_topic_summary.assert_not_called() - - mock_persist_conversation.assert_called_once() - assert mock_persist_conversation.call_args[1]["topic_summary"] is None +class TestValidateAndRetrieveConversation: + """Tests for validate_and_retrieve_conversation function.""" + + def test_successful_retrieval(self, mocker: MockerFixture) -> None: + """Test successful conversation retrieval when user has access.""" + normalized_conv_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mock_conversation = mocker.Mock(spec=UserConversation) + mock_conversation.id = normalized_conv_id + mock_conversation.user_id = user_id + + mocker.patch("utils.endpoints.can_access_conversation", return_value=True) + mocker.patch( + "utils.endpoints.retrieve_conversation", return_value=mock_conversation + ) + + result = 
endpoints.validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=False, + ) + + assert result == mock_conversation + + def test_forbidden_access(self, mocker: MockerFixture) -> None: + """Test that 403 Forbidden is raised when user doesn't have access.""" + normalized_conv_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mocker.patch("utils.endpoints.can_access_conversation", return_value=False) + mocker.patch("utils.endpoints.logger") + + with pytest.raises(HTTPException) as exc_info: + endpoints.validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=False, + ) + + assert exc_info.value.status_code == 403 + # Check that it's a forbidden response with proper error details + assert isinstance(exc_info.value.detail, dict) + assert "response" in exc_info.value.detail + assert "cause" in exc_info.value.detail + + def test_conversation_not_found(self, mocker: MockerFixture) -> None: + """Test that 404 Not Found is raised when conversation doesn't exist.""" + normalized_conv_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mocker.patch("utils.endpoints.can_access_conversation", return_value=True) + mocker.patch("utils.endpoints.retrieve_conversation", return_value=None) + mocker.patch("utils.endpoints.logger") + + with pytest.raises(HTTPException) as exc_info: + endpoints.validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=False, + ) + + assert exc_info.value.status_code == 404 + # Check that it's a not found response with proper error details + assert isinstance(exc_info.value.detail, dict) + assert "response" in exc_info.value.detail + assert "cause" in exc_info.value.detail + + def test_database_error(self, mocker: MockerFixture) -> None: + """Test that 500 Internal Server Error is raised on database error.""" + normalized_conv_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mocker.patch("utils.endpoints.can_access_conversation", return_value=True) + mocker.patch( + "utils.endpoints.retrieve_conversation", + side_effect=SQLAlchemyError("Database connection error", None, None), + ) + mocker.patch("utils.endpoints.logger") + + with pytest.raises(HTTPException) as exc_info: + endpoints.validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=False, + ) + + assert exc_info.value.status_code == 500 + # Check that it's an internal server error response with proper error details + assert isinstance(exc_info.value.detail, dict) + assert "response" in exc_info.value.detail + assert "cause" in exc_info.value.detail + + def test_successful_retrieval_with_others_allowed( + self, mocker: MockerFixture + ) -> None: + """Test successful retrieval when others_allowed is True.""" + normalized_conv_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mock_conversation = mocker.Mock(spec=UserConversation) + mock_conversation.id = normalized_conv_id + mock_conversation.user_id = "other-user" # Different user + + mocker.patch("utils.endpoints.can_access_conversation", return_value=True) + mocker.patch( + "utils.endpoints.retrieve_conversation", return_value=mock_conversation + ) + + result = endpoints.validate_and_retrieve_conversation( + normalized_conv_id=normalized_conv_id, + user_id=user_id, + others_allowed=True, # Allow access to others' conversations + ) + + assert result == mock_conversation + 
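+# Contrast with validate_and_retrieve_conversation above: instead of raising
+# HTTPException (403/404/500), validate_conversation_ownership reports a
+# missing or inaccessible conversation by returning None. A hypothetical
+# caller sketch (argument names taken from the tests below):
+#
+#     conversation = endpoints.validate_conversation_ownership(
+#         user_id=user_id, conversation_id=conversation_id
+#     )
+#     if conversation is None:
+#         ...  # map to an HTTP error at the endpoint layer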
+ +class TestValidateConversationOwnership: + """Tests for validate_conversation_ownership function.""" + + def test_successful_retrieval_own_conversation(self, mocker: MockerFixture) -> None: + """Test successful retrieval when conversation belongs to user.""" + conversation_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mock_conversation = mocker.Mock(spec=UserConversation) + mock_conversation.id = conversation_id + mock_conversation.user_id = user_id + + # Mock the database session and query chain + mock_query = mocker.Mock() + mock_filtered_query = mocker.Mock() + mock_filtered_query.first.return_value = mock_conversation + mock_query.filter_by.return_value = mock_filtered_query + + mock_session = mocker.Mock() + mock_session.query.return_value = mock_query + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + + mocker.patch("utils.endpoints.get_session", return_value=mock_session) + + result = endpoints.validate_conversation_ownership( + user_id=user_id, + conversation_id=conversation_id, + others_allowed=False, + ) + + assert result == mock_conversation + # Verify filter_by was called with both id and user_id + mock_query.filter_by.assert_called_once_with( + id=conversation_id, user_id=user_id + ) + + def test_returns_none_when_not_own_conversation( + self, mocker: MockerFixture + ) -> None: + """Test returns None when conversation doesn't belong to user.""" + conversation_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + # Mock the database session and query chain - returns None + mock_query = mocker.Mock() + mock_filtered_query = mocker.Mock() + mock_filtered_query.first.return_value = None + mock_query.filter_by.return_value = mock_filtered_query + + mock_session = mocker.Mock() + mock_session.query.return_value = mock_query + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + + mocker.patch("utils.endpoints.get_session", return_value=mock_session) + + result = endpoints.validate_conversation_ownership( + user_id=user_id, + conversation_id=conversation_id, + others_allowed=False, + ) + + assert result is None + # Verify filter_by was called with both id and user_id + mock_query.filter_by.assert_called_once_with( + id=conversation_id, user_id=user_id + ) + + def test_successful_retrieval_others_allowed(self, mocker: MockerFixture) -> None: + """Test successful retrieval when others_allowed=True (admin access).""" + conversation_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mock_conversation = mocker.Mock(spec=UserConversation) + mock_conversation.id = conversation_id + mock_conversation.user_id = "other-user" # Different user + + # Mock the database session and query chain + mock_query = mocker.Mock() + mock_filtered_query = mocker.Mock() + mock_filtered_query.first.return_value = mock_conversation + mock_query.filter_by.return_value = mock_filtered_query + + mock_session = mocker.Mock() + mock_session.query.return_value = mock_query + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + + mocker.patch("utils.endpoints.get_session", return_value=mock_session) + + result = endpoints.validate_conversation_ownership( + user_id=user_id, + conversation_id=conversation_id, + others_allowed=True, + ) + + assert result == mock_conversation + # Verify filter_by was called with only id (not user_id) when 
others_allowed=True + mock_query.filter_by.assert_called_once_with(id=conversation_id) + + def test_returns_none_when_conversation_not_found_others_allowed( + self, mocker: MockerFixture + ) -> None: + """Test returns None when conversation doesn't exist even with others_allowed=True.""" + conversation_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + # Mock the database session and query chain - returns None + mock_query = mocker.Mock() + mock_filtered_query = mocker.Mock() + mock_filtered_query.first.return_value = None + mock_query.filter_by.return_value = mock_filtered_query + + mock_session = mocker.Mock() + mock_session.query.return_value = mock_query + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + + mocker.patch("utils.endpoints.get_session", return_value=mock_session) + + result = endpoints.validate_conversation_ownership( + user_id=user_id, + conversation_id=conversation_id, + others_allowed=True, + ) + + assert result is None + # Verify filter_by was called with only id + mock_query.filter_by.assert_called_once_with(id=conversation_id) + + def test_default_others_allowed_false(self, mocker: MockerFixture) -> None: + """Test that others_allowed defaults to False.""" + conversation_id = "123e4567-e89b-12d3-a456-426614174000" + user_id = "user-123" + + mock_conversation = mocker.Mock(spec=UserConversation) + mock_conversation.id = conversation_id + mock_conversation.user_id = user_id + + # Mock the database session and query chain + mock_query = mocker.Mock() + mock_filtered_query = mocker.Mock() + mock_filtered_query.first.return_value = mock_conversation + mock_query.filter_by.return_value = mock_filtered_query + + mock_session = mocker.Mock() + mock_session.query.return_value = mock_query + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + + mocker.patch("utils.endpoints.get_session", return_value=mock_session) + + # Call without others_allowed parameter (should default to False) + result = endpoints.validate_conversation_ownership( + user_id=user_id, + conversation_id=conversation_id, + ) + + assert result == mock_conversation + # Verify filter_by was called with both id and user_id (default behavior) + mock_query.filter_by.assert_called_once_with( + id=conversation_id, user_id=user_id + ) diff --git a/tests/unit/utils/test_prompts.py b/tests/unit/utils/test_prompts.py new file mode 100644 index 000000000..acbf6b219 --- /dev/null +++ b/tests/unit/utils/test_prompts.py @@ -0,0 +1,304 @@ +"""Unit tests for prompts utility functions.""" + +import pytest +from fastapi import HTTPException +from pytest_mock import MockerFixture + +import constants +from configuration import AppConfig +from models.config import CustomProfile +from models.requests import QueryRequest +from tests.unit import config_dict +from utils import prompts + +CONFIGURED_SYSTEM_PROMPT = "This is a configured system prompt" + + +@pytest.fixture(name="config_without_system_prompt") +def config_without_system_prompt_fixture() -> AppConfig: + """Configuration w/o custom system prompt set.""" + test_config = config_dict.copy() + + # no customization provided + test_config["customization"] = None + + cfg = AppConfig() + cfg.init_from_dict(test_config) + + return cfg + + +@pytest.fixture(name="config_with_custom_system_prompt") +def config_with_custom_system_prompt_fixture() -> AppConfig: + """Configuration with custom system prompt set.""" + 
test_config = config_dict.copy()
+
+    # system prompt is customized
+    test_config["customization"] = {
+        "system_prompt": CONFIGURED_SYSTEM_PROMPT,
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    return cfg
+
+
+@pytest.fixture(name="config_with_custom_system_prompt_and_disable_query_system_prompt")
+def config_with_custom_system_prompt_and_disable_query_system_prompt_fixture() -> (
+    AppConfig
+):
+    """Configuration with custom system prompt and disabled query system prompt set."""
+    test_config = config_dict.copy()
+
+    # system prompt is customized and query system prompt is disabled
+    test_config["customization"] = {
+        "system_prompt": CONFIGURED_SYSTEM_PROMPT,
+        "disable_query_system_prompt": True,
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    return cfg
+
+
+@pytest.fixture(
+    name="config_with_custom_profile_prompt_and_enabled_query_system_prompt"
+)
+def config_with_custom_profile_prompt_and_enabled_query_system_prompt_fixture() -> (
+    AppConfig
+):
+    """Configuration with custom profile loaded for prompt and enabled query system prompt set."""
+    test_config = config_dict.copy()
+
+    test_config["customization"] = {
+        "profile_path": "tests/profiles/test/profile.py",
+        "system_prompt": CONFIGURED_SYSTEM_PROMPT,
+        "disable_query_system_prompt": False,
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    return cfg
+
+
+@pytest.fixture(
+    name="config_with_custom_profile_prompt_and_disable_query_system_prompt"
+)
+def config_with_custom_profile_prompt_and_disable_query_system_prompt_fixture() -> (
+    AppConfig
+):
+    """Configuration with custom profile loaded for prompt and disabled query system prompt set."""
+    test_config = config_dict.copy()
+
+    test_config["customization"] = {
+        "profile_path": "tests/profiles/test/profile.py",
+        "system_prompt": CONFIGURED_SYSTEM_PROMPT,
+        "disable_query_system_prompt": True,
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    return cfg
+
+
+@pytest.fixture(name="query_request_without_system_prompt")
+def query_request_without_system_prompt_fixture() -> QueryRequest:
+    """Fixture for query request without system prompt."""
+    return QueryRequest(
+        query="query", system_prompt=None
+    )  # pyright: ignore[reportCallIssue]
+
+
+@pytest.fixture(name="query_request_with_system_prompt")
+def query_request_with_system_prompt_fixture() -> QueryRequest:
+    """Fixture for query request with system prompt."""
+    return QueryRequest(
+        query="query", system_prompt="System prompt defined in query"
+    )  # pyright: ignore[reportCallIssue]
+
+
+@pytest.fixture(name="setup_configuration")
+def setup_configuration_fixture() -> AppConfig:
+    """Set up configuration for tests."""
+    test_config_dict = {
+        "name": "test",
+        "service": {
+            "host": "localhost",
+            "port": 8080,
+            "auth_enabled": False,
+            "workers": 1,
+            "color_log": True,
+            "access_log": True,
+        },
+        "llama_stack": {
+            "api_key": "test-key",
+            "url": "http://test.com:1234",
+            "use_as_library_client": False,
+        },
+        "user_data_collection": {
+            "transcripts_enabled": False,
+        },
+        "mcp_servers": [],
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config_dict)
+    return cfg
+
+
+def test_get_default_system_prompt(
+    config_without_system_prompt: AppConfig,
+    query_request_without_system_prompt: QueryRequest,
+) -> None:
+    """Test that default system prompt is returned when other prompts are not provided."""
+    system_prompt = prompts.get_system_prompt(
+        query_request_without_system_prompt, config_without_system_prompt
+    )
+    assert system_prompt == 
constants.DEFAULT_SYSTEM_PROMPT + + +def test_get_customized_system_prompt( + config_with_custom_system_prompt: AppConfig, + query_request_without_system_prompt: QueryRequest, +) -> None: + """Test that customized system prompt is used when system prompt is not provided in query.""" + system_prompt = prompts.get_system_prompt( + query_request_without_system_prompt, config_with_custom_system_prompt + ) + assert system_prompt == CONFIGURED_SYSTEM_PROMPT + + +def test_get_query_system_prompt( + config_without_system_prompt: AppConfig, + query_request_with_system_prompt: QueryRequest, +) -> None: + """Test that system prompt from query is returned.""" + system_prompt = prompts.get_system_prompt( + query_request_with_system_prompt, config_without_system_prompt + ) + assert system_prompt == query_request_with_system_prompt.system_prompt + + +def test_get_query_system_prompt_not_customized_one( + config_with_custom_system_prompt: AppConfig, + query_request_with_system_prompt: QueryRequest, +) -> None: + """Test that system prompt from query is returned even when customized one is specified.""" + system_prompt = prompts.get_system_prompt( + query_request_with_system_prompt, config_with_custom_system_prompt + ) + assert system_prompt == query_request_with_system_prompt.system_prompt + + +def test_get_system_prompt_with_disable_query_system_prompt( + config_with_custom_system_prompt_and_disable_query_system_prompt: AppConfig, + query_request_with_system_prompt: QueryRequest, +) -> None: + """Test that query system prompt is disallowed when disable_query_system_prompt is True.""" + with pytest.raises(HTTPException) as exc_info: + prompts.get_system_prompt( + query_request_with_system_prompt, + config_with_custom_system_prompt_and_disable_query_system_prompt, + ) + assert exc_info.value.status_code == 422 + + +def test_get_system_prompt_with_disable_query_system_prompt_and_non_system_prompt_query( + config_with_custom_system_prompt_and_disable_query_system_prompt: AppConfig, + query_request_without_system_prompt: QueryRequest, +) -> None: + """Test that query without system prompt is allowed when disable_query_system_prompt is True.""" + system_prompt = prompts.get_system_prompt( + query_request_without_system_prompt, + config_with_custom_system_prompt_and_disable_query_system_prompt, + ) + assert system_prompt == CONFIGURED_SYSTEM_PROMPT + + +def test_get_profile_prompt_with_disable_query_system_prompt( + config_with_custom_profile_prompt_and_disable_query_system_prompt: AppConfig, + query_request_without_system_prompt: QueryRequest, +) -> None: + """Test that system prompt is set if profile enabled and query system prompt disabled.""" + custom_profile = CustomProfile(path="tests/profiles/test/profile.py") + profile_prompts = custom_profile.get_prompts() + system_prompt = prompts.get_system_prompt( + query_request_without_system_prompt, + config_with_custom_profile_prompt_and_disable_query_system_prompt, + ) + assert system_prompt == profile_prompts.get("default") + + +def test_get_profile_prompt_with_enabled_query_system_prompt( + config_with_custom_profile_prompt_and_enabled_query_system_prompt: AppConfig, + query_request_with_system_prompt: QueryRequest, +) -> None: + """Test that profile system prompt is overridden by query system prompt enabled.""" + system_prompt = prompts.get_system_prompt( + query_request_with_system_prompt, + config_with_custom_profile_prompt_and_enabled_query_system_prompt, + ) + assert system_prompt == query_request_with_system_prompt.system_prompt + + +def 
test_get_topic_summary_system_prompt_default(
+    setup_configuration: AppConfig,
+) -> None:
+    """Test that default topic summary system prompt is returned when no custom
+    profile is configured.
+    """
+    topic_summary_prompt = prompts.get_topic_summary_system_prompt(setup_configuration)
+    assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT
+
+
+def test_get_topic_summary_system_prompt_with_custom_profile() -> None:
+    """Test that custom profile topic summary prompt is returned when available."""
+    test_config = config_dict.copy()
+    test_config["customization"] = {
+        "profile_path": "tests/profiles/test/profile.py",
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    # Load the real custom profile to obtain the expected topic_summary prompt
+    custom_profile = CustomProfile(path="tests/profiles/test/profile.py")
+    profile_prompts = custom_profile.get_prompts()
+
+    topic_summary_prompt = prompts.get_topic_summary_system_prompt(cfg)
+    assert topic_summary_prompt == profile_prompts.get("topic_summary")
+
+
+def test_get_topic_summary_system_prompt_with_custom_profile_no_topic_summary(
+    mocker: MockerFixture,
+) -> None:
+    """Test that default topic summary prompt is returned when custom profile has
+    no topic_summary prompt.
+    """
+    test_config = config_dict.copy()
+    test_config["customization"] = {
+        "profile_path": "tests/profiles/test/profile.py",
+    }
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    # Mock the custom profile to return None for topic_summary prompt
+    mock_profile = mocker.Mock()
+    mock_profile.get_prompts.return_value = {
+        "default": "some prompt"
+    }  # No topic_summary key
+
+    # Patch the custom_profile property to return our mock
+    mocker.patch.object(cfg.customization, "custom_profile", mock_profile)
+
+    topic_summary_prompt = prompts.get_topic_summary_system_prompt(cfg)
+    assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT
+
+
+def test_get_topic_summary_system_prompt_no_customization() -> None:
+    """Test that default topic summary prompt is returned when customization is None."""
+    test_config = config_dict.copy()
+    test_config["customization"] = None
+    cfg = AppConfig()
+    cfg.init_from_dict(test_config)
+
+    topic_summary_prompt = prompts.get_topic_summary_system_prompt(cfg)
+    assert topic_summary_prompt == constants.DEFAULT_TOPIC_SUMMARY_SYSTEM_PROMPT
diff --git a/tests/unit/utils/test_query.py b/tests/unit/utils/test_query.py
new file mode 100644
index 000000000..26bd0f0a2
--- /dev/null
+++ b/tests/unit/utils/test_query.py
@@ -0,0 +1,1027 @@
+"""Unit tests for utils/query.py functions."""
+
+# pylint: disable=too-many-lines
+
+import sqlite3
+from typing import Any
+
+import psycopg2
+import pytest
+from fastapi import HTTPException
+from llama_stack_client import APIConnectionError, APIStatusError
+from llama_stack_client.types import ModelListResponse
+from pytest_mock import MockerFixture
+from sqlalchemy.exc import SQLAlchemyError
+
+from cache.cache_error import CacheError
+from configuration import AppConfig
+from models.cache_entry import CacheEntry
+from models.config import Action
+from models.database.conversations import UserConversation, UserTurn
+from models.requests import Attachment, QueryRequest
+from models.responses import (
+    InternalServerErrorResponse,
+    PromptTooLongResponse,
+    QuotaExceededResponse,
+)
+
+from tests.unit import config_dict
+from utils.query import (
+    consume_query_tokens,
+    evaluate_model_hints,
+    extract_provider_and_model_from_model_id,
+    handle_known_apistatus_errors,
+    
is_input_shield, + is_output_shield, + is_transcripts_enabled, + persist_user_conversation_details, + prepare_input, + select_model_and_provider_id, + store_conversation_into_cache, + store_query_results, + update_azure_token, + validate_attachments_metadata, + validate_model_provider_override, +) +from utils.token_counter import TokenCounter +from utils.types import TurnSummary + + +@pytest.fixture(name="mock_config") +def mock_config_fixture() -> AppConfig: + """Create a mock configuration for tests.""" + cfg = AppConfig() + cfg.init_from_dict(config_dict) + return cfg + + +@pytest.fixture(name="mock_models") +def mock_models_fixture() -> ModelListResponse: + """Create mock models list.""" + model1 = type( + "Model", + (), + { + "id": "provider1/model1", + "custom_metadata": {"model_type": "llm", "provider_id": "provider1"}, + }, + )() + model2 = type( + "Model", + (), + { + "id": "provider2/model2", + "custom_metadata": {"model_type": "llm", "provider_id": "provider2"}, + }, + )() + return [model1, model2] + + +class TestStoreConversationIntoCache: + """Tests for store_conversation_into_cache function.""" + + def test_store_with_cache_configured(self, mocker: MockerFixture) -> None: + """Test storing conversation when cache is configured.""" + mock_config = mocker.Mock(spec=AppConfig) + mock_cache = mocker.Mock() + mock_config.conversation_cache = mock_cache + mock_config.conversation_cache_configuration = mocker.Mock() + mock_config.conversation_cache_configuration.type = "sqlite" + + cache_entry = CacheEntry( + query="test query", + response="test response", + provider="test_provider", + model="test_model", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + ) + + store_conversation_into_cache( + config=mock_config, + user_id="test_user", + conversation_id="test_conv", + cache_entry=cache_entry, + _skip_userid_check=False, + topic_summary="Test topic", + ) + + mock_cache.insert_or_append.assert_called_once_with( + "test_user", "test_conv", cache_entry, False + ) + mock_cache.set_topic_summary.assert_called_once_with( + "test_user", "test_conv", "Test topic", False + ) + + def test_store_without_topic_summary(self, mocker: MockerFixture) -> None: + """Test storing conversation without topic summary.""" + mock_config = mocker.Mock(spec=AppConfig) + mock_cache = mocker.Mock() + mock_config.conversation_cache = mock_cache + mock_config.conversation_cache_configuration = mocker.Mock() + mock_config.conversation_cache_configuration.type = "sqlite" + + cache_entry = CacheEntry( + query="test query", + response="test response", + provider="test_provider", + model="test_model", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + ) + + store_conversation_into_cache( + config=mock_config, + user_id="test_user", + conversation_id="test_conv", + cache_entry=cache_entry, + _skip_userid_check=False, + topic_summary=None, + ) + + mock_cache.insert_or_append.assert_called_once() + mock_cache.set_topic_summary.assert_not_called() + + def test_store_with_cache_not_initialized(self, mocker: MockerFixture) -> None: + """Test storing when cache is configured but not initialized.""" + mock_config = mocker.Mock(spec=AppConfig) + mock_config.conversation_cache = None + mock_config.conversation_cache_configuration = mocker.Mock() + mock_config.conversation_cache_configuration.type = "sqlite" + + cache_entry = CacheEntry( + query="test query", + response="test response", + provider="test_provider", + model="test_model", + started_at="2024-01-01T00:00:00Z", + 
completed_at="2024-01-01T00:00:05Z", + ) + + # Should not raise an exception, just log a warning + store_conversation_into_cache( + config=mock_config, + user_id="test_user", + conversation_id="test_conv", + cache_entry=cache_entry, + _skip_userid_check=False, + topic_summary=None, + ) + + +class TestSelectModelAndProviderId: + """Tests for select_model_and_provider_id function.""" + + def test_select_from_request(self, mock_models: ModelListResponse) -> None: + """Test selecting model and provider from request.""" + result = select_model_and_provider_id( + models=mock_models, + model_id="model1", + provider_id="provider1", + ) + assert result == ("provider1/model1", "model1", "provider1") + + def test_select_first_available_llm(self, mock_models: ModelListResponse) -> None: + """Test selecting first available LLM when no model specified.""" + result = select_model_and_provider_id( + models=mock_models, + model_id=None, + provider_id=None, + ) + assert result[0] in ("provider1/model1", "provider2/model2") + assert result[1] in ("model1", "model2") + assert result[2] in ("provider1", "provider2") + + def test_select_model_not_found(self, mock_models: ModelListResponse) -> None: + """Test selecting non-existent model raises HTTPException.""" + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + models=mock_models, + model_id="nonexistent", + provider_id="provider1", + ) + assert exc_info.value.status_code == 404 + + def test_select_model_no_llm_models_available(self, mocker: MockerFixture) -> None: + """Test selecting model when no LLM models are available raises HTTPException.""" + # Mock configuration to have no default model/provider + mocker.patch("utils.query.configuration.inference.default_model", None) + mocker.patch("utils.query.configuration.inference.default_provider", None) + + # Empty models list + empty_models: ModelListResponse = [] + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + models=empty_models, + model_id=None, + provider_id=None, + ) + assert exc_info.value.status_code == 404 + # Verify it's a NotFoundResponse for model resource + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail.get("response") == "Model not found" + assert "Model with ID" in detail.get("cause", "") + + def test_select_model_no_llm_models_with_non_llm_only( + self, mocker: MockerFixture + ) -> None: + """Test selecting model when only non-LLM models are available raises HTTPException.""" + # Mock configuration to have no default model/provider + mocker.patch("utils.query.configuration.inference.default_model", None) + mocker.patch("utils.query.configuration.inference.default_provider", None) + + # Models list with only non-LLM models + non_llm_models = [ + type( + "Model", + (), + { + "id": "provider1/model1", + "custom_metadata": { + "model_type": "embeddings", + "provider_id": "provider1", + }, + }, + )(), + ] + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + models=non_llm_models, + model_id=None, + provider_id=None, + ) + assert exc_info.value.status_code == 404 + # Verify it's a NotFoundResponse for model resource + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail.get("response") == "Model not found" + assert "Model with ID" in detail.get("cause", "") + + def test_select_model_attribute_error(self, mocker: MockerFixture) -> None: + """Test selecting model when model lacks custom_metadata raises HTTPException.""" + # Mock configuration to 
have no default model/provider + mocker.patch("utils.query.configuration.inference.default_model", None) + mocker.patch("utils.query.configuration.inference.default_provider", None) + + # Models list with model that has no custom_metadata attribute + models_without_metadata = [ + type("Model", (), {"id": "provider1/model1"})(), + ] + with pytest.raises(HTTPException) as exc_info: + select_model_and_provider_id( + models=models_without_metadata, + model_id=None, + provider_id=None, + ) + assert exc_info.value.status_code == 404 + # Verify it's a NotFoundResponse for model resource + detail = exc_info.value.detail + assert isinstance(detail, dict) + assert detail.get("response") == "Model not found" + assert "Model with ID" in detail.get("cause", "") + + +class TestValidateModelProviderOverride: + """Tests for validate_model_provider_override function.""" + + def test_allowed_with_action(self) -> None: + """Test that override is allowed when user has MODEL_OVERRIDE action.""" + query_request = QueryRequest( + query="test", model="model1", provider="provider1" + ) # pyright: ignore[reportCallIssue] + validate_model_provider_override(query_request, {Action.MODEL_OVERRIDE}) + + def test_rejected_without_action(self) -> None: + """Test that override is rejected when user lacks MODEL_OVERRIDE action.""" + query_request = QueryRequest( + query="test", model="model1", provider="provider1" + ) # pyright: ignore[reportCallIssue] + with pytest.raises(HTTPException) as exc_info: + validate_model_provider_override(query_request, set()) + assert exc_info.value.status_code == 403 + + def test_no_override_allowed(self) -> None: + """Test that request without override is allowed regardless of permissions.""" + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + validate_model_provider_override(query_request, set()) + + +class TestShieldFunctions: + """Tests for shield-related functions.""" + + def test_is_output_shield_output_prefix(self) -> None: + """Test is_output_shield returns True for output_ prefix.""" + shield = type("Shield", (), {"identifier": "output_test"})() + assert is_output_shield(shield) is True + + def test_is_output_shield_inout_prefix(self) -> None: + """Test is_output_shield returns True for inout_ prefix.""" + shield = type("Shield", (), {"identifier": "inout_test"})() + assert is_output_shield(shield) is True + + def test_is_output_shield_other(self) -> None: + """Test is_output_shield returns False for other prefixes.""" + shield = type("Shield", (), {"identifier": "input_test"})() + assert is_output_shield(shield) is False + + def test_is_input_shield_input_prefix(self) -> None: + """Test is_input_shield returns True for input prefix.""" + shield = type("Shield", (), {"identifier": "input_test"})() + assert is_input_shield(shield) is True + + def test_is_input_shield_inout_prefix(self) -> None: + """Test is_input_shield returns True for inout_ prefix.""" + shield = type("Shield", (), {"identifier": "inout_test"})() + assert is_input_shield(shield) is True + + def test_is_input_shield_output_prefix(self) -> None: + """Test is_input_shield returns False for output_ prefix.""" + shield = type("Shield", (), {"identifier": "output_test"})() + assert is_input_shield(shield) is False + + +class TestEvaluateModelHints: + """Tests for evaluate_model_hints function.""" + + def test_with_user_conversation_no_request_hints(self) -> None: + """Test using hints from user conversation when request has none.""" + user_conv = UserConversation( + id="conv1", + 
user_id="user1", + last_used_model="model1", + last_used_provider="provider1", + ) + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + model_id, provider_id = evaluate_model_hints(user_conv, query_request) + assert model_id == "model1" + assert provider_id == "provider1" + + def test_with_user_conversation_and_request_hints(self) -> None: + """Test request hints take precedence over conversation hints.""" + user_conv = UserConversation( + id="conv1", + user_id="user1", + last_used_model="model1", + last_used_provider="provider1", + ) + query_request = QueryRequest( + query="test", model="model2", provider="provider2" + ) # pyright: ignore[reportCallIssue] + + model_id, provider_id = evaluate_model_hints(user_conv, query_request) + assert model_id == "model2" + assert provider_id == "provider2" + + def test_without_user_conversation(self) -> None: + """Test without user conversation returns request hints or None.""" + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + model_id, provider_id = evaluate_model_hints(None, query_request) + assert model_id is None + assert provider_id is None + + +class TestPrepareInput: + """Tests for prepare_input function.""" + + def test_prepare_input_without_attachments(self) -> None: + """Test preparing input without attachments.""" + query_request = QueryRequest( + query="test query" + ) # pyright: ignore[reportCallIssue] + result = prepare_input(query_request) + assert result == "test query" + + def test_prepare_input_with_attachments(self) -> None: + """Test preparing input with attachments.""" + attachment = Attachment( + attachment_type="text", + content="attachment content", + content_type="text/plain", + ) + query_request = QueryRequest( + query="test query", attachments=[attachment] + ) # pyright: ignore[reportCallIssue] + result = prepare_input(query_request) + assert "test query" in result + assert "[Attachment: text]" in result + assert "attachment content" in result + + +class TestExtractProviderAndModelFromModelId: + """Tests for extract_provider_and_model_from_model_id function.""" + + def test_extract_with_provider(self) -> None: + """Test extracting provider and model from full model ID.""" + provider, model = extract_provider_and_model_from_model_id("provider1/model1") + assert provider == "provider1" + assert model == "model1" + + def test_extract_without_provider(self) -> None: + """Test extracting when model ID has no provider.""" + provider, model = extract_provider_and_model_from_model_id("model1") + assert provider == "" + assert model == "model1" + + +class TestHandleKnownApistatusErrors: + """Tests for handle_known_apistatus_errors function.""" + + def test_context_length_exceeded(self) -> None: + """Test handling context length exceeded error.""" + error = type( + "APIStatusError", + (), + {"status_code": 400, "message": "context_length_exceeded: prompt too long"}, + )() + result = handle_known_apistatus_errors(error, "model1") + assert isinstance(result, PromptTooLongResponse) + detail = result.model_dump()["detail"] + assert detail["response"] == "Prompt is too long" + assert "model1" in detail["cause"] + assert "context window size" in detail["cause"] + + def test_quota_exceeded(self) -> None: + """Test handling quota exceeded error.""" + error = type( + "APIStatusError", (), {"status_code": 429, "message": "Rate limit exceeded"} + )() + result = handle_known_apistatus_errors(error, "model1") + assert isinstance(result, QuotaExceededResponse) + detail = 
result.model_dump()["detail"] + assert "quota" in detail["response"].lower() + + def test_generic_error(self) -> None: + """Test handling generic error.""" + error = type( + "APIStatusError", + (), + {"status_code": 500, "message": "Internal server error"}, + )() + result = handle_known_apistatus_errors(error, "model1") + assert isinstance(result, InternalServerErrorResponse) + detail = result.model_dump()["detail"] + assert detail["response"] == "Internal server error" + + +class TestValidateAttachmentsMetadata: + """Tests for validate_attachments_metadata function.""" + + def test_valid_attachment(self) -> None: + """Test validation passes for valid attachment.""" + attachment = Attachment( + attachment_type="log", + content="content", + content_type="text/plain", + ) + validate_attachments_metadata([attachment]) + + def test_invalid_attachment_type(self) -> None: + """Test validation fails for invalid attachment type.""" + attachment = Attachment( + attachment_type="invalid", + content="content", + content_type="text/plain", + ) + with pytest.raises(HTTPException) as exc_info: + validate_attachments_metadata([attachment]) + assert exc_info.value.status_code == 422 + + def test_invalid_content_type(self) -> None: + """Test validation fails for invalid content type.""" + # Use valid attachment_type to ensure we hit the content_type check + attachment = Attachment( + attachment_type="log", + content="content", + content_type="invalid/type", + ) + with pytest.raises(HTTPException) as exc_info: + validate_attachments_metadata([attachment]) + assert exc_info.value.status_code == 422 + assert "Invalid attachment content type" in str(exc_info.value.detail) + + +class TestIsTranscriptsEnabled: + """Tests for is_transcripts_enabled function.""" + + def test_transcripts_enabled(self, mocker: MockerFixture) -> None: + """Test when transcripts are enabled.""" + mocker.patch( + "utils.query.configuration.user_data_collection_configuration.transcripts_enabled", + True, + ) + assert is_transcripts_enabled() is True + + def test_transcripts_disabled(self, mocker: MockerFixture) -> None: + """Test when transcripts are disabled.""" + mocker.patch( + "utils.query.configuration.user_data_collection_configuration.transcripts_enabled", + False, + ) + assert is_transcripts_enabled() is False + + +class TestPersistUserConversationDetails: + """Tests for persist_user_conversation_details function.""" + + def test_create_new_conversation(self, mocker: MockerFixture) -> None: + """Test creating a new conversation.""" + mock_session = mocker.Mock() + + # Mock the UserConversation query + mock_conv_query = mocker.Mock() + mock_conv_query.filter_by.return_value.first.return_value = None + + # Mock the max turn number query + mock_filtered_query = mocker.Mock() + mock_filtered_query.scalar.return_value = None + mock_max_query = mocker.Mock() + mock_max_query.filter_by.return_value = mock_filtered_query + + def query_side_effect(*args: Any) -> Any: + """Route queries based on the argument type.""" + if args and args[0] is UserConversation: + return mock_conv_query + return mock_max_query + + mock_session.query.side_effect = query_side_effect + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + mocker.patch("utils.query.get_session", return_value=mock_session) + + persist_user_conversation_details( + user_id="user1", + conversation_id="conv1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + model_id="model1", + 
provider_id="provider1", + topic_summary="Topic", + ) + + mock_session.add.assert_called() + mock_session.commit.assert_called_once() + + def test_update_existing_conversation(self, mocker: MockerFixture) -> None: + """Test updating an existing conversation.""" + existing_conv = UserConversation( + id="conv1", + user_id="user1", + last_used_model="old_model", + last_used_provider="old_provider", + message_count=5, + ) + mock_session = mocker.Mock() + + # Mock the UserConversation query + mock_conv_query = mocker.Mock() + mock_conv_query.filter_by.return_value.first.return_value = existing_conv + + # Mock the max turn number query + # The query chain is: session.query(func.max(...)).filter_by(...).scalar() + mock_filtered_query = mocker.Mock() + mock_filtered_query.scalar.return_value = None + mock_max_query = mocker.Mock() + mock_max_query.filter_by.return_value = mock_filtered_query + + def query_side_effect(*args: Any) -> Any: + """Route queries based on the argument type.""" + if args and args[0] is UserConversation: + return mock_conv_query + # func.max(UserTurn.turn_number) doesn't match UserTurn type, falls through + return mock_max_query + + mock_session.query.side_effect = query_side_effect + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + mocker.patch("utils.query.get_session", return_value=mock_session) + + persist_user_conversation_details( + user_id="user1", + conversation_id="conv1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + model_id="new_model", + provider_id="new_provider", + topic_summary=None, + ) + + assert existing_conv.last_used_model == "new_model" + assert existing_conv.last_used_provider == "new_provider" + assert existing_conv.message_count == 6 + mock_session.commit.assert_called_once() + + def test_create_new_conversation_with_existing_turns( + self, mocker: MockerFixture + ) -> None: + """Test creating a new conversation when there are existing turns.""" + mock_session = mocker.Mock() + + # Mock the UserConversation query + mock_conv_query = mocker.Mock() + mock_conv_query.filter_by.return_value.first.return_value = None + + # Mock the max turn number query - return existing turn number + mock_filtered_query = mocker.Mock() + mock_filtered_query.scalar.return_value = 5 # Existing max turn number + mock_max_query = mocker.Mock() + mock_max_query.filter_by.return_value = mock_filtered_query + + def query_side_effect(*args: Any) -> Any: + """Route queries based on the argument type.""" + if args and args[0] is UserConversation: + return mock_conv_query + # func.max(UserTurn.turn_number) doesn't match UserTurn type, falls through + return mock_max_query + + mock_session.query.side_effect = query_side_effect + mock_session.__enter__ = mocker.Mock(return_value=mock_session) + mock_session.__exit__ = mocker.Mock(return_value=None) + mocker.patch("utils.query.get_session", return_value=mock_session) + + persist_user_conversation_details( + user_id="user1", + conversation_id="conv1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + model_id="model1", + provider_id="provider1", + topic_summary="Topic", + ) + + # Verify that the turn number is incremented correctly + add_calls = mock_session.add.call_args_list + assert len(add_calls) == 2 # Conversation and UserTurn + + # Find the UserTurn object in the add calls + turn_added = None + for call in add_calls: + obj = call[0][0] + if isinstance(obj, UserTurn): + turn_added = obj + break + 
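+        # With the mocked max turn number of 5, the new UserTurn should be stored as turn 6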
+ assert turn_added is not None, "UserTurn should have been added" + assert ( + turn_added.turn_number == 6 + ), "Turn number should be incremented from 5 to 6" + mock_session.commit.assert_called_once() + + +class TestConsumeQueryTokens: + """Tests for consume_query_tokens function.""" + + def test_consume_tokens_success( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test successful token consumption.""" + mock_consume = mocker.patch("utils.query.consume_tokens") + + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + consume_query_tokens( + user_id="user1", + model_id="provider1/model1", + token_usage=token_usage, + configuration=mock_config, + ) + + # Verify consume_tokens was called + mock_consume.assert_called_once() + + def test_consume_tokens_database_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test token consumption raises HTTPException on database error.""" + mocker.patch( + "utils.query.consume_tokens", side_effect=sqlite3.Error("DB error") + ) + + token_usage = TokenCounter(input_tokens=100, output_tokens=50) + with pytest.raises(HTTPException) as exc_info: + consume_query_tokens( + user_id="user1", + model_id="provider1/model1", + token_usage=token_usage, + configuration=mock_config, + ) + assert exc_info.value.status_code == 500 + + +class TestUpdateAzureToken: + """Tests for update_azure_token function.""" + + @pytest.mark.asyncio + async def test_update_with_library_client(self, mocker: MockerFixture) -> None: + """Test updating token with library client.""" + mock_client_holder = mocker.Mock() + mock_client_holder.is_library_client = True + mock_client_holder.reload_library_client = mocker.AsyncMock( + return_value="client" + ) + mocker.patch( + "utils.query.AsyncLlamaStackClientHolder", return_value=mock_client_holder + ) + + mock_client = mocker.Mock() + result = await update_azure_token(mock_client) + assert result == "client" + mock_client_holder.reload_library_client.assert_called_once() + + @pytest.mark.asyncio + async def test_update_with_remote_client(self, mocker: MockerFixture) -> None: + """Test updating token with remote client.""" + mock_client_holder = mocker.Mock() + mock_client_holder.is_library_client = False + mock_client_holder.update_provider_data = mocker.Mock( + return_value="updated_client" + ) + mocker.patch( + "utils.query.AsyncLlamaStackClientHolder", return_value=mock_client_holder + ) + + mock_provider = type( + "Provider", + (), + { + "provider_type": "remote::azure", + "config": {"api_base": "https://api.example.com"}, + }, + )() + mock_client = mocker.AsyncMock() + mock_client.providers.list = mocker.AsyncMock(return_value=[mock_provider]) + + mocker.patch( + "utils.query.AzureEntraIDManager", + return_value=mocker.Mock( + access_token=mocker.Mock( + get_secret_value=mocker.Mock(return_value="token") + ) + ), + ) + + result = await update_azure_token(mock_client) + assert result == "updated_client" + + @pytest.mark.asyncio + async def test_update_with_connection_error(self, mocker: MockerFixture) -> None: + """Test updating token raises HTTPException on connection error.""" + mock_client_holder = mocker.Mock() + mock_client_holder.is_library_client = False + mocker.patch( + "utils.query.AsyncLlamaStackClientHolder", return_value=mock_client_holder + ) + + mock_client = mocker.AsyncMock() + mock_client.providers.list = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + with pytest.raises(HTTPException) as 
exc_info: + await update_azure_token(mock_client) + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_update_with_api_status_error(self, mocker: MockerFixture) -> None: + """Test updating token raises HTTPException on API status error.""" + mock_client_holder = mocker.Mock() + mock_client_holder.is_library_client = False + mocker.patch( + "utils.query.AsyncLlamaStackClientHolder", return_value=mock_client_holder + ) + + mock_client = mocker.AsyncMock() + # Create a mock exception that will be caught by except APIStatusError + mock_error = APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + mock_client.providers.list = mocker.AsyncMock(side_effect=mock_error) + + with pytest.raises(HTTPException) as exc_info: + await update_azure_token(mock_client) + assert exc_info.value.status_code == 500 + + +class TestStoreQueryResults: + """Tests for store_query_results function.""" + + def test_store_query_results_success( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test successful storage of query results.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mock_persist = mocker.patch("utils.query.persist_user_conversation_details") + mock_store_cache = mocker.patch("utils.query.store_conversation_into_cache") + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary="Topic", + ) + + # Verify functions were called + mock_persist.assert_called_once() + mock_store_cache.assert_called_once() + + def test_store_query_results_transcript_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on transcript error.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=True) + mocker.patch("utils.query.store_transcript", side_effect=IOError("IO error")) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 + + def test_store_query_results_sqlalchemy_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on SQLAlchemy error.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mocker.patch( + "utils.query.persist_user_conversation_details", + side_effect=SQLAlchemyError("Database error", None, None), + ) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + 
started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 + + def test_store_query_results_cache_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on cache error.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mocker.patch("utils.query.persist_user_conversation_details") + mocker.patch( + "utils.query.store_conversation_into_cache", + side_effect=CacheError("Cache error"), + ) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 + + def test_store_query_results_value_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on ValueError.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mocker.patch("utils.query.persist_user_conversation_details") + mocker.patch( + "utils.query.store_conversation_into_cache", + side_effect=ValueError("Invalid value"), + ) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 + + def test_store_query_results_psycopg2_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on psycopg2 error.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mocker.patch("utils.query.persist_user_conversation_details") + mocker.patch( + "utils.query.store_conversation_into_cache", + side_effect=psycopg2.Error("PostgreSQL error"), + ) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 + + def test_store_query_results_sqlite_error( + self, mock_config: AppConfig, mocker: MockerFixture + ) -> None: + """Test storage raises HTTPException on sqlite3 error.""" + mocker.patch("utils.query.is_transcripts_enabled", return_value=False) + mocker.patch("utils.query.persist_user_conversation_details") + mocker.patch( + 
"utils.query.store_conversation_into_cache", + side_effect=sqlite3.Error("SQLite error"), + ) + + summary = TurnSummary() + summary.llm_response = "response" + summary.rag_chunks = [] + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + with pytest.raises(HTTPException) as exc_info: + store_query_results( + user_id="user1", + conversation_id="conv1", + model="provider1/model1", + started_at="2024-01-01T00:00:00Z", + completed_at="2024-01-01T00:00:05Z", + summary=summary, + query_request=query_request, + configuration=mock_config, + skip_userid_check=False, + topic_summary=None, + ) + assert exc_info.value.status_code == 500 diff --git a/tests/unit/utils/test_responses.py b/tests/unit/utils/test_responses.py index e58e7013d..a47ae2fe1 100644 --- a/tests/unit/utils/test_responses.py +++ b/tests/unit/utils/test_responses.py @@ -1,16 +1,76 @@ """Unit tests for utils/responses.py functions.""" -from types import SimpleNamespace +# pylint: disable=line-too-long,too-many-lines + +import json +from pathlib import Path from typing import Any, Optional import pytest +from fastapi import HTTPException +from llama_stack_api.openai_responses import ( + OpenAIResponseOutputMessageFileSearchToolCall as FileSearchCall, + OpenAIResponseOutputMessageFunctionToolCall as FunctionCall, + OpenAIResponseOutputMessageMCPCall as MCPCall, + OpenAIResponseOutputMessageMCPListTools as MCPListTools, + OpenAIResponseMCPApprovalRequest as MCPApprovalRequest, + OpenAIResponseMCPApprovalResponse as MCPApprovalResponse, + OpenAIResponseOutputMessageWebSearchToolCall as WebSearchCall, +) +from llama_stack_client import APIConnectionError, APIStatusError, AsyncLlamaStackClient +from pydantic import AnyUrl +from pytest_mock import MockerFixture + +from configuration import AppConfig +from models.config import ModelContextProtocolServer +from models.requests import QueryRequest +from utils.responses import ( + build_mcp_tool_call_from_arguments_done, + build_tool_call_summary, + build_tool_result_from_mcp_output_item_done, + extract_rag_chunks_from_file_search_item, + extract_text_from_response_output_item, + extract_token_usage, + get_mcp_tools, + get_rag_tools, + get_topic_summary, + parse_arguments_string, + parse_referenced_documents, + prepare_responses_params, + prepare_tools, + _increment_llm_call_metric, +) +from utils.types import RAGChunk + + +class MockOutputItem: # pylint: disable=too-few-public-methods + """Mock Responses API output item.""" + + def __init__( + self, + item_type: Optional[str] = None, + role: Optional[str] = None, + content: Any = None, + ) -> None: + # Use setattr to avoid conflict with built-in 'type' + setattr(self, "type", item_type) + self.role = role + self.content = content + -from utils.responses import extract_text_from_response_output_item +class MockContentPart: # pylint: disable=too-few-public-methods + """Mock content part for message content.""" + + def __init__( + self, text: Optional[str] = None, refusal: Optional[str] = None + ) -> None: + self.text = text + self.refusal = refusal def make_output_item( item_type: Optional[str] = None, role: Optional[str] = None, content: Any = None -) -> SimpleNamespace: +) -> MockOutputItem: """Create a mock Responses API output item. 
Args: @@ -19,14 +79,15 @@ def make_output_item( content: The content of the message (can be str, list, or None) Returns: - SimpleNamespace: Mock object with type, role, and content attributes + MockOutputItem: Mock object with type, role, and content attributes """ - return SimpleNamespace(type=item_type, role=role, content=content) + mock_item = MockOutputItem(item_type=item_type, role=role, content=content) + return mock_item def make_content_part( text: Optional[str] = None, refusal: Optional[str] = None -) -> SimpleNamespace: +) -> MockContentPart: """Create a mock content part for message content. Args: @@ -34,9 +95,9 @@ def make_content_part( refusal: Refusal message content Returns: - SimpleNamespace: Mock object with text and/or refusal attributes + MockContentPart: Mock object with text and/or refusal attributes """ - return SimpleNamespace(text=text, refusal=refusal) + return MockContentPart(text=text, refusal=refusal) @pytest.mark.parametrize( @@ -258,3 +319,1258 @@ class PartialMock: # Should return empty string when critical attributes are missing assert result == "" + + +class TestGetRAGTools: + """Test cases for get_rag_tools utility function.""" + + def test_get_rag_tools_empty_list(self) -> None: + """Test get_rag_tools returns None for empty list.""" + assert get_rag_tools([]) is None + + def test_get_rag_tools_with_vector_stores(self) -> None: + """Test get_rag_tools returns correct tool format for vector stores.""" + tools = get_rag_tools(["db1", "db2"]) + assert isinstance(tools, list) + assert len(tools) == 1 + assert tools[0]["type"] == "file_search" + assert tools[0]["vector_store_ids"] == ["db1", "db2"] + assert tools[0]["max_num_results"] == 10 + + +class TestGetMCPTools: + """Test cases for get_mcp_tools utility function.""" + + def test_get_mcp_tools_without_auth(self) -> None: + """Test get_mcp_tools with servers without authorization headers.""" + servers_no_auth = [ + ModelContextProtocolServer(name="fs", url="http://localhost:3000"), + ModelContextProtocolServer(name="git", url="https://git.example.com/mcp"), + ] + + tools_no_auth = get_mcp_tools(servers_no_auth, token=None) + assert len(tools_no_auth) == 2 + assert tools_no_auth[0]["type"] == "mcp" + assert tools_no_auth[0]["server_label"] == "fs" + assert tools_no_auth[0]["server_url"] == "http://localhost:3000" + assert "headers" not in tools_no_auth[0] + + def test_get_mcp_tools_with_kubernetes_auth(self) -> None: + """Test get_mcp_tools with kubernetes auth.""" + servers_k8s = [ + ModelContextProtocolServer( + name="k8s-server", + url="http://localhost:3000", + authorization_headers={"Authorization": "kubernetes"}, + ), + ] + tools_k8s = get_mcp_tools(servers_k8s, token="user-k8s-token") + assert len(tools_k8s) == 1 + assert tools_k8s[0]["headers"] == {"Authorization": "Bearer user-k8s-token"} + + def test_get_mcp_tools_with_mcp_headers(self) -> None: + """Test get_mcp_tools with client-provided headers.""" + servers = [ + ModelContextProtocolServer( + name="fs", + url="http://localhost:3000", + authorization_headers={"Authorization": "client", "X-Custom": "client"}, + ), + ] + + mcp_headers = { + "fs": { + "Authorization": "client-provided-token", + "X-Custom": "custom-value", + } + } + tools = get_mcp_tools(servers, token=None, mcp_headers=mcp_headers) + assert len(tools) == 1 + assert tools[0]["headers"] == { + "Authorization": "client-provided-token", + "X-Custom": "custom-value", + } + + # Test with mcp_headers=None (server should be skipped) + tools_no_headers = get_mcp_tools(servers, token=None, 
mcp_headers=None) + assert len(tools_no_headers) == 0 + + def test_get_mcp_tools_client_auth_no_mcp_headers(self) -> None: + """Test get_mcp_tools skips server when mcp_headers is None and server requires client auth.""" # noqa: E501 + servers = [ + ModelContextProtocolServer( + name="client-auth-server", + url="http://localhost:3000", + authorization_headers={"X-Custom": "client"}, + ), + ] + + # When mcp_headers is None and server requires client auth, + # should return None for that header + # This tests the specific path at line 391 + tools = get_mcp_tools(servers, token=None, mcp_headers=None) + # Server should be skipped because it requires client auth but mcp_headers is None + assert len(tools) == 0 + + def test_get_mcp_tools_client_auth_missing_server_in_headers(self) -> None: + """Test get_mcp_tools skips server when mcp_headers doesn't contain server name.""" + servers = [ + ModelContextProtocolServer( + name="client-auth-server", + url="http://localhost:3000", + authorization_headers={"X-Custom": "client"}, + ), + ] + + # mcp_headers exists but doesn't contain this server name + # This tests the specific path at line 394 + mcp_headers = {"other-server": {"X-Custom": "value"}} + tools = get_mcp_tools(servers, token=None, mcp_headers=mcp_headers) + # Server should be skipped because mcp_headers doesn't contain this server + assert len(tools) == 0 + + def test_get_mcp_tools_with_static_headers(self, tmp_path: Path) -> None: + """Test get_mcp_tools with static headers from config files.""" + secret_file = tmp_path / "token.txt" + secret_file.write_text("static-secret-token") + + servers = [ + ModelContextProtocolServer( + name="server1", + url="http://localhost:3000", + authorization_headers={"Authorization": str(secret_file)}, + ), + ] + + tools = get_mcp_tools(servers, token=None) + assert len(tools) == 1 + assert tools[0]["headers"] == {"Authorization": "static-secret-token"} + + def test_get_mcp_tools_with_mixed_headers(self, tmp_path: Path) -> None: + """Test get_mcp_tools with mixed header types.""" + secret_file = tmp_path / "api-key.txt" + secret_file.write_text("secret-api-key") + + servers = [ + ModelContextProtocolServer( + name="mixed-server", + url="http://localhost:3000", + authorization_headers={ + "Authorization": "kubernetes", + "X-API-Key": str(secret_file), + "X-Custom": "client", + }, + ), + ] + + mcp_headers = { + "mixed-server": { + "X-Custom": "client-custom-value", + } + } + + tools = get_mcp_tools(servers, token="k8s-token", mcp_headers=mcp_headers) + assert len(tools) == 1 + assert tools[0]["headers"] == { + "Authorization": "Bearer k8s-token", + "X-API-Key": "secret-api-key", + "X-Custom": "client-custom-value", + } + + def test_get_mcp_tools_skips_server_with_missing_auth(self) -> None: + """Test that servers with required but unavailable auth headers are skipped.""" + servers = [ + ModelContextProtocolServer( + name="missing-k8s-auth", + url="http://localhost:3001", + authorization_headers={"Authorization": "kubernetes"}, + ), + ModelContextProtocolServer( + name="missing-client-auth", + url="http://localhost:3002", + authorization_headers={"X-Token": "client"}, + ), + ] + + tools = get_mcp_tools(servers, token=None, mcp_headers=None) + assert len(tools) == 0 + + def test_get_mcp_tools_includes_server_without_auth(self) -> None: + """Test that servers without auth config are always included.""" + servers = [ + ModelContextProtocolServer( + name="public-server", + url="http://localhost:3000", + authorization_headers={}, + ), + ] + + tools = 
get_mcp_tools(servers, token=None, mcp_headers=None) + assert len(tools) == 1 + assert tools[0]["server_label"] == "public-server" + assert "headers" not in tools[0] + + +class TestGetTopicSummary: + """Tests for get_topic_summary function.""" + + @pytest.mark.asyncio + async def test_get_topic_summary_success(self, mocker: MockerFixture) -> None: + """Test successful topic summary generation.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_output_item = make_output_item( + item_type="message", role="assistant", content="Topic Summary" + ) + mock_response = mocker.Mock() + mock_response.output = [mock_output_item] + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response) + + mocker.patch( + "utils.responses.get_topic_summary_system_prompt", return_value="Summarize:" + ) + mocker.patch("utils.responses.configuration", mocker.Mock()) + + result = await get_topic_summary("test question", mock_client, "model1") + assert result == "Topic Summary" + mock_client.responses.create.assert_called_once() + + @pytest.mark.asyncio + async def test_get_topic_summary_empty_response( + self, mocker: MockerFixture + ) -> None: + """Test topic summary with empty response.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_response = mocker.Mock() + mock_response.output = [] + mock_client.responses.create = mocker.AsyncMock(return_value=mock_response) + + mocker.patch( + "utils.responses.get_topic_summary_system_prompt", return_value="Summarize:" + ) + mocker.patch("utils.responses.configuration", mocker.Mock()) + + result = await get_topic_summary("test question", mock_client, "model1") + assert result == "" + + @pytest.mark.asyncio + async def test_get_topic_summary_connection_error( + self, mocker: MockerFixture + ) -> None: + """Test topic summary raises HTTPException on connection error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + mock_client.responses.create = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + mocker.patch( + "utils.responses.get_topic_summary_system_prompt", return_value="Summarize:" + ) + mocker.patch("utils.responses.configuration", mocker.Mock()) + + with pytest.raises(HTTPException) as exc_info: + await get_topic_summary("test question", mock_client, "model1") + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_get_topic_summary_api_error(self, mocker: MockerFixture) -> None: + """Test topic summary raises HTTPException on API error.""" + mock_client = mocker.AsyncMock(spec=AsyncLlamaStackClient) + # Create a mock exception that will be caught by except APIStatusError + mock_error = APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + mock_client.responses.create = mocker.AsyncMock(side_effect=mock_error) + + mocker.patch( + "utils.responses.get_topic_summary_system_prompt", return_value="Summarize:" + ) + mocker.patch("utils.responses.configuration", mocker.Mock()) + mocker.patch( + "utils.responses.handle_known_apistatus_errors", + return_value=mocker.Mock( + model_dump=lambda: { + "status_code": 500, + "detail": {"response": "Error", "cause": "API error"}, + } + ), + ) + + with pytest.raises(HTTPException): + await get_topic_summary("test question", mock_client, "model1") + + +class TestPrepareTools: + """Tests for prepare_tools function.""" + + @pytest.mark.asyncio + async def test_prepare_tools_no_tools(self, mocker: MockerFixture) -> None: + """Test 
prepare_tools returns None when no_tools is True.""" + mock_client = mocker.AsyncMock() + query_request = QueryRequest( + query="test", no_tools=True + ) # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] + + result = await prepare_tools(mock_client, query_request, "token", mock_config) + assert result is None + + @pytest.mark.asyncio + async def test_prepare_tools_with_vector_store_ids( + self, mocker: MockerFixture + ) -> None: + """Test prepare_tools with specified vector store IDs.""" + mock_client = mocker.AsyncMock() + query_request = QueryRequest( + query="test", vector_store_ids=["vs1", "vs2"] + ) # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] + + result = await prepare_tools(mock_client, query_request, "token", mock_config) + assert result is not None + assert len(result) == 1 + assert result[0]["type"] == "file_search" + assert result[0]["vector_store_ids"] == ["vs1", "vs2"] + + @pytest.mark.asyncio + async def test_prepare_tools_fetch_vector_stores( + self, mocker: MockerFixture + ) -> None: + """Test prepare_tools fetches vector stores when not specified.""" + mock_client = mocker.AsyncMock() + mock_vector_store1 = mocker.Mock() + mock_vector_store1.id = "vs1" + mock_vector_store2 = mocker.Mock() + mock_vector_store2.id = "vs2" + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [mock_vector_store1, mock_vector_store2] + mock_client.vector_stores.list = mocker.AsyncMock( + return_value=mock_vector_stores + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] + + result = await prepare_tools(mock_client, query_request, "token", mock_config) + assert result is not None + assert len(result) == 1 + assert result[0]["vector_store_ids"] == ["vs1", "vs2"] + + @pytest.mark.asyncio + async def test_prepare_tools_connection_error(self, mocker: MockerFixture) -> None: + """Test prepare_tools raises HTTPException on connection error.""" + mock_client = mocker.AsyncMock() + mock_client.vector_stores.list = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] + + with pytest.raises(HTTPException) as exc_info: + await prepare_tools(mock_client, query_request, "token", mock_config) + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_prepare_tools_with_mcp_servers(self, mocker: MockerFixture) -> None: + """Test prepare_tools includes MCP tools.""" + mock_client = mocker.AsyncMock() + query_request = QueryRequest( + query="test", vector_store_ids=["vs1"] + ) # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [ + ModelContextProtocolServer(name="test-server", url="http://localhost:3000") + ] + + result = await prepare_tools(mock_client, query_request, "token", mock_config) + assert result is not None + assert len(result) == 2 # RAG tool + MCP tool + assert any(tool.get("type") == "mcp" for tool in result) + + @pytest.mark.asyncio + async def test_prepare_tools_api_status_error(self, mocker: MockerFixture) -> None: + """Test prepare_tools raises HTTPException on API status error when fetching vector stores.""" # noqa: E501 + mock_client = mocker.AsyncMock() + 
mock_client.vector_stores.list = mocker.AsyncMock( + side_effect=APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] + + with pytest.raises(HTTPException) as exc_info: + await prepare_tools(mock_client, query_request, "token", mock_config) + assert exc_info.value.status_code == 500 + + @pytest.mark.asyncio + async def test_prepare_tools_empty_toolgroups(self, mocker: MockerFixture) -> None: + """Test prepare_tools returns None when no tools are available.""" + mock_client = mocker.AsyncMock() + mock_vector_stores = mocker.Mock() + mock_vector_stores.data = [] # No vector stores + mock_client.vector_stores.list = mocker.AsyncMock( + return_value=mock_vector_stores + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mock_config = mocker.Mock(spec=AppConfig) + mock_config.mcp_servers = [] # No MCP servers + + result = await prepare_tools(mock_client, query_request, "token", mock_config) + assert result is None + + +class TestPrepareResponsesParams: + """Tests for prepare_responses_params function.""" + + @pytest.mark.asyncio + async def test_prepare_responses_params_with_conversation_id( + self, mocker: MockerFixture + ) -> None: + """Test prepare_responses_params with existing conversation ID.""" + mock_client = mocker.AsyncMock() + mock_model = mocker.Mock() + mock_model.id = "provider1/model1" + mock_model.custom_metadata = {"model_type": "llm", "provider_id": "provider1"} + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + + query_request = QueryRequest( + query="test", conversation_id="123e4567-e89b-12d3-a456-426614174000" + ) # pyright: ignore[reportCallIssue] + + mocker.patch("utils.responses.configuration", mocker.Mock()) + mocker.patch( + "utils.responses.select_model_and_provider_id", + return_value=("provider1/model1", "model1", "provider1"), + ) + mocker.patch("utils.responses.evaluate_model_hints", return_value=(None, None)) + mocker.patch("utils.responses.get_system_prompt", return_value="System prompt") + mocker.patch("utils.responses.prepare_tools", return_value=None) + mocker.patch("utils.responses.prepare_input", return_value="test") + mocker.patch( + "utils.responses.to_llama_stack_conversation_id", return_value="llama_conv1" + ) + + result = await prepare_responses_params( + mock_client, query_request, None, "token" + ) + assert result.input == "test" + assert result.model == "provider1/model1" + assert result.conversation == "llama_conv1" + + @pytest.mark.asyncio + async def test_prepare_responses_params_create_conversation( + self, mocker: MockerFixture + ) -> None: + """Test prepare_responses_params creates new conversation when ID not provided.""" + mock_client = mocker.AsyncMock() + mock_model = mocker.Mock() + mock_model.id = "provider1/model1" + mock_model.custom_metadata = {"model_type": "llm", "provider_id": "provider1"} + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + + mock_conversation = mocker.Mock() + mock_conversation.id = "new_conv_id" + mock_client.conversations.create = mocker.AsyncMock( + return_value=mock_conversation + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + mocker.patch("utils.responses.configuration", mocker.Mock()) + mocker.patch( + "utils.responses.select_model_and_provider_id", + return_value=("provider1/model1", "model1", 
"provider1"), + ) + mocker.patch("utils.responses.evaluate_model_hints", return_value=(None, None)) + mocker.patch("utils.responses.get_system_prompt", return_value="System prompt") + mocker.patch("utils.responses.prepare_tools", return_value=None) + mocker.patch("utils.responses.prepare_input", return_value="test") + + result = await prepare_responses_params( + mock_client, query_request, None, "token" + ) + assert result.conversation == "new_conv_id" + mock_client.conversations.create.assert_called_once() + + @pytest.mark.asyncio + async def test_prepare_responses_params_connection_error_on_models( + self, mocker: MockerFixture + ) -> None: + """Test prepare_responses_params raises HTTPException on connection error when fetching models.""" # noqa: E501 + mock_client = mocker.AsyncMock() + mock_client.models.list = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mocker.patch("utils.responses.configuration", mocker.Mock()) + + with pytest.raises(HTTPException) as exc_info: + await prepare_responses_params(mock_client, query_request, None, "token") + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_prepare_responses_params_connection_error_on_conversation( + self, mocker: MockerFixture + ) -> None: + """Test prepare_responses_params raises HTTPException on connection error when creating conversation.""" # noqa: E501 + mock_client = mocker.AsyncMock() + mock_model = mocker.Mock() + mock_model.id = "provider1/model1" + mock_model.custom_metadata = {"model_type": "llm", "provider_id": "provider1"} + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + mock_client.conversations.create = mocker.AsyncMock( + side_effect=APIConnectionError( + message="Connection failed", request=mocker.Mock() + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + mocker.patch("utils.responses.configuration", mocker.Mock()) + mocker.patch( + "utils.responses.select_model_and_provider_id", + return_value=("provider1/model1", "model1", "provider1"), + ) + mocker.patch("utils.responses.evaluate_model_hints", return_value=(None, None)) + mocker.patch("utils.responses.get_system_prompt", return_value="System prompt") + mocker.patch("utils.responses.prepare_tools", return_value=None) + mocker.patch("utils.responses.prepare_input", return_value="test") + + with pytest.raises(HTTPException) as exc_info: + await prepare_responses_params(mock_client, query_request, None, "token") + assert exc_info.value.status_code == 503 + + @pytest.mark.asyncio + async def test_prepare_responses_params_api_status_error_on_models( + self, mocker: MockerFixture + ) -> None: + """Test prepare_responses_params raises HTTPException on API status error when fetching models.""" # noqa: E501 + mock_client = mocker.AsyncMock() + mock_client.models.list = mocker.AsyncMock( + side_effect=APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + mocker.patch("utils.responses.configuration", mocker.Mock()) + + with pytest.raises(HTTPException) as exc_info: + await prepare_responses_params(mock_client, query_request, None, "token") + assert exc_info.value.status_code == 500 + + @pytest.mark.asyncio + async def test_prepare_responses_params_api_status_error_on_conversation( + self, mocker: 
MockerFixture + ) -> None: + """Test prepare_responses_params raises HTTPException on API status error when creating conversation.""" # noqa: E501 + mock_client = mocker.AsyncMock() + mock_model = mocker.Mock() + mock_model.id = "provider1/model1" + mock_model.custom_metadata = {"model_type": "llm", "provider_id": "provider1"} + mock_client.models.list = mocker.AsyncMock(return_value=[mock_model]) + mock_client.conversations.create = mocker.AsyncMock( + side_effect=APIStatusError( + message="API error", response=mocker.Mock(request=None), body=None + ) + ) + + query_request = QueryRequest(query="test") # pyright: ignore[reportCallIssue] + + mocker.patch("utils.responses.configuration", mocker.Mock()) + mocker.patch( + "utils.responses.select_model_and_provider_id", + return_value=("provider1/model1", "model1", "provider1"), + ) + mocker.patch("utils.responses.evaluate_model_hints", return_value=(None, None)) + mocker.patch("utils.responses.get_system_prompt", return_value="System prompt") + mocker.patch("utils.responses.prepare_tools", return_value=None) + mocker.patch("utils.responses.prepare_input", return_value="test") + + with pytest.raises(HTTPException) as exc_info: + await prepare_responses_params(mock_client, query_request, None, "token") + assert exc_info.value.status_code == 500 + + +class TestParseReferencedDocuments: + """Tests for parse_referenced_documents function.""" + + def test_parse_referenced_documents_none_response(self) -> None: + """Test parsing with None response.""" + result = parse_referenced_documents(None) + assert not result + + def test_parse_referenced_documents_empty_output( + self, mocker: MockerFixture + ) -> None: + """Test parsing with empty output.""" + mock_response = mocker.Mock() + mock_response.output = [] + result = parse_referenced_documents(mock_response) + assert not result + + def test_parse_referenced_documents_file_search_call( + self, mocker: MockerFixture + ) -> None: + """Test parsing from file_search_call results.""" + mock_result1 = mocker.Mock() + mock_result1.attributes = { + "link": "https://example.com/doc1", + "title": "Document 1", + } + + mock_result2 = { + "attributes": {"url": "https://example.com/doc2", "title": "Document 2"}, + } + + mock_output_item = mocker.Mock() + mock_output_item.type = "file_search_call" + mock_output_item.results = [mock_result1, mock_result2] + + mock_response = mocker.Mock() + mock_response.output = [mock_output_item] + + result = parse_referenced_documents(mock_response) + assert len(result) == 2 + assert result[0].doc_title == "Document 1" + assert result[0].doc_url == AnyUrl("https://example.com/doc1") + assert result[1].doc_title == "Document 2" + assert result[1].doc_url == AnyUrl("https://example.com/doc2") + + def test_parse_referenced_documents_message_annotations( + self, mocker: MockerFixture + ) -> None: + """Test parsing from message content annotations - no longer supported.""" + # Message annotations are no longer parsed by parse_referenced_documents + # This test verifies that message type output items are ignored + mock_annotation1 = mocker.Mock() + mock_annotation1.type = "url_citation" + mock_annotation1.url = "https://example.com/doc1" + mock_annotation1.title = "Document 1" + + mock_annotation2 = {"type": "file_citation", "filename": "doc2.pdf"} + + mock_part = mocker.Mock() + mock_part.annotations = [mock_annotation1, mock_annotation2] + + mock_output_item = mocker.Mock() + mock_output_item.type = "message" + mock_output_item.content = [mock_part] + + mock_response = mocker.Mock() 
+ mock_response.output = [mock_output_item] + + result = parse_referenced_documents(mock_response) + # Message annotations are no longer parsed, so result should be empty + assert len(result) == 0 + + def test_parse_referenced_documents_string_parts_skipped( + self, mocker: MockerFixture + ) -> None: + """Test that message type output items are ignored.""" + # Message annotations are no longer parsed by parse_referenced_documents + mock_annotation = mocker.Mock() + mock_annotation.type = "url_citation" + mock_annotation.url = "https://example.com/doc1" + mock_annotation.title = "Document 1" + + mock_part = mocker.Mock() + mock_part.annotations = [mock_annotation] + + mock_output_item = mocker.Mock() + mock_output_item.type = "message" + # Include a string part that should be skipped + mock_output_item.content = ["string part", mock_part] + + mock_response = mocker.Mock() + mock_response.output = [mock_output_item] + + result = parse_referenced_documents(mock_response) + # Message type is no longer parsed, so result should be empty + assert len(result) == 0 + + def test_parse_referenced_documents_deduplication( + self, mocker: MockerFixture + ) -> None: + """Test that duplicate documents are deduplicated.""" + mock_result = mocker.Mock() + mock_result.attributes = { + "link": "https://example.com/doc1", + "title": "Document 1", + } + + mock_output_item = mocker.Mock() + mock_output_item.type = "file_search_call" + mock_output_item.results = [mock_result, mock_result] # Duplicate + + mock_response = mocker.Mock() + mock_response.output = [mock_output_item] + + result = parse_referenced_documents(mock_response) + assert len(result) == 1 # Should be deduplicated + + +class TestExtractTokenUsage: + """Tests for extract_token_usage function.""" + + def test_extract_token_usage_with_dict_usage(self, mocker: MockerFixture) -> None: + """Test extracting token usage from dict format.""" + mock_response = mocker.Mock() + mock_response.usage = {"input_tokens": 100, "output_tokens": 50} + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses.metrics.llm_token_sent_total") + mocker.patch("utils.responses.metrics.llm_token_received_total") + mocker.patch("utils.responses._increment_llm_call_metric") + + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 100 + assert result.output_tokens == 50 + assert result.llm_calls == 1 + + def test_extract_token_usage_with_object_usage(self, mocker: MockerFixture) -> None: + """Test extracting token usage from object format.""" + mock_usage = mocker.Mock() + mock_usage.input_tokens = 200 + mock_usage.output_tokens = 100 + + mock_response = mocker.Mock() + mock_response.usage = mock_usage + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses.metrics.llm_token_sent_total") + mocker.patch("utils.responses.metrics.llm_token_received_total") + mocker.patch("utils.responses._increment_llm_call_metric") + + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 200 + assert result.output_tokens == 100 + + def test_extract_token_usage_no_usage(self, mocker: MockerFixture) -> None: + """Test extracting token usage when usage is None.""" + mock_response = mocker.Mock() + mock_response.usage = None + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + 
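Together these cases specify the parser the tests assume: only file_search_call output items contribute documents, a result's attributes may be attribute-style or dict-style and may carry the URL under either "link" or "url", message items are ignored, and duplicates collapse. A minimal sketch under those assumptions, not the real implementation:

    def parse_referenced_documents_sketch(response) -> list[ReferencedDocument]:
        docs: list[ReferencedDocument] = []
        seen: set = set()
        if response is None:
            return docs
        for item in response.output:
            if getattr(item, "type", None) != "file_search_call":
                continue  # "message" output items are ignored by design
            for result in item.results or []:
                attrs = result["attributes"] if isinstance(result, dict) else result.attributes
                url = attrs.get("link") or attrs.get("url")
                key = (attrs.get("title"), url)
                if key in seen:
                    continue  # deduplicate on (title, url)
                seen.add(key)
                docs.append(
                    ReferencedDocument(doc_url=AnyUrl(url), doc_title=attrs.get("title"))
                )
        return docs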
return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses._increment_llm_call_metric") + + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 0 + assert result.output_tokens == 0 + assert result.llm_calls == 1 + + def test_extract_token_usage_zero_tokens(self, mocker: MockerFixture) -> None: + """Test extracting token usage when tokens are 0.""" + mock_usage = mocker.Mock() + mock_usage.input_tokens = 0 + mock_usage.output_tokens = 0 + + mock_response = mocker.Mock() + mock_response.usage = mock_usage + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses._increment_llm_call_metric") + + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 0 + assert result.output_tokens == 0 + + def test_extract_token_usage_none_response(self, mocker: MockerFixture) -> None: + """Test extracting token usage with None response.""" + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses._increment_llm_call_metric") + + result = extract_token_usage(None, "provider1/model1") + assert result.input_tokens == 0 + assert result.output_tokens == 0 + + def test_extract_token_usage_metrics_error(self, mocker: MockerFixture) -> None: + """Test extracting token usage handles errors when updating metrics.""" + mock_usage = mocker.Mock() + mock_usage.input_tokens = 100 + mock_usage.output_tokens = 50 + + mock_response = mocker.Mock() + mock_response.usage = mock_usage + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + # Make metrics raise an error + mock_metric = mocker.Mock() + mock_metric.labels.return_value.inc = mocker.Mock( + side_effect=AttributeError("No attribute") + ) + mocker.patch("utils.responses.metrics.llm_token_sent_total", mock_metric) + mocker.patch("utils.responses.metrics.llm_token_received_total", mock_metric) + mocker.patch("utils.responses.logger") + mocker.patch("utils.responses._increment_llm_call_metric") + + # Should not raise, just log warning + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 100 + assert result.output_tokens == 50 + + def test_extract_token_usage_extraction_error(self, mocker: MockerFixture) -> None: + """Test extracting token usage handles errors when extracting usage.""" + + # Create a usage object that raises TypeError when attributes are accessed + # getattr catches AttributeError but not TypeError, so TypeError will propagate + class ErrorUsage: # pylint: disable=too-few-public-methods + """Mock usage object that raises TypeError.""" + + def __getattribute__(self, name: str) -> Any: + if name in ("input_tokens", "output_tokens"): + # Raise TypeError which getattr won't catch (only catches AttributeError) + raise TypeError(f"Cannot access {name}") + return super().__getattribute__(name) + + mock_usage = ErrorUsage() + mock_response = mocker.Mock() + mock_response.usage = mock_usage + + mocker.patch( + "utils.responses.extract_provider_and_model_from_model_id", + return_value=("provider1", "model1"), + ) + mocker.patch("utils.responses.logger") + mocker.patch("utils.responses._increment_llm_call_metric") + + # getattr with default catches AttributeError but not TypeError + # TypeError will propagate to exception handler at line 611 + # Should not raise, 
just log warning and return 0 tokens + result = extract_token_usage(mock_response, "provider1/model1") + assert result.input_tokens == 0 + assert result.output_tokens == 0 + + +class TestBuildToolCallSummary: + """Tests for build_tool_call_summary function.""" + + def test_build_tool_call_summary_function_call(self, mocker: MockerFixture) -> None: + """Test building summary for function_call.""" + mock_item = mocker.Mock(spec=FunctionCall) + mock_item.type = "function_call" + mock_item.call_id = "call_123" + mock_item.name = "test_function" + mock_item.arguments = '{"arg1": "value1"}' + + rag_chunks: list[RAGChunk] = [] + mocker.patch( + "utils.responses.parse_arguments_string", return_value={"arg1": "value1"} + ) + + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + assert call_summary is not None + assert call_summary.name == "test_function" + assert call_summary.args == {"arg1": "value1"} + assert result_summary is None + + def test_build_tool_call_summary_file_search_call( + self, mocker: MockerFixture + ) -> None: + """Test building summary for file_search_call.""" + mock_result = mocker.Mock() + mock_result.text = "chunk text" + mock_result.filename = "doc.pdf" + mock_result.score = 0.9 + mock_result.model_dump = mocker.Mock( + return_value={"text": "chunk text", "filename": "doc.pdf", "score": 0.9} + ) + + mock_item = mocker.Mock(spec=FileSearchCall) + mock_item.type = "file_search_call" + mock_item.id = "search_123" + mock_item.queries = ["query1"] + mock_item.results = [mock_result] + mock_item.status = "success" + + rag_chunks: list[RAGChunk] = [] + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + + assert call_summary is not None + assert call_summary.name == "knowledge_search" + assert len(rag_chunks) == 1 + assert result_summary is not None + assert result_summary.status == "success" + + def test_build_tool_call_summary_web_search_call( + self, mocker: MockerFixture + ) -> None: + """Test building summary for web_search_call.""" + mock_item = mocker.Mock(spec=WebSearchCall) + mock_item.type = "web_search_call" + mock_item.id = "web_123" + mock_item.status = "success" + + rag_chunks: list[RAGChunk] = [] + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + + assert call_summary is not None + assert call_summary.name == "web_search" + assert result_summary is not None + assert result_summary.status == "success" + + def test_build_tool_call_summary_mcp_call(self, mocker: MockerFixture) -> None: + """Test building summary for mcp_call.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.type = "mcp_call" + mock_item.id = "mcp_123" + mock_item.name = "mcp_tool" + mock_item.arguments = '{"arg": "value"}' + mock_item.server_label = "test_server" + mock_item.error = None + mock_item.output = "output" + + rag_chunks: list[RAGChunk] = [] + mocker.patch( + "utils.responses.parse_arguments_string", return_value={"arg": "value"} + ) + + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + assert call_summary is not None + assert call_summary.name == "mcp_tool" + assert call_summary.args["server_label"] == "test_server" + assert result_summary is not None + assert result_summary.status == "success" + + def test_build_tool_call_summary_mcp_call_with_error( + self, mocker: MockerFixture + ) -> None: + """Test building summary for mcp_call with error.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.type = "mcp_call" + mock_item.id = "mcp_123" + mock_item.name = 
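The recurring theme in this class is tolerance: usage may arrive as a dict, as an object, or not at all, and neither a malformed usage payload nor a failing metric update may break the request. A sketch of the reading side only (illustrative, not the real extract_token_usage):

    def read_token_counts_sketch(usage) -> tuple[int, int]:
        # Dict-shaped usage (raw JSON) and attribute-shaped usage objects are
        # both accepted; anything else degrades to zero instead of raising.
        try:
            if usage is None:
                return 0, 0
            if isinstance(usage, dict):
                return usage.get("input_tokens", 0), usage.get("output_tokens", 0)
            return (
                getattr(usage, "input_tokens", 0) or 0,
                getattr(usage, "output_tokens", 0) or 0,
            )
        except (TypeError, AttributeError):
            # Mirrors the extraction_error case above: log-and-zero, never raise.
            return 0, 0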
"mcp_tool" + mock_item.arguments = "{}" + mock_item.server_label = None + mock_item.error = "Error occurred" + mock_item.output = None + + rag_chunks: list[RAGChunk] = [] + mocker.patch("utils.responses.parse_arguments_string", return_value={}) + + _call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + assert result_summary is not None + assert result_summary.status == "failure" + assert result_summary.content == "Error occurred" + + def test_build_tool_call_summary_mcp_list_tools( + self, mocker: MockerFixture + ) -> None: + """Test building summary for mcp_list_tools.""" + mock_tool = mocker.Mock() + mock_tool.name = "tool1" + mock_tool.description = "Description" + mock_tool.input_schema = {"type": "object"} + + mock_item = mocker.Mock(spec=MCPListTools) + mock_item.type = "mcp_list_tools" + mock_item.id = "list_123" + mock_item.server_label = "test_server" + mock_item.tools = [mock_tool] + + rag_chunks: list[RAGChunk] = [] + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + + assert call_summary is not None + assert call_summary.name == "mcp_list_tools" + assert result_summary is not None + assert "tools" in json.loads(result_summary.content) + + def test_build_tool_call_summary_mcp_approval_request( + self, mocker: MockerFixture + ) -> None: + """Test building summary for mcp_approval_request.""" + mock_item = mocker.Mock(spec=MCPApprovalRequest) + mock_item.type = "mcp_approval_request" + mock_item.id = "approval_123" + mock_item.name = "approve_action" + mock_item.arguments = '{"action": "delete"}' + + rag_chunks: list[RAGChunk] = [] + mocker.patch( + "utils.responses.parse_arguments_string", return_value={"action": "delete"} + ) + + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + assert call_summary is not None + assert call_summary.name == "approve_action" + assert result_summary is None + + def test_build_tool_call_summary_mcp_approval_response( + self, mocker: MockerFixture + ) -> None: + """Test building summary for mcp_approval_response.""" + mock_item = mocker.Mock(spec=MCPApprovalResponse) + mock_item.type = "mcp_approval_response" + mock_item.approval_request_id = "request_123" + mock_item.approve = True + mock_item.reason = "Approved" + + rag_chunks: list[RAGChunk] = [] + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + + assert call_summary is None + assert result_summary is not None + assert result_summary.status == "success" + assert "reason" in json.loads(result_summary.content) + + def test_build_tool_call_summary_unknown_type(self, mocker: MockerFixture) -> None: + """Test building summary for unknown type returns None.""" + mock_item = mocker.Mock() + mock_item.type = "unknown_type" + + rag_chunks: list[RAGChunk] = [] + call_summary, result_summary = build_tool_call_summary(mock_item, rag_chunks) + assert call_summary is None + assert result_summary is None + + +class TestExtractRagChunksFromFileSearchItem: + """Tests for extract_rag_chunks_from_file_search_item function.""" + + def test_extract_rag_chunks_with_results(self, mocker: MockerFixture) -> None: + """Test extracting RAG chunks from file search results.""" + mock_result1 = mocker.Mock() + mock_result1.text = "chunk 1" + mock_result1.filename = "doc1.pdf" + mock_result1.score = 0.9 + + mock_result2 = mocker.Mock() + mock_result2.text = "chunk 2" + mock_result2.filename = "doc2.pdf" + mock_result2.score = 0.8 + + mock_item = mocker.Mock(spec=FileSearchCall) + mock_item.results = 
[mock_result1, mock_result2] + + rag_chunks: list[RAGChunk] = [] + extract_rag_chunks_from_file_search_item(mock_item, rag_chunks) + + assert len(rag_chunks) == 2 + assert rag_chunks[0].content == "chunk 1" + assert rag_chunks[0].source == "doc1.pdf" + assert rag_chunks[0].score == 0.9 + + def test_extract_rag_chunks_no_results(self, mocker: MockerFixture) -> None: + """Test extracting RAG chunks when results is None.""" + mock_item = mocker.Mock(spec=FileSearchCall) + mock_item.results = None + + rag_chunks: list[RAGChunk] = [] + extract_rag_chunks_from_file_search_item(mock_item, rag_chunks) + assert len(rag_chunks) == 0 + + +class TestParseArgumentsString: + """Tests for parse_arguments_string function.""" + + def test_parse_arguments_string_valid_json(self) -> None: + """Test parsing valid JSON string.""" + result = parse_arguments_string('{"key": "value", "num": 123}') + assert result == {"key": "value", "num": 123} + + def test_parse_arguments_string_wrapped_content(self) -> None: + """Test parsing string that needs wrapping.""" + result = parse_arguments_string('"key": "value"') + assert result == {"key": "value"} + + def test_parse_arguments_string_invalid_json(self) -> None: + """Test parsing invalid JSON falls back to args wrapper.""" + result = parse_arguments_string("not json at all") + assert result == {"args": "not json at all"} + + def test_parse_arguments_string_non_dict_json(self) -> None: + """Test parsing JSON that's not a dict falls back.""" + result = parse_arguments_string('["array", "not", "dict"]') + assert result == {"args": '["array", "not", "dict"]'} + + def test_parse_arguments_string_empty_string(self) -> None: + """Test parsing empty string.""" + result = parse_arguments_string("") + assert result == {"args": ""} + + +class TestIncrementLlmCallMetric: + """Tests for _increment_llm_call_metric function.""" + + def test_increment_llm_call_metric_success(self, mocker: MockerFixture) -> None: + """Test successful metric increment.""" + mock_metric = mocker.Mock() + mock_metric.labels.return_value.inc = mocker.Mock() + mocker.patch("utils.responses.metrics.llm_calls_total", mock_metric) + + _increment_llm_call_metric("provider1", "model1") + + mock_metric.labels.assert_called_once_with("provider1", "model1") + mock_metric.labels.return_value.inc.assert_called_once() + + def test_increment_llm_call_metric_attribute_error( + self, mocker: MockerFixture + ) -> None: + """Test metric increment handles AttributeError.""" + mocker.patch( + "utils.responses.metrics.llm_calls_total", + side_effect=AttributeError("No attribute"), + ) + mocker.patch("utils.responses.logger") + + # Should not raise exception + _increment_llm_call_metric("provider1", "model1") + + def test_increment_llm_call_metric_type_error(self, mocker: MockerFixture) -> None: + """Test metric increment handles TypeError.""" + mock_metric = mocker.Mock() + mock_metric.labels.return_value.inc = mocker.Mock( + side_effect=TypeError("Invalid type") + ) + mocker.patch("utils.responses.metrics.llm_calls_total", mock_metric) + mocker.patch("utils.responses.logger") + + # Should not raise exception + _increment_llm_call_metric("provider1", "model1") + + def test_increment_llm_call_metric_value_error(self, mocker: MockerFixture) -> None: + """Test metric increment handles ValueError.""" + mock_metric = mocker.Mock() + mock_metric.labels.return_value.inc = mocker.Mock( + side_effect=ValueError("Invalid value") + ) + mocker.patch("utils.responses.metrics.llm_calls_total", mock_metric) + 
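These five cases fully determine parse_arguments_string's contract: a JSON object parses as-is, a bare key/value fragment gets one retry wrapped in braces, and everything else (including non-dict JSON and the empty string) falls back to the args wrapper. A behavior-equivalent sketch:

    import json

    def parse_arguments_string_sketch(raw: str) -> dict:
        # Try the raw string, then a brace-wrapped retry (skipped for empty
        # input so that "" falls through to the wrapper, as the tests require).
        candidates = [raw] + (["{" + raw + "}"] if raw.strip() else [])
        for candidate in candidates:
            try:
                parsed = json.loads(candidate)
                if isinstance(parsed, dict):
                    return parsed  # only dicts are accepted
            except json.JSONDecodeError:
                pass
        return {"args": raw}  # fallback wrapper, matching the tests above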
mocker.patch("utils.responses.logger") + + # Should not raise exception + _increment_llm_call_metric("provider1", "model1") + + +class TestBuildMCPToolCallFromArgumentsDone: + """Tests for build_mcp_tool_call_from_arguments_done function.""" + + def test_build_mcp_tool_call_with_valid_item(self) -> None: + """Test building MCP tool call with valid item info.""" + mcp_call_items = {0: ("call_123", "test_tool")} + tool_call = build_mcp_tool_call_from_arguments_done( + output_index=0, + arguments='{"param": "value"}', + mcp_call_items=mcp_call_items, + ) + + assert tool_call is not None + assert tool_call.id == "call_123" + assert tool_call.name == "test_tool" + assert tool_call.type == "mcp_call" + assert tool_call.args == {"param": "value"} + # Item should be removed from dict + assert 0 not in mcp_call_items + + def test_build_mcp_tool_call_with_missing_item(self) -> None: + """Test building MCP tool call when item info is missing.""" + mcp_call_items: dict[int, tuple[str, str]] = {} + tool_call = build_mcp_tool_call_from_arguments_done( + output_index=0, + arguments='{"param": "value"}', + mcp_call_items=mcp_call_items, + ) + + assert tool_call is None + + def test_build_mcp_tool_call_parses_arguments(self) -> None: + """Test that arguments are properly parsed.""" + mcp_call_items = {1: ("call_456", "another_tool")} + tool_call = build_mcp_tool_call_from_arguments_done( + output_index=1, + arguments='{"key1": "val1", "key2": 42}', + mcp_call_items=mcp_call_items, + ) + + assert tool_call is not None + assert tool_call.args == {"key1": "val1", "key2": 42} + + +class TestBuildToolResultFromMCPOutputItemDone: + """Tests for build_tool_result_from_mcp_output_item_done function.""" + + def test_build_mcp_tool_result_success(self, mocker: MockerFixture) -> None: + """Test building MCP tool result for successful call.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.id = "call_123" + mock_item.error = None + mock_item.output = "Success output" + + result = build_tool_result_from_mcp_output_item_done(mock_item) + + assert result.id == "call_123" + assert result.status == "success" + assert result.content == "Success output" + assert result.type == "mcp_call" + assert result.round == 1 + + def test_build_mcp_tool_result_failure(self, mocker: MockerFixture) -> None: + """Test building MCP tool result for failed call.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.id = "call_456" + mock_item.error = "Error message" + mock_item.output = None + + result = build_tool_result_from_mcp_output_item_done(mock_item) + + assert result.id == "call_456" + assert result.status == "failure" + assert result.content == "Error message" + assert result.type == "mcp_call" + + def test_build_mcp_tool_result_empty_output(self, mocker: MockerFixture) -> None: + """Test building MCP tool result with empty output.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.id = "call_789" + mock_item.error = None + mock_item.output = "" + + result = build_tool_result_from_mcp_output_item_done(mock_item) + + assert result.status == "success" + assert result.content == "" + + def test_build_mcp_tool_result_none_output(self, mocker: MockerFixture) -> None: + """Test building MCP tool result with None output.""" + mock_item = mocker.Mock(spec=MCPCall) + mock_item.id = "call_999" + mock_item.error = None + mock_item.output = None + + result = build_tool_result_from_mcp_output_item_done(mock_item) + + assert result.status == "success" + assert result.content == "" diff --git a/tests/unit/utils/test_shields.py 
diff --git a/tests/unit/utils/test_shields.py b/tests/unit/utils/test_shields.py
index adf3fe8b1..5c352758b 100644
--- a/tests/unit/utils/test_shields.py
+++ b/tests/unit/utils/test_shields.py
@@ -1,9 +1,7 @@
 """Unit tests for utils/shields.py functions."""
 
-import httpx
 import pytest
 from fastapi import HTTPException, status
-from llama_stack_client import BadRequestError
 from pytest_mock import MockerFixture
 
 from utils.shields import (
@@ -278,7 +276,7 @@ async def test_raises_http_exception_when_shield_has_no_provider_resource_id(
     async def test_returns_blocked_on_bad_request_error(
         self, mocker: MockerFixture
    ) -> None:
-        """Test that run_shield_moderation returns blocked when BadRequestError is raised."""
+        """Test that run_shield_moderation returns blocked when ValueError is raised."""
         mock_metric = mocker.patch(
             "utils.shields.metrics.llm_calls_validation_errors_total"
         )
@@ -295,14 +293,9 @@ async def test_returns_blocked_on_bad_request_error(
         model.id = "moderation-model"
         mock_client.models.list = mocker.AsyncMock(return_value=[model])
 
-        # Setup moderation to raise BadRequestError
-        mock_response = httpx.Response(
-            400, request=httpx.Request("POST", "http://test")
-        )
+        # Setup moderation to raise ValueError (known Llama Stack bug)
         mock_client.moderations.create = mocker.AsyncMock(
-            side_effect=BadRequestError(
-                "Bad request", response=mock_response, body=None
-            )
+            side_effect=ValueError("Bad request")
         )
 
         result = await run_shield_moderation(mock_client, "test input")
diff --git a/tests/unit/utils/test_suid.py b/tests/unit/utils/test_suid.py
index 42fdb0724..d530b2d2c 100644
--- a/tests/unit/utils/test_suid.py
+++ b/tests/unit/utils/test_suid.py
@@ -21,6 +21,11 @@ def test_check_suid_valid_uuid(self) -> None:
         valid_suid = "123e4567-e89b-12d3-a456-426614174000"
         assert suid.check_suid(valid_suid), "check_suid should return True for UUID"
 
+    def test_check_suid_invalid_uuid_with_conv_prefix(self) -> None:
+        """Test that check_suid returns False for a UUID with a conv_ prefix."""
+        prefixed_suid = "conv_123e4567-e89b-12d3-a456-426614174000"
+        assert not suid.check_suid(prefixed_suid), "check_suid should return False for a conv_-prefixed ID"
+
     def test_check_suid_valid_48char_hex(self) -> None:
         """Test that check_suid returns True for a 48-char hex string."""
         valid_hex = "e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
@@ -75,3 +80,23 @@ def test_check_suid_invalid_non_hex_chars(self) -> None:
     def test_check_suid_invalid_type(self, invalid_type: Any) -> None:
         """Test that check_suid returns False for non-string types."""
         assert not suid.check_suid(invalid_type)
+
+    def test_normalize_conversation_id(self) -> None:
+        """Test the SUID normalization function."""
+        valid_conv = "conv_e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        expected_conv = "e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        assert suid.normalize_conversation_id(valid_conv) == expected_conv
+
+        valid_conv = "e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        expected_conv = "e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        assert suid.normalize_conversation_id(valid_conv) == expected_conv
+
+    def test_to_llama_stack_conversation_id(self) -> None:
+        """Test the function to_llama_stack_conversation_id."""
+        valid_conv = "conv_e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        expected_conv = "conv_e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        assert suid.to_llama_stack_conversation_id(valid_conv) == expected_conv
+
+        valid_conv = "e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        expected_conv = "conv_e6afd7aaa97b49ce8f4f96a801b07893d9cb784d72e53e3c"
+        assert suid.to_llama_stack_conversation_id(valid_conv) == expected_conv
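The new suid tests fully pin down the two conversation-ID helpers: normalization strips a conv_ prefix when present, and the Llama Stack form carries the prefix exactly once. A sketch consistent with every assertion above (the constant name is assumed; the tests only fix the behavior):

    CONV_PREFIX = "conv_"  # assumed constant name, behavior per the tests

    def normalize_conversation_id_sketch(conversation_id: str) -> str:
        # "conv_<id>" -> "<id>"; already-bare IDs pass through unchanged.
        return conversation_id.removeprefix(CONV_PREFIX)

    def to_llama_stack_conversation_id_sketch(conversation_id: str) -> str:
        # Idempotent: adds the prefix only if it is not already there.
        return CONV_PREFIX + conversation_id.removeprefix(CONV_PREFIX)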
diff --git a/tests/unit/utils/test_types.py b/tests/unit/utils/test_types.py
index e4f12278a..05224793e 100644
--- a/tests/unit/utils/test_types.py
+++ b/tests/unit/utils/test_types.py
@@ -1,8 +1,20 @@
-"""Unit tests for functions defined in utils/types.py."""
+"""Unit tests for functions and types defined in utils/types.py."""
 
+from llama_stack_client.types.shared.interleaved_content_item import (
+    ImageContentItem,
+    TextContentItem,
+)
+import pytest
+from pydantic import AnyUrl, ValidationError
 from pytest_mock import MockerFixture
 
-from utils.types import GraniteToolParser
+from utils.types import (
+    GraniteToolParser,
+    ReferencedDocument,
+    ToolCallSummary,
+    ToolResultSummary,
+    content_to_str,
+)
 
 
 class TestGraniteToolParser:
@@ -56,3 +68,135 @@ def test_get_tool_calls_from_completion_message_when_message_has_tool_calls(
         assert (
             tool_parser.get_tool_calls(completion_message) == tool_calls
         ), f"get_tool_calls should return {tool_calls}"
+
+
+class TestContentToStr:
+    """Tests for content_to_str function."""
+
+    def test_content_to_str_none(self) -> None:
+        """Test content_to_str with None."""
+        assert content_to_str(None) == ""
+
+    def test_content_to_str_string(self) -> None:
+        """Test content_to_str with string."""
+        assert content_to_str("test string") == "test string"
+
+    def test_content_to_str_text_content_item(self) -> None:
+        """Test content_to_str with TextContentItem."""
+        text_item = TextContentItem(text="text content")
+        result = content_to_str(text_item)
+        assert result == "text content"
+
+    def test_content_to_str_image_content_item(self) -> None:
+        """Test content_to_str with ImageContentItem."""
+
+        # ImageContentItem is a Pydantic model that requires 'image' parameter, not 'image_url'
+        # Use a mock that passes isinstance check by creating a subclass
+        class MockImageContentItem(ImageContentItem):
+            """Mock ImageContentItem for testing."""
+
+        image_item = MockImageContentItem.__new__(MockImageContentItem)
+        result = content_to_str(image_item)
+        assert result == ""
+
+    def test_content_to_str_list(self) -> None:
+        """Test content_to_str with list."""
+        result = content_to_str(["item1", "item2", "item3"])
+        assert result == "item1 item2 item3"
+
+    def test_content_to_str_nested_list(self) -> None:
+        """Test content_to_str with nested list."""
+        text_item = TextContentItem(text="nested text")
+        result = content_to_str(["outer", text_item, ["inner1", "inner2"]])
+        assert "outer" in result
+        assert "nested text" in result
+        assert "inner1" in result
+        assert "inner2" in result
+
+    def test_content_to_str_mixed_types(self) -> None:
+        """Test content_to_str with mixed types in list."""
+        text_item = TextContentItem(text="text")
+        result = content_to_str(["string", text_item, 123, None])
+        assert "string" in result
+        assert "text" in result
+        assert "123" in result
+
+    def test_content_to_str_other_type(self) -> None:
+        """Test content_to_str with other type falls back to str()."""
+        result = content_to_str(123)
+        assert result == "123"
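These cases pin down content_to_str completely: None becomes "", plain strings pass through, text items contribute their text, image items contribute nothing, lists flatten recursively with single spaces, and anything else falls back to str(). A sketch matching the assertions:

    def content_to_str_sketch(content) -> str:
        if content is None:
            return ""
        if isinstance(content, str):
            return content
        if isinstance(content, TextContentItem):
            return content.text
        if isinstance(content, ImageContentItem):
            return ""  # images carry no text
        if isinstance(content, list):
            parts = (content_to_str_sketch(part) for part in content)
            return " ".join(p for p in parts if p)
        return str(content)  # e.g. 123 -> "123"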
+
+
+class TestToolCallSummary:
+    """Test cases for ToolCallSummary type."""
+
+    def test_constructor(self) -> None:
+        """Test ToolCallSummary with all fields."""
+        tool_call = ToolCallSummary(
+            id="call-1",
+            name="knowledge_search",
+            args={"query": "test"},
+            type="tool_call",
+        )
+        assert tool_call.id == "call-1"
+        assert tool_call.name == "knowledge_search"
+        assert tool_call.args == {"query": "test"}
+        assert tool_call.type == "tool_call"
+
+    def test_missing_required_fields(self) -> None:
+        """Test ToolCallSummary raises ValidationError when required fields are missing."""
+        with pytest.raises(ValidationError):
+            ToolCallSummary()  # type: ignore[call-arg]
+
+
+class TestToolResultSummary:
+    """Test cases for ToolResultSummary type."""
+
+    def test_constructor(self) -> None:
+        """Test ToolResultSummary with all fields."""
+        tool_result = ToolResultSummary(
+            id="call-1",
+            status="success",
+            content='{"chunks_found": 5}',
+            type="tool_result",
+            round=1,
+        )
+        assert tool_result.id == "call-1"
+        assert tool_result.status == "success"
+        assert tool_result.content == '{"chunks_found": 5}'
+        assert tool_result.type == "tool_result"
+        assert tool_result.round == 1
+
+    def test_missing_required_fields(self) -> None:
+        """Test ToolResultSummary raises ValidationError when required fields are missing."""
+        with pytest.raises(ValidationError):
+            ToolResultSummary()  # type: ignore[call-arg]
+
+
+class TestReferencedDocument:
+    """Test cases for ReferencedDocument type."""
+
+    def test_constructor(self) -> None:
+        """Test ReferencedDocument with all fields."""
+        doc = ReferencedDocument(
+            doc_url=AnyUrl("https://example.com/doc"),
+            doc_title="Test Document",
+        )
+        assert str(doc.doc_url) == "https://example.com/doc"
+        assert doc.doc_title == "Test Document"
+
+    def test_constructor_with_defaults(self) -> None:
+        """Test ReferencedDocument with no arguments uses None defaults."""
+        doc = ReferencedDocument()
+        assert doc.doc_url is None
+        assert doc.doc_title is None
+
+    def test_constructor_partial_fields(self) -> None:
+        """Test ReferencedDocument with partial fields."""
+        doc = ReferencedDocument(doc_url=AnyUrl("https://example.com/doc"))
+        assert str(doc.doc_url) == "https://example.com/doc"
+        assert doc.doc_title is None
+
+        doc = ReferencedDocument(doc_title="Test Title")
+        assert doc.doc_url is None
+        assert doc.doc_title == "Test Title"
diff --git a/ubi.repo b/ubi.repo
deleted file mode 100644
index 8e3e8987a..000000000
--- a/ubi.repo
+++ /dev/null
@@ -1,62 +0,0 @@
-[ubi-9-for-$basearch-baseos-rpms]
-name = Red Hat Universal Base Image 9 (RPMs) - BaseOS
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/baseos/os
-enabled = 1
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[ubi-9-for-$basearch-baseos-debug-rpms]
-name = Red Hat Universal Base Image 9 (Debug RPMs) - BaseOS
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/baseos/debug
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[ubi-9-for-$basearch-baseos-source-rpms]
-name = Red Hat Universal Base Image 9 (Source RPMs) - BaseOS
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/baseos/source/SRPMS
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[ubi-9-for-$basearch-appstream-rpms]
-name = Red Hat Universal Base Image 9 (RPMs) - AppStream
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/appstream/os
-enabled = 1
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[ubi-9-for-$basearch-appstream-debug-rpms]
-name = Red Hat Universal Base Image 9 (Debug RPMs) - AppStream
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/appstream/debug
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[ubi-9-for-$basearch-appstream-source-rpms]
-name = Red Hat Universal Base Image 9 (Source RPMs) - AppStream
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/appstream/source/SRPMS
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[codeready-builder-for-ubi-9-$basearch-rpms]
-name = Red Hat Universal Base Image 9 (RPMs) - CodeReady Builder
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/codeready-builder/os
-enabled = 1
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[codeready-builder-for-ubi-9-$basearch-debug-rpms]
-name = Red Hat Universal Base Image 9 (Debug RPMs) - CodeReady Builder
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/codeready-builder/debug
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
-
-[codeready-builder-for-ubi-9-$basearch-source-rpms]
-name = Red Hat Universal Base Image 9 (Source RPMs) - CodeReady Builder
-baseurl = https://cdn-ubi.redhat.com/content/public/ubi/dist/ubi9/9/$basearch/codeready-builder/source/SRPMS
-enabled = 0
-gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-gpgcheck = 1
diff --git a/uv.lock b/uv.lock
index 0be316333..d53196afb 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.12, <3.14"
 resolution-markers = [
     "python_full_version >= '3.13' and sys_platform == 'win32'",
@@ -162,11 +162,11 @@ wheels = [
 
 [[package]]
 name = "astroid"
-version = "4.0.3"
+version = "4.0.4"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/c17d0f83016532a1ad87d1de96837164c99d47a3b6bbba28bd597c25b37a/astroid-4.0.3.tar.gz", hash = "sha256:08d1de40d251cc3dc4a7a12726721d475ac189e4e583d596ece7422bc176bda3", size = 406224, upload-time = "2026-01-03T22:14:26.096Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/07/63/0adf26577da5eff6eb7a177876c1cfa213856be9926a000f65c4add9692b/astroid-4.0.4.tar.gz", hash = "sha256:986fed8bcf79fb82c78b18a53352a0b287a73817d6dbcfba3162da36667c49a0", size = 406358, upload-time = "2026-02-07T23:35:07.509Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/ce/66/686ac4fc6ef48f5bacde625adac698f41d5316a9753c2b20bb0931c9d4e2/astroid-4.0.3-py3-none-any.whl", hash = "sha256:864a0a34af1bd70e1049ba1e61cee843a7252c826d97825fcee9b2fcbd9e1b14", size = 276443, upload-time = "2026-01-03T22:14:24.412Z" },
+    { url = "https://files.pythonhosted.org/packages/b0/cf/1c5f42b110e57bc5502eb80dbc3b03d256926062519224835ef08134f1f9/astroid-4.0.4-py3-none-any.whl", hash = "sha256:52f39653876c7dec3e3afd4c2696920e05c83832b9737afc21928f2d2eb7a753", size = 276445, upload-time = "2026-02-07T23:35:05.344Z" },
 ]
 
 [[package]]
@@ -204,14 +204,14 @@ wheels = [
 
 [[package]]
 name = "authlib"
-version = "1.6.6"
+version = "1.6.7"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "cryptography" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/bb/9b/b1661026ff24bc641b76b78c5222d614776b0c085bcfdac9bd15a1cb4b35/authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e", size = 164894, upload-time = "2025-12-12T08:01:41.464Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/49/dc/ed1681bf1339dd6ea1ce56136bad4baabc6f7ad466e375810702b0237047/authlib-1.6.7.tar.gz", hash = "sha256:dbf10100011d1e1b34048c9d120e83f13b35d69a826ae762b93d2fb5aafc337b", size = 164950, upload-time = "2026-02-06T14:04:14.171Z" }
 wheels = [
- { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = "2025-12-12T08:01:40.209Z" }, + { url = "https://files.pythonhosted.org/packages/f8/00/3ed12264094ec91f534fae429945efbaa9f8c666f3aa7061cc3b2a26a0cd/authlib-1.6.7-py2.py3-none-any.whl", hash = "sha256:c637340d9a02789d2efa1d003a7437d10d3e565237bcb5fcbc6c134c7b95bab0", size = 244115, upload-time = "2026-02-06T14:04:12.141Z" }, ] [[package]] @@ -319,7 +319,7 @@ wheels = [ [[package]] name = "blobfile" -version = "3.1.0" +version = "3.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -327,9 +327,9 @@ dependencies = [ { name = "pycryptodomex" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f0/6d/2e7567da75ddbb24fe979f52284b708da349d67a41042635af36071a5a6b/blobfile-3.1.0.tar.gz", hash = "sha256:d45b6b1fa3b0920732314c23ddbdb4f494ca12f787c2b6eb6bba6faa51382671", size = 77229, upload-time = "2025-09-06T00:36:15.583Z" } +sdist = { url = "https://files.pythonhosted.org/packages/59/3e/9f613b3bf2f70a96a03ee102f8ad0d570d5637674f0e1814e7c301c68134/blobfile-3.2.0.tar.gz", hash = "sha256:78514a9265b9aa7d4607042dc77c5e6461ab27036450ad8e1f6ef9a7f29bf958", size = 78442, upload-time = "2026-02-07T03:10:54.273Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/a7/51af11120d75af2828f8eede0b13a4caff650d708ac50e62d000aefe1ffb/blobfile-3.1.0-py3-none-any.whl", hash = "sha256:2b4c5e766ebb7dfa20e4990cf6ec3d2106bdc91d632fb9377f170a234c5a5c6a", size = 75741, upload-time = "2025-09-06T00:36:14.11Z" }, + { url = "https://files.pythonhosted.org/packages/90/ab/e0a104d874f18e2552d981e6e978c64d3c8fa2fad4fbc46e9daa42b31db3/blobfile-3.2.0-py3-none-any.whl", hash = "sha256:e5e4095477da9f09e2077f41320c006001b2102a61f07d41ceaaecdf5d9741d8", size = 76958, upload-time = "2026-02-07T03:10:52.86Z" }, ] [[package]] @@ -348,11 +348,11 @@ wheels = [ [[package]] name = "cachetools" -version = "6.2.6" +version = "7.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/39/91/d9ae9a66b01102a18cd16db0cf4cd54187ffe10f0865cc80071a4104fbb3/cachetools-6.2.6.tar.gz", hash = "sha256:16c33e1f276b9a9c0b49ab5782d901e3ad3de0dd6da9bf9bcd29ac5672f2f9e6", size = 32363, upload-time = "2026-01-27T20:32:59.956Z" } +sdist = { url = "https://files.pythonhosted.org/packages/98/af/df70e9b65bc77a1cbe0768c0aa4617147f30f8306ded98c1744bcdc0ae1e/cachetools-7.0.0.tar.gz", hash = "sha256:a9abf18ff3b86c7d05b27ead412e235e16ae045925e531fae38d5fada5ed5b08", size = 35796, upload-time = "2026-02-01T18:59:47.411Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/90/45/f458fa2c388e79dd9d8b9b0c99f1d31b568f27388f2fdba7bb66bbc0c6ed/cachetools-6.2.6-py3-none-any.whl", hash = "sha256:8c9717235b3c651603fff0076db52d6acbfd1b338b8ed50256092f7ce9c85bda", size = 11668, upload-time = "2026-01-27T20:32:58.527Z" }, + { url = "https://files.pythonhosted.org/packages/28/df/2dd32cce20cbcf6f2ec456b58d44368161ad28320729f64e5e1d5d7bd0ae/cachetools-7.0.0-py3-none-any.whl", hash = "sha256:d52fef60e6e964a1969cfb61ccf6242a801b432790fe520d78720d757c81cbd2", size = 13487, upload-time = "2026-02-01T18:59:45.981Z" }, ] [[package]] @@ -399,6 +399,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, ] +[[package]] +name = "chardet" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" }, +] + [[package]] name = "charset-normalizer" version = "3.4.4" @@ -472,50 +481,50 @@ wheels = [ [[package]] name = "coverage" -version = "7.13.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ad/49/349848445b0e53660e258acbcc9b0d014895b6739237920886672240f84b/coverage-7.13.2.tar.gz", hash = "sha256:044c6951ec37146b72a50cc81ef02217d27d4c3640efd2640311393cbbf143d3", size = 826523, upload-time = "2026-01-25T13:00:04.889Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/46/39/e92a35f7800222d3f7b2cbb7bbc3b65672ae8d501cb31801b2d2bd7acdf1/coverage-7.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f106b2af193f965d0d3234f3f83fc35278c7fb935dfbde56ae2da3dd2c03b84d", size = 219142, upload-time = "2026-01-25T12:58:00.448Z" }, - { url = "https://files.pythonhosted.org/packages/45/7a/8bf9e9309c4c996e65c52a7c5a112707ecdd9fbaf49e10b5a705a402bbb4/coverage-7.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f45d21dc4d5d6bd29323f0320089ef7eae16e4bef712dff79d184fa7330af3", size = 219503, upload-time = "2026-01-25T12:58:02.451Z" }, - { url = "https://files.pythonhosted.org/packages/87/93/17661e06b7b37580923f3f12406ac91d78aeed293fb6da0b69cc7957582f/coverage-7.13.2-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:fae91dfecd816444c74531a9c3d6ded17a504767e97aa674d44f638107265b99", size = 251006, upload-time = "2026-01-25T12:58:04.059Z" }, - { url = "https://files.pythonhosted.org/packages/12/f0/f9e59fb8c310171497f379e25db060abef9fa605e09d63157eebec102676/coverage-7.13.2-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:264657171406c114787b441484de620e03d8f7202f113d62fcd3d9688baa3e6f", size = 253750, upload-time = "2026-01-25T12:58:05.574Z" }, - { url = "https://files.pythonhosted.org/packages/e5/b1/1935e31add2232663cf7edd8269548b122a7d100047ff93475dbaaae673e/coverage-7.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae47d8dcd3ded0155afbb59c62bd8ab07ea0fd4902e1c40567439e6db9dcaf2f", size = 254862, upload-time = "2026-01-25T12:58:07.647Z" }, - { url = "https://files.pythonhosted.org/packages/af/59/b5e97071ec13df5f45da2b3391b6cdbec78ba20757bc92580a5b3d5fa53c/coverage-7.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a0b33e9fd838220b007ce8f299114d406c1e8edb21336af4c97a26ecfd185aa", size = 251420, upload-time = "2026-01-25T12:58:09.309Z" }, - { url = 
"https://files.pythonhosted.org/packages/3f/75/9495932f87469d013dc515fb0ce1aac5fa97766f38f6b1a1deb1ee7b7f3a/coverage-7.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b3becbea7f3ce9a2d4d430f223ec15888e4deb31395840a79e916368d6004cce", size = 252786, upload-time = "2026-01-25T12:58:10.909Z" }, - { url = "https://files.pythonhosted.org/packages/6a/59/af550721f0eb62f46f7b8cb7e6f1860592189267b1c411a4e3a057caacee/coverage-7.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f819c727a6e6eeb8711e4ce63d78c620f69630a2e9d53bc95ca5379f57b6ba94", size = 250928, upload-time = "2026-01-25T12:58:12.449Z" }, - { url = "https://files.pythonhosted.org/packages/9b/b1/21b4445709aae500be4ab43bbcfb4e53dc0811c3396dcb11bf9f23fd0226/coverage-7.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:4f7b71757a3ab19f7ba286e04c181004c1d61be921795ee8ba6970fd0ec91da5", size = 250496, upload-time = "2026-01-25T12:58:14.047Z" }, - { url = "https://files.pythonhosted.org/packages/ba/b1/0f5d89dfe0392990e4f3980adbde3eb34885bc1effb2dc369e0bf385e389/coverage-7.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b7fc50d2afd2e6b4f6f2f403b70103d280a8e0cb35320cbbe6debcda02a1030b", size = 252373, upload-time = "2026-01-25T12:58:15.976Z" }, - { url = "https://files.pythonhosted.org/packages/01/c9/0cf1a6a57a9968cc049a6b896693faa523c638a5314b1fc374eb2b2ac904/coverage-7.13.2-cp312-cp312-win32.whl", hash = "sha256:292250282cf9bcf206b543d7608bda17ca6fc151f4cbae949fc7e115112fbd41", size = 221696, upload-time = "2026-01-25T12:58:17.517Z" }, - { url = "https://files.pythonhosted.org/packages/4d/05/d7540bf983f09d32803911afed135524570f8c47bb394bf6206c1dc3a786/coverage-7.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:eeea10169fac01549a7921d27a3e517194ae254b542102267bef7a93ed38c40e", size = 222504, upload-time = "2026-01-25T12:58:19.115Z" }, - { url = "https://files.pythonhosted.org/packages/15/8b/1a9f037a736ced0a12aacf6330cdaad5008081142a7070bc58b0f7930cbc/coverage-7.13.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a5b567f0b635b592c917f96b9a9cb3dbd4c320d03f4bf94e9084e494f2e8894", size = 221120, upload-time = "2026-01-25T12:58:21.334Z" }, - { url = "https://files.pythonhosted.org/packages/a7/f0/3d3eac7568ab6096ff23791a526b0048a1ff3f49d0e236b2af6fb6558e88/coverage-7.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ed75de7d1217cf3b99365d110975f83af0528c849ef5180a12fd91b5064df9d6", size = 219168, upload-time = "2026-01-25T12:58:23.376Z" }, - { url = "https://files.pythonhosted.org/packages/a3/a6/f8b5cfeddbab95fdef4dcd682d82e5dcff7a112ced57a959f89537ee9995/coverage-7.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97e596de8fa9bada4d88fde64a3f4d37f1b6131e4faa32bad7808abc79887ddc", size = 219537, upload-time = "2026-01-25T12:58:24.932Z" }, - { url = "https://files.pythonhosted.org/packages/7b/e6/8d8e6e0c516c838229d1e41cadcec91745f4b1031d4db17ce0043a0423b4/coverage-7.13.2-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:68c86173562ed4413345410c9480a8d64864ac5e54a5cda236748031e094229f", size = 250528, upload-time = "2026-01-25T12:58:26.567Z" }, - { url = "https://files.pythonhosted.org/packages/8e/78/befa6640f74092b86961f957f26504c8fba3d7da57cc2ab7407391870495/coverage-7.13.2-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7be4d613638d678b2b3773b8f687537b284d7074695a43fe2fbbfc0e31ceaed1", size = 253132, upload-time = "2026-01-25T12:58:28.251Z" }, - { url = 
"https://files.pythonhosted.org/packages/9d/10/1630db1edd8ce675124a2ee0f7becc603d2bb7b345c2387b4b95c6907094/coverage-7.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7f63ce526a96acd0e16c4af8b50b64334239550402fb1607ce6a584a6d62ce9", size = 254374, upload-time = "2026-01-25T12:58:30.294Z" }, - { url = "https://files.pythonhosted.org/packages/ed/1d/0d9381647b1e8e6d310ac4140be9c428a0277330991e0c35bdd751e338a4/coverage-7.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:406821f37f864f968e29ac14c3fccae0fec9fdeba48327f0341decf4daf92d7c", size = 250762, upload-time = "2026-01-25T12:58:32.036Z" }, - { url = "https://files.pythonhosted.org/packages/43/e4/5636dfc9a7c871ee8776af83ee33b4c26bc508ad6cee1e89b6419a366582/coverage-7.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ee68e5a4e3e5443623406b905db447dceddffee0dceb39f4e0cd9ec2a35004b5", size = 252502, upload-time = "2026-01-25T12:58:33.961Z" }, - { url = "https://files.pythonhosted.org/packages/02/2a/7ff2884d79d420cbb2d12fed6fff727b6d0ef27253140d3cdbbd03187ee0/coverage-7.13.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2ee0e58cca0c17dd9c6c1cdde02bb705c7b3fbfa5f3b0b5afeda20d4ebff8ef4", size = 250463, upload-time = "2026-01-25T12:58:35.529Z" }, - { url = "https://files.pythonhosted.org/packages/91/c0/ba51087db645b6c7261570400fc62c89a16278763f36ba618dc8657a187b/coverage-7.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e5bbb5018bf76a56aabdb64246b5288d5ae1b7d0dd4d0534fe86df2c2992d1c", size = 250288, upload-time = "2026-01-25T12:58:37.226Z" }, - { url = "https://files.pythonhosted.org/packages/03/07/44e6f428551c4d9faf63ebcefe49b30e5c89d1be96f6a3abd86a52da9d15/coverage-7.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a55516c68ef3e08e134e818d5e308ffa6b1337cc8b092b69b24287bf07d38e31", size = 252063, upload-time = "2026-01-25T12:58:38.821Z" }, - { url = "https://files.pythonhosted.org/packages/c2/67/35b730ad7e1859dd57e834d1bc06080d22d2f87457d53f692fce3f24a5a9/coverage-7.13.2-cp313-cp313-win32.whl", hash = "sha256:5b20211c47a8abf4abc3319d8ce2464864fa9f30c5fcaf958a3eed92f4f1fef8", size = 221716, upload-time = "2026-01-25T12:58:40.484Z" }, - { url = "https://files.pythonhosted.org/packages/0d/82/e5fcf5a97c72f45fc14829237a6550bf49d0ab882ac90e04b12a69db76b4/coverage-7.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:14f500232e521201cf031549fb1ebdfc0a40f401cf519157f76c397e586c3beb", size = 222522, upload-time = "2026-01-25T12:58:43.247Z" }, - { url = "https://files.pythonhosted.org/packages/b1/f1/25d7b2f946d239dd2d6644ca2cc060d24f97551e2af13b6c24c722ae5f97/coverage-7.13.2-cp313-cp313-win_arm64.whl", hash = "sha256:9779310cb5a9778a60c899f075a8514c89fa6d10131445c2207fc893e0b14557", size = 221145, upload-time = "2026-01-25T12:58:45Z" }, - { url = "https://files.pythonhosted.org/packages/9e/f7/080376c029c8f76fadfe43911d0daffa0cbdc9f9418a0eead70c56fb7f4b/coverage-7.13.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5a1e41ce5df6b547cbc3d3699381c9e2c2c369c67837e716ed0f549d48e", size = 219861, upload-time = "2026-01-25T12:58:46.586Z" }, - { url = "https://files.pythonhosted.org/packages/42/11/0b5e315af5ab35f4c4a70e64d3314e4eec25eefc6dec13be3a7d5ffe8ac5/coverage-7.13.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b01899e82a04085b6561eb233fd688474f57455e8ad35cd82286463ba06332b7", size = 220207, upload-time = "2026-01-25T12:58:48.277Z" }, - { url = 
"https://files.pythonhosted.org/packages/b2/0c/0874d0318fb1062117acbef06a09cf8b63f3060c22265adaad24b36306b7/coverage-7.13.2-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:838943bea48be0e2768b0cf7819544cdedc1bbb2f28427eabb6eb8c9eb2285d3", size = 261504, upload-time = "2026-01-25T12:58:49.904Z" }, - { url = "https://files.pythonhosted.org/packages/83/5e/1cd72c22ecb30751e43a72f40ba50fcef1b7e93e3ea823bd9feda8e51f9a/coverage-7.13.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:93d1d25ec2b27e90bcfef7012992d1f5121b51161b8bffcda756a816cf13c2c3", size = 263582, upload-time = "2026-01-25T12:58:51.582Z" }, - { url = "https://files.pythonhosted.org/packages/9b/da/8acf356707c7a42df4d0657020308e23e5a07397e81492640c186268497c/coverage-7.13.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93b57142f9621b0d12349c43fc7741fe578e4bc914c1e5a54142856cfc0bf421", size = 266008, upload-time = "2026-01-25T12:58:53.234Z" }, - { url = "https://files.pythonhosted.org/packages/41/41/ea1730af99960309423c6ea8d6a4f1fa5564b2d97bd1d29dda4b42611f04/coverage-7.13.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f06799ae1bdfff7ccb8665d75f8291c69110ba9585253de254688aa8a1ccc6c5", size = 260762, upload-time = "2026-01-25T12:58:55.372Z" }, - { url = "https://files.pythonhosted.org/packages/22/fa/02884d2080ba71db64fdc127b311db60e01fe6ba797d9c8363725e39f4d5/coverage-7.13.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f9405ab4f81d490811b1d91c7a20361135a2df4c170e7f0b747a794da5b7f23", size = 263571, upload-time = "2026-01-25T12:58:57.52Z" }, - { url = "https://files.pythonhosted.org/packages/d2/6b/4083aaaeba9b3112f55ac57c2ce7001dc4d8fa3fcc228a39f09cc84ede27/coverage-7.13.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:f9ab1d5b86f8fbc97a5b3cd6280a3fd85fef3b028689d8a2c00918f0d82c728c", size = 261200, upload-time = "2026-01-25T12:58:59.255Z" }, - { url = "https://files.pythonhosted.org/packages/e9/d2/aea92fa36d61955e8c416ede9cf9bf142aa196f3aea214bb67f85235a050/coverage-7.13.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:f674f59712d67e841525b99e5e2b595250e39b529c3bda14764e4f625a3fa01f", size = 260095, upload-time = "2026-01-25T12:59:01.066Z" }, - { url = "https://files.pythonhosted.org/packages/0d/ae/04ffe96a80f107ea21b22b2367175c621da920063260a1c22f9452fd7866/coverage-7.13.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c6cadac7b8ace1ba9144feb1ae3cb787a6065ba6d23ffc59a934b16406c26573", size = 262284, upload-time = "2026-01-25T12:59:02.802Z" }, - { url = "https://files.pythonhosted.org/packages/1c/7a/6f354dcd7dfc41297791d6fb4e0d618acb55810bde2c1fd14b3939e05c2b/coverage-7.13.2-cp313-cp313t-win32.whl", hash = "sha256:14ae4146465f8e6e6253eba0cccd57423e598a4cb925958b240c805300918343", size = 222389, upload-time = "2026-01-25T12:59:04.563Z" }, - { url = "https://files.pythonhosted.org/packages/8d/d5/080ad292a4a3d3daf411574be0a1f56d6dee2c4fdf6b005342be9fac807f/coverage-7.13.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9074896edd705a05769e3de0eac0a8388484b503b68863dd06d5e473f874fd47", size = 223450, upload-time = "2026-01-25T12:59:06.677Z" }, - { url = "https://files.pythonhosted.org/packages/88/96/df576fbacc522e9fb8d1c4b7a7fc62eb734be56e2cba1d88d2eabe08ea3f/coverage-7.13.2-cp313-cp313t-win_arm64.whl", hash = "sha256:69e526e14f3f854eda573d3cf40cffd29a1a91c684743d904c33dbdcd0e0f3e7", size = 221707, upload-time = 
"2026-01-25T12:59:08.363Z" }, - { url = "https://files.pythonhosted.org/packages/d2/db/d291e30fdf7ea617a335531e72294e0c723356d7fdde8fba00610a76bda9/coverage-7.13.2-py3-none-any.whl", hash = "sha256:40ce1ea1e25125556d8e76bd0b61500839a07944cc287ac21d5626f3e620cad5", size = 210943, upload-time = "2026-01-25T13:00:02.388Z" }, +version = "7.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/43/3e4ac666cc35f231fa70c94e9f38459299de1a152813f9d2f60fc5f3ecaf/coverage-7.13.3.tar.gz", hash = "sha256:f7f6182d3dfb8802c1747eacbfe611b669455b69b7c037484bb1efbbb56711ac", size = 826832, upload-time = "2026-02-03T14:02:30.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/44/330f8e83b143f6668778ed61d17ece9dc48459e9e74669177de02f45fec5/coverage-7.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ed48b4170caa2c4420e0cd27dc977caaffc7eecc317355751df8373dddcef595", size = 219441, upload-time = "2026-02-03T14:00:22.585Z" }, + { url = "https://files.pythonhosted.org/packages/08/e7/29db05693562c2e65bdf6910c0af2fd6f9325b8f43caf7a258413f369e30/coverage-7.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8f2adf4bcffbbec41f366f2e6dffb9d24e8172d16e91da5799c9b7ed6b5716e6", size = 219801, upload-time = "2026-02-03T14:00:24.186Z" }, + { url = "https://files.pythonhosted.org/packages/90/ae/7f8a78249b02b0818db46220795f8ac8312ea4abd1d37d79ea81db5cae81/coverage-7.13.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01119735c690786b6966a1e9f098da4cd7ca9174c4cfe076d04e653105488395", size = 251306, upload-time = "2026-02-03T14:00:25.798Z" }, + { url = "https://files.pythonhosted.org/packages/62/71/a18a53d1808e09b2e9ebd6b47dad5e92daf4c38b0686b4c4d1b2f3e42b7f/coverage-7.13.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8bb09e83c603f152d855f666d70a71765ca8e67332e5829e62cb9466c176af23", size = 254051, upload-time = "2026-02-03T14:00:27.474Z" }, + { url = "https://files.pythonhosted.org/packages/4a/0a/eb30f6455d04c5a3396d0696cad2df0269ae7444bb322f86ffe3376f7bf9/coverage-7.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b607a40cba795cfac6d130220d25962931ce101f2f478a29822b19755377fb34", size = 255160, upload-time = "2026-02-03T14:00:29.024Z" }, + { url = "https://files.pythonhosted.org/packages/7b/7e/a45baac86274ce3ed842dbb84f14560c673ad30535f397d89164ec56c5df/coverage-7.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:44f14a62f5da2e9aedf9080e01d2cda61df39197d48e323538ec037336d68da8", size = 251709, upload-time = "2026-02-03T14:00:30.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/df/dd0dc12f30da11349993f3e218901fdf82f45ee44773596050c8f5a1fb25/coverage-7.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:debf29e0b157769843dff0981cc76f79e0ed04e36bb773c6cac5f6029054bd8a", size = 253083, upload-time = "2026-02-03T14:00:32.14Z" }, + { url = "https://files.pythonhosted.org/packages/ab/32/fc764c8389a8ce95cb90eb97af4c32f392ab0ac23ec57cadeefb887188d3/coverage-7.13.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:824bb95cd71604031ae9a48edb91fd6effde669522f960375668ed21b36e3ec4", size = 251227, upload-time = "2026-02-03T14:00:34.721Z" }, + { url = "https://files.pythonhosted.org/packages/dd/ca/d025e9da8f06f24c34d2da9873957cfc5f7e0d67802c3e34d0caa8452130/coverage-7.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:8f1010029a5b52dc427c8e2a8dbddb2303ddd180b806687d1acd1bb1d06649e7", size = 250794, upload-time = "2026-02-03T14:00:36.278Z" }, + { url = "https://files.pythonhosted.org/packages/45/c7/76bf35d5d488ec8f68682eb8e7671acc50a6d2d1c1182de1d2b6d4ffad3b/coverage-7.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cd5dee4fd7659d8306ffa79eeaaafd91fa30a302dac3af723b9b469e549247e0", size = 252671, upload-time = "2026-02-03T14:00:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/bf/10/1921f1a03a7c209e1cb374f81a6b9b68b03cdb3ecc3433c189bc90e2a3d5/coverage-7.13.3-cp312-cp312-win32.whl", hash = "sha256:f7f153d0184d45f3873b3ad3ad22694fd73aadcb8cdbc4337ab4b41ea6b4dff1", size = 221986, upload-time = "2026-02-03T14:00:40.442Z" }, + { url = "https://files.pythonhosted.org/packages/3c/7c/f5d93297f8e125a80c15545edc754d93e0ed8ba255b65e609b185296af01/coverage-7.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:03a6e5e1e50819d6d7436f5bc40c92ded7e484e400716886ac921e35c133149d", size = 222793, upload-time = "2026-02-03T14:00:42.106Z" }, + { url = "https://files.pythonhosted.org/packages/43/59/c86b84170015b4555ebabca8649bdf9f4a1f737a73168088385ed0f947c4/coverage-7.13.3-cp312-cp312-win_arm64.whl", hash = "sha256:51c4c42c0e7d09a822b08b6cf79b3c4db8333fffde7450da946719ba0d45730f", size = 221410, upload-time = "2026-02-03T14:00:43.726Z" }, + { url = "https://files.pythonhosted.org/packages/81/f3/4c333da7b373e8c8bfb62517e8174a01dcc373d7a9083698e3b39d50d59c/coverage-7.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:853c3d3c79ff0db65797aad79dee6be020efd218ac4510f15a205f1e8d13ce25", size = 219468, upload-time = "2026-02-03T14:00:45.829Z" }, + { url = "https://files.pythonhosted.org/packages/d6/31/0714337b7d23630c8de2f4d56acf43c65f8728a45ed529b34410683f7217/coverage-7.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f75695e157c83d374f88dcc646a60cb94173304a9258b2e74ba5a66b7614a51a", size = 219839, upload-time = "2026-02-03T14:00:47.407Z" }, + { url = "https://files.pythonhosted.org/packages/12/99/bd6f2a2738144c98945666f90cae446ed870cecf0421c767475fcf42cdbe/coverage-7.13.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2d098709621d0819039f3f1e471ee554f55a0b2ac0d816883c765b14129b5627", size = 250828, upload-time = "2026-02-03T14:00:49.029Z" }, + { url = "https://files.pythonhosted.org/packages/6f/99/97b600225fbf631e6f5bfd3ad5bcaf87fbb9e34ff87492e5a572ff01bbe2/coverage-7.13.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:16d23d6579cf80a474ad160ca14d8b319abaa6db62759d6eef53b2fc979b58c8", size = 253432, upload-time = "2026-02-03T14:00:50.655Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5c/abe2b3490bda26bd4f5e3e799be0bdf00bd81edebedc2c9da8d3ef288fa8/coverage-7.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00d34b29a59d2076e6f318b30a00a69bf63687e30cd882984ed444e753990cc1", size = 254672, upload-time = "2026-02-03T14:00:52.757Z" }, + { url = "https://files.pythonhosted.org/packages/31/ba/5d1957c76b40daff53971fe0adb84d9c2162b614280031d1d0653dd010c1/coverage-7.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ab6d72bffac9deb6e6cb0f61042e748de3f9f8e98afb0375a8e64b0b6e11746b", size = 251050, upload-time = "2026-02-03T14:00:54.332Z" }, + { url = "https://files.pythonhosted.org/packages/69/dc/dffdf3bfe9d32090f047d3c3085378558cb4eb6778cda7de414ad74581ed/coverage-7.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", 
hash = "sha256:e129328ad1258e49cae0123a3b5fcb93d6c2fa90d540f0b4c7cdcdc019aaa3dc", size = 252801, upload-time = "2026-02-03T14:00:56.121Z" }, + { url = "https://files.pythonhosted.org/packages/87/51/cdf6198b0f2746e04511a30dc9185d7b8cdd895276c07bdb538e37f1cd50/coverage-7.13.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2213a8d88ed35459bda71597599d4eec7c2ebad201c88f0bfc2c26fd9b0dd2ea", size = 250763, upload-time = "2026-02-03T14:00:58.719Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1a/596b7d62218c1d69f2475b69cc6b211e33c83c902f38ee6ae9766dd422da/coverage-7.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:00dd3f02de6d5f5c9c3d95e3e036c3c2e2a669f8bf2d3ceb92505c4ce7838f67", size = 250587, upload-time = "2026-02-03T14:01:01.197Z" }, + { url = "https://files.pythonhosted.org/packages/f7/46/52330d5841ff660f22c130b75f5e1dd3e352c8e7baef5e5fef6b14e3e991/coverage-7.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f9bada7bc660d20b23d7d312ebe29e927b655cf414dadcdb6335a2075695bd86", size = 252358, upload-time = "2026-02-03T14:01:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/36/8a/e69a5be51923097ba7d5cff9724466e74fe486e9232020ba97c809a8b42b/coverage-7.13.3-cp313-cp313-win32.whl", hash = "sha256:75b3c0300f3fa15809bd62d9ca8b170eb21fcf0100eb4b4154d6dc8b3a5bbd43", size = 222007, upload-time = "2026-02-03T14:01:04.876Z" }, + { url = "https://files.pythonhosted.org/packages/0a/09/a5a069bcee0d613bdd48ee7637fa73bc09e7ed4342b26890f2df97cc9682/coverage-7.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:a2f7589c6132c44c53f6e705e1a6677e2b7821378c22f7703b2cf5388d0d4587", size = 222812, upload-time = "2026-02-03T14:01:07.296Z" }, + { url = "https://files.pythonhosted.org/packages/3d/4f/d62ad7dfe32f9e3d4a10c178bb6f98b10b083d6e0530ca202b399371f6c1/coverage-7.13.3-cp313-cp313-win_arm64.whl", hash = "sha256:123ceaf2b9d8c614f01110f908a341e05b1b305d6b2ada98763b9a5a59756051", size = 221433, upload-time = "2026-02-03T14:01:09.156Z" }, + { url = "https://files.pythonhosted.org/packages/04/b2/4876c46d723d80b9c5b695f1a11bf5f7c3dabf540ec00d6edc076ff025e6/coverage-7.13.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cc7fd0f726795420f3678ac82ff882c7fc33770bd0074463b5aef7293285ace9", size = 220162, upload-time = "2026-02-03T14:01:11.409Z" }, + { url = "https://files.pythonhosted.org/packages/fc/04/9942b64a0e0bdda2c109f56bda42b2a59d9d3df4c94b85a323c1cae9fc77/coverage-7.13.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d358dc408edc28730aed5477a69338e444e62fba0b7e9e4a131c505fadad691e", size = 220510, upload-time = "2026-02-03T14:01:13.038Z" }, + { url = "https://files.pythonhosted.org/packages/5a/82/5cfe1e81eae525b74669f9795f37eb3edd4679b873d79d1e6c1c14ee6c1c/coverage-7.13.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5d67b9ed6f7b5527b209b24b3df9f2e5bf0198c1bbf99c6971b0e2dcb7e2a107", size = 261801, upload-time = "2026-02-03T14:01:14.674Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ec/a553d7f742fd2cd12e36a16a7b4b3582d5934b496ef2b5ea8abeb10903d4/coverage-7.13.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:59224bfb2e9b37c1335ae35d00daa3a5b4e0b1a20f530be208fff1ecfa436f43", size = 263882, upload-time = "2026-02-03T14:01:16.343Z" }, + { url = "https://files.pythonhosted.org/packages/e1/58/8f54a2a93e3d675635bc406de1c9ac8d551312142ff52c9d71b5e533ad45/coverage-7.13.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ae9306b5299e31e31e0d3b908c66bcb6e7e3ddca143dea0266e9ce6c667346d3", size = 266306, upload-time = "2026-02-03T14:01:18.02Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/e593399fd6ea1f00aee79ebd7cc401021f218d34e96682a92e1bae092ff6/coverage-7.13.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:343aaeb5f8bb7bcd38620fd7bc56e6ee8207847d8c6103a1e7b72322d381ba4a", size = 261051, upload-time = "2026-02-03T14:01:19.757Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e5/e9e0f6138b21bcdebccac36fbfde9cf15eb1bbcea9f5b1f35cd1f465fb91/coverage-7.13.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2182129f4c101272ff5f2f18038d7b698db1bf8e7aa9e615cb48440899ad32e", size = 263868, upload-time = "2026-02-03T14:01:21.487Z" }, + { url = "https://files.pythonhosted.org/packages/9a/bf/de72cfebb69756f2d4a2dde35efcc33c47d85cd3ebdf844b3914aac2ef28/coverage-7.13.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:94d2ac94bd0cc57c5626f52f8c2fffed1444b5ae8c9fc68320306cc2b255e155", size = 261498, upload-time = "2026-02-03T14:01:23.097Z" }, + { url = "https://files.pythonhosted.org/packages/f2/91/4a2d313a70fc2e98ca53afd1c8ce67a89b1944cd996589a5b1fe7fbb3e5c/coverage-7.13.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:65436cde5ecabe26fb2f0bf598962f0a054d3f23ad529361326ac002c61a2a1e", size = 260394, upload-time = "2026-02-03T14:01:24.949Z" }, + { url = "https://files.pythonhosted.org/packages/40/83/25113af7cf6941e779eb7ed8de2a677865b859a07ccee9146d4cc06a03e3/coverage-7.13.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:db83b77f97129813dbd463a67e5335adc6a6a91db652cc085d60c2d512746f96", size = 262579, upload-time = "2026-02-03T14:01:26.703Z" }, + { url = "https://files.pythonhosted.org/packages/1e/19/a5f2b96262977e82fb9aabbe19b4d83561f5d063f18dde3e72f34ffc3b2f/coverage-7.13.3-cp313-cp313t-win32.whl", hash = "sha256:dfb428e41377e6b9ba1b0a32df6db5409cb089a0ed1d0a672dc4953ec110d84f", size = 222679, upload-time = "2026-02-03T14:01:28.553Z" }, + { url = "https://files.pythonhosted.org/packages/81/82/ef1747b88c87a5c7d7edc3704799ebd650189a9158e680a063308b6125ef/coverage-7.13.3-cp313-cp313t-win_amd64.whl", hash = "sha256:5badd7e596e6b0c89aa8ec6d37f4473e4357f982ce57f9a2942b0221cd9cf60c", size = 223740, upload-time = "2026-02-03T14:01:30.776Z" }, + { url = "https://files.pythonhosted.org/packages/1c/4c/a67c7bb5b560241c22736a9cb2f14c5034149ffae18630323fde787339e4/coverage-7.13.3-cp313-cp313t-win_arm64.whl", hash = "sha256:989aa158c0eb19d83c76c26f4ba00dbb272485c56e452010a3450bdbc9daafd9", size = 221996, upload-time = "2026-02-03T14:01:32.495Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fb/70af542d2d938c778c9373ce253aa4116dbe7c0a5672f78b2b2ae0e1b94b/coverage-7.13.3-py3-none-any.whl", hash = "sha256:90a8af9dba6429b2573199622d72e0ebf024d6276f16abce394ad4d181bb0910", size = 211237, upload-time = "2026-02-03T14:02:27.986Z" }, ] [[package]] @@ -627,6 +636,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ba/5a/18ad964b0086c6e62e2e7500f7edc89e3faa45033c71c1893d34eed2b2de/dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af", size = 331094, upload-time = "2025-09-07T18:57:58.071Z" }, ] +[[package]] +name = "docstring-parser" +version = "0.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = 
"sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, +] + [[package]] name = "docutils" version = "0.22.4" @@ -699,17 +717,18 @@ wheels = [ [[package]] name = "fastapi" -version = "0.128.0" +version = "0.128.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, { name = "pydantic" }, { name = "starlette" }, { name = "typing-extensions" }, + { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/b7/21bf3d694cbff0b7cf5f459981d996c2c15e072bd5ca5609806383947f1e/fastapi-0.128.4.tar.gz", hash = "sha256:d6a2cc4c0edfbb2499f3fdec55ba62e751ee58a6354c50f85ed0dabdfbcfeb60", size = 375898, upload-time = "2026-02-07T08:14:09.616Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8b/c8050e556f5d7a1f33a93c2c94379a0bae23c58a79ad9709d7e052d0c3b8/fastapi-0.128.4-py3-none-any.whl", hash = "sha256:9321282cee605fd2075ccbc95c0f2e549d675c59de4a952bba202cd1730ac66b", size = 103684, upload-time = "2026-02-07T08:14:07.939Z" }, ] [[package]] @@ -850,6 +869,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/b6/85c4d21067220b9a78cfb81f516f9725ea6befc1544ec9bd2c1acd97c324/google_api_core-2.29.0-py3-none-any.whl", hash = "sha256:d30bc60980daa36e314b5d5a3e5958b0200cb44ca8fa1be2b614e932b75a3ea9", size = 173906, upload-time = "2026-01-08T22:21:36.093Z" }, ] +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, + { name = "grpcio-status" }, +] + [[package]] name = "google-auth" version = "2.48.0" @@ -864,6 +889,150 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, ] +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + +[[package]] +name = "google-cloud-aiplatform" +version = "1.136.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docstring-parser" }, + { name = "google-api-core", extra = ["grpc"] }, + { name = "google-auth" }, + { name = "google-cloud-bigquery" }, + { name = "google-cloud-resource-manager" }, + { name = "google-cloud-storage" }, + { name = "google-genai" }, + { name = "packaging" }, + { name = "proto-plus" }, + { name = "protobuf" }, + { name = "pydantic" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/7f/9c/38ce04e3ef89034c736320a27b4a6e3171ca2f3fb56d38f76a310c745d14/google_cloud_aiplatform-1.136.0.tar.gz", hash = "sha256:01e64a0d0861486e842bf7e904077c847bcc1b654a29883509d57476de915b7d", size = 9946722, upload-time = "2026-02-04T16:28:12.903Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/e8/f317dc96c9c73846dd3e4d16691cc5f248801f46354d9d57f2c67fd67413/google_cloud_aiplatform-1.136.0-py2.py3-none-any.whl", hash = "sha256:5c829f002b7b673dcd0e718f55cc0557b571bd10eb5cdb7882d72916cfbf8c0e", size = 8203924, upload-time = "2026-02-04T16:28:10.343Z" }, +] + +[[package]] +name = "google-cloud-bigquery" +version = "3.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core", extra = ["grpc"] }, + { name = "google-auth" }, + { name = "google-cloud-core" }, + { name = "google-resumable-media" }, + { name = "packaging" }, + { name = "python-dateutil" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/0a/62438ca138a095945468968696d9cca75a4cfd059e810402e70b0236d8ba/google_cloud_bigquery-3.40.0.tar.gz", hash = "sha256:b3ccb11caf0029f15b29569518f667553fe08f6f1459b959020c83fbbd8f2e68", size = 509287, upload-time = "2026-01-08T01:07:26.065Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/6a/90a04270dd60cc70259b73744f6e610ae9a158b21ab50fb695cca0056a3d/google_cloud_bigquery-3.40.0-py3-none-any.whl", hash = "sha256:0469bcf9e3dad3cab65b67cce98180c8c0aacf3253d47f0f8e976f299b49b5ab", size = 261335, upload-time = "2026-01-08T01:07:23.761Z" }, +] + +[[package]] +name = "google-cloud-core" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/03/ef0bc99d0e0faf4fdbe67ac445e18cdaa74824fd93cd069e7bb6548cb52d/google_cloud_core-2.5.0.tar.gz", hash = "sha256:7c1b7ef5c92311717bd05301aa1a91ffbc565673d3b0b4163a52d8413a186963", size = 36027, upload-time = "2025-10-29T23:17:39.513Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/20/bfa472e327c8edee00f04beecc80baeddd2ab33ee0e86fd7654da49d45e9/google_cloud_core-2.5.0-py3-none-any.whl", hash = "sha256:67d977b41ae6c7211ee830c7912e41003ea8194bff15ae7d72fd6f51e57acabc", size = 29469, upload-time = "2025-10-29T23:17:38.548Z" }, +] + +[[package]] +name = "google-cloud-resource-manager" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core", extra = ["grpc"] }, + { name = "google-auth" }, + { name = "grpc-google-iam-v1" }, + { name = "grpcio" }, + { name = "proto-plus" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/7f/db00b2820475793a52958dc55fe9ec2eb8e863546e05fcece9b921f86ebe/google_cloud_resource_manager-1.16.0.tar.gz", hash = "sha256:cc938f87cc36c2672f062b1e541650629e0d954c405a4dac35ceedee70c267c3", size = 459840, upload-time = "2026-01-15T13:04:07.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/ff/4b28bcc791d9d7e4ac8fea00fbd90ccb236afda56746a3b4564d2ae45df3/google_cloud_resource_manager-1.16.0-py3-none-any.whl", hash = "sha256:fb9a2ad2b5053c508e1c407ac31abfd1a22e91c32876c1892830724195819a28", size = 400218, upload-time = "2026-01-15T13:02:47.378Z" }, +] + +[[package]] +name = "google-cloud-storage" +version = "3.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"google-api-core" }, + { name = "google-auth" }, + { name = "google-cloud-core" }, + { name = "google-crc32c" }, + { name = "google-resumable-media" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/b1/4f0798e88285b50dfc60ed3a7de071def538b358db2da468c2e0deecbb40/google_cloud_storage-3.9.0.tar.gz", hash = "sha256:f2d8ca7db2f652be757e92573b2196e10fbc09649b5c016f8b422ad593c641cc", size = 17298544, upload-time = "2026-02-02T13:36:34.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/0b/816a6ae3c9fd096937d2e5f9670558908811d57d59ddf69dd4b83b326fd1/google_cloud_storage-3.9.0-py3-none-any.whl", hash = "sha256:2dce75a9e8b3387078cbbdad44757d410ecdb916101f8ba308abf202b6968066", size = 321324, upload-time = "2026-02-02T13:36:32.271Z" }, +] + +[[package]] +name = "google-crc32c" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/41/4b9c02f99e4c5fb477122cd5437403b552873f014616ac1d19ac8221a58d/google_crc32c-1.8.0.tar.gz", hash = "sha256:a428e25fb7691024de47fecfbff7ff957214da51eddded0da0ae0e0f03a2cf79", size = 14192, upload-time = "2025-12-16T00:35:25.142Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/5f/7307325b1198b59324c0fa9807cafb551afb65e831699f2ce211ad5c8240/google_crc32c-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:4b8286b659c1335172e39563ab0a768b8015e88e08329fa5321f774275fc3113", size = 31300, upload-time = "2025-12-16T00:21:56.723Z" }, + { url = "https://files.pythonhosted.org/packages/21/8e/58c0d5d86e2220e6a37befe7e6a94dd2f6006044b1a33edf1ff6d9f7e319/google_crc32c-1.8.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:2a3dc3318507de089c5384cc74d54318401410f82aa65b2d9cdde9d297aca7cb", size = 30867, upload-time = "2025-12-16T00:38:31.302Z" }, + { url = "https://files.pythonhosted.org/packages/ce/a9/a780cc66f86335a6019f557a8aaca8fbb970728f0efd2430d15ff1beae0e/google_crc32c-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14f87e04d613dfa218d6135e81b78272c3b904e2a7053b841481b38a7d901411", size = 33364, upload-time = "2025-12-16T00:40:22.96Z" }, + { url = "https://files.pythonhosted.org/packages/21/3f/3457ea803db0198c9aaca2dd373750972ce28a26f00544b6b85088811939/google_crc32c-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cb5c869c2923d56cb0c8e6bcdd73c009c36ae39b652dbe46a05eb4ef0ad01454", size = 33740, upload-time = "2025-12-16T00:40:23.96Z" }, + { url = "https://files.pythonhosted.org/packages/df/c0/87c2073e0c72515bb8733d4eef7b21548e8d189f094b5dad20b0ecaf64f6/google_crc32c-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:3cc0c8912038065eafa603b238abf252e204accab2a704c63b9e14837a854962", size = 34437, upload-time = "2025-12-16T00:35:21.395Z" }, + { url = "https://files.pythonhosted.org/packages/d1/db/000f15b41724589b0e7bc24bc7a8967898d8d3bc8caf64c513d91ef1f6c0/google_crc32c-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:3ebb04528e83b2634857f43f9bb8ef5b2bbe7f10f140daeb01b58f972d04736b", size = 31297, upload-time = "2025-12-16T00:23:20.709Z" }, + { url = "https://files.pythonhosted.org/packages/d7/0d/8ebed0c39c53a7e838e2a486da8abb0e52de135f1b376ae2f0b160eb4c1a/google_crc32c-1.8.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:450dc98429d3e33ed2926fc99ee81001928d63460f8538f21a5d6060912a8e27", size = 30867, upload-time = "2025-12-16T00:43:14.628Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/42/b468aec74a0354b34c8cbf748db20d6e350a68a2b0912e128cabee49806c/google_crc32c-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3b9776774b24ba76831609ffbabce8cdf6fa2bd5e9df37b594221c7e333a81fa", size = 33344, upload-time = "2025-12-16T00:40:24.742Z" }, + { url = "https://files.pythonhosted.org/packages/1c/e8/b33784d6fc77fb5062a8a7854e43e1e618b87d5ddf610a88025e4de6226e/google_crc32c-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:89c17d53d75562edfff86679244830599ee0a48efc216200691de8b02ab6b2b8", size = 33694, upload-time = "2025-12-16T00:40:25.505Z" }, + { url = "https://files.pythonhosted.org/packages/92/b1/d3cbd4d988afb3d8e4db94ca953df429ed6db7282ed0e700d25e6c7bfc8d/google_crc32c-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:57a50a9035b75643996fbf224d6661e386c7162d1dfdab9bc4ca790947d1007f", size = 34435, upload-time = "2025-12-16T00:35:22.107Z" }, +] + +[[package]] +name = "google-genai" +version = "1.62.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "google-auth", extra = ["requests"] }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "sniffio" }, + { name = "tenacity" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/4c/71b32b5c8db420cf2fd0d5ef8a672adbde97d85e5d44a0b4fca712264ef1/google_genai-1.62.0.tar.gz", hash = "sha256:709468a14c739a080bc240a4f3191df597bf64485b1ca3728e0fb67517774c18", size = 490888, upload-time = "2026-02-04T22:48:41.989Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/5f/4645d8a28c6e431d0dd6011003a852563f3da7037d36af53154925b099fd/google_genai-1.62.0-py3-none-any.whl", hash = "sha256:4c3daeff3d05fafee4b9a1a31f9c07f01bc22051081aa58b4d61f58d16d1bcc0", size = 724166, upload-time = "2026-02-04T22:48:39.956Z" }, +] + +[[package]] +name = "google-resumable-media" +version = "2.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-crc32c" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/d7/520b62a35b23038ff005e334dba3ffc75fcf583bee26723f1fd8fd4b6919/google_resumable_media-2.8.0.tar.gz", hash = "sha256:f1157ed8b46994d60a1bc432544db62352043113684d4e030ee02e77ebe9a1ae", size = 2163265, upload-time = "2025-11-17T15:38:06.659Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/0b/93afde9cfe012260e9fe1522f35c9b72d6ee222f316586b1f23ecf44d518/google_resumable_media-2.8.0-py3-none-any.whl", hash = "sha256:dd14a116af303845a8d932ddae161a26e86cc229645bc98b39f026f9b1717582", size = 81340, upload-time = "2025-11-17T15:38:05.594Z" }, +] + [[package]] name = "googleapis-common-protos" version = "1.72.0" @@ -876,6 +1045,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, +] + [[package]] name = "greenlet" version = "3.3.1" @@ -902,35 +1076,63 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/90/e7/824beda656097edee36ab15809fd063447b200cc03a7f6a24c34d520bc88/greenlet-3.3.1-cp313-cp313-win_arm64.whl", hash = 
"sha256:2f080e028001c5273e0b42690eaf359aeef9cb1389da0f171ea51a5dc3c7608d", size = 226294, upload-time = "2026-01-23T15:30:52.73Z" }, ] +[[package]] +name = "grpc-google-iam-v1" +version = "0.14.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos", extra = ["grpc"] }, + { name = "grpcio" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/1e/1011451679a983f2f5c6771a1682542ecb027776762ad031fd0d7129164b/grpc_google_iam_v1-0.14.3.tar.gz", hash = "sha256:879ac4ef33136c5491a6300e27575a9ec760f6cdf9a2518798c1b8977a5dc389", size = 23745, upload-time = "2025-10-15T21:14:53.318Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/bd/330a1bbdb1afe0b96311249e699b6dc9cfc17916394fd4503ac5aca2514b/grpc_google_iam_v1-0.14.3-py3-none-any.whl", hash = "sha256:7a7f697e017a067206a3dfef44e4c634a34d3dee135fe7d7a4613fe3e59217e6", size = 32690, upload-time = "2025-10-15T21:14:51.72Z" }, +] + [[package]] name = "grpcio" -version = "1.76.0" +version = "1.78.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/05/8e29121994b8d959ffa0afd28996d452f291b48cfc0875619de0bde2c50c/grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8", size = 5799718, upload-time = "2025-10-21T16:21:17.939Z" }, - { url = "https://files.pythonhosted.org/packages/d9/75/11d0e66b3cdf998c996489581bdad8900db79ebd83513e45c19548f1cba4/grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280", size = 11825627, upload-time = "2025-10-21T16:21:20.466Z" }, - { url = "https://files.pythonhosted.org/packages/28/50/2f0aa0498bc188048f5d9504dcc5c2c24f2eb1a9337cd0fa09a61a2e75f0/grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4", size = 6359167, upload-time = "2025-10-21T16:21:23.122Z" }, - { url = "https://files.pythonhosted.org/packages/66/e5/bbf0bb97d29ede1d59d6588af40018cfc345b17ce979b7b45424628dc8bb/grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11", size = 7044267, upload-time = "2025-10-21T16:21:25.995Z" }, - { url = "https://files.pythonhosted.org/packages/f5/86/f6ec2164f743d9609691115ae8ece098c76b894ebe4f7c94a655c6b03e98/grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6", size = 6573963, upload-time = "2025-10-21T16:21:28.631Z" }, - { url = "https://files.pythonhosted.org/packages/60/bc/8d9d0d8505feccfdf38a766d262c71e73639c165b311c9457208b56d92ae/grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8", size = 7164484, upload-time = "2025-10-21T16:21:30.837Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/e6/5d6c2fc10b95edf6df9b8f19cf10a34263b7fd48493936fffd5085521292/grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980", size = 8127777, upload-time = "2025-10-21T16:21:33.577Z" }, - { url = "https://files.pythonhosted.org/packages/3f/c8/dce8ff21c86abe025efe304d9e31fdb0deaaa3b502b6a78141080f206da0/grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882", size = 7594014, upload-time = "2025-10-21T16:21:41.882Z" }, - { url = "https://files.pythonhosted.org/packages/e0/42/ad28191ebf983a5d0ecef90bab66baa5a6b18f2bfdef9d0a63b1973d9f75/grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958", size = 3984750, upload-time = "2025-10-21T16:21:44.006Z" }, - { url = "https://files.pythonhosted.org/packages/9e/00/7bd478cbb851c04a48baccaa49b75abaa8e4122f7d86da797500cccdd771/grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347", size = 4704003, upload-time = "2025-10-21T16:21:46.244Z" }, - { url = "https://files.pythonhosted.org/packages/fc/ed/71467ab770effc9e8cef5f2e7388beb2be26ed642d567697bb103a790c72/grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2", size = 5807716, upload-time = "2025-10-21T16:21:48.475Z" }, - { url = "https://files.pythonhosted.org/packages/2c/85/c6ed56f9817fab03fa8a111ca91469941fb514e3e3ce6d793cb8f1e1347b/grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468", size = 11821522, upload-time = "2025-10-21T16:21:51.142Z" }, - { url = "https://files.pythonhosted.org/packages/ac/31/2b8a235ab40c39cbc141ef647f8a6eb7b0028f023015a4842933bc0d6831/grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3", size = 6362558, upload-time = "2025-10-21T16:21:54.213Z" }, - { url = "https://files.pythonhosted.org/packages/bd/64/9784eab483358e08847498ee56faf8ff6ea8e0a4592568d9f68edc97e9e9/grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb", size = 7049990, upload-time = "2025-10-21T16:21:56.476Z" }, - { url = "https://files.pythonhosted.org/packages/2b/94/8c12319a6369434e7a184b987e8e9f3b49a114c489b8315f029e24de4837/grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae", size = 6575387, upload-time = "2025-10-21T16:21:59.051Z" }, - { url = "https://files.pythonhosted.org/packages/15/0f/f12c32b03f731f4a6242f771f63039df182c8b8e2cf8075b245b409259d4/grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77", size = 7166668, upload-time = "2025-10-21T16:22:02.049Z" }, - { url = "https://files.pythonhosted.org/packages/ff/2d/3ec9ce0c2b1d92dd59d1c3264aaec9f0f7c817d6e8ac683b97198a36ed5a/grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03", size = 8124928, upload-time = "2025-10-21T16:22:04.984Z" }, - { url = 
"https://files.pythonhosted.org/packages/1a/74/fd3317be5672f4856bcdd1a9e7b5e17554692d3db9a3b273879dc02d657d/grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42", size = 7589983, upload-time = "2025-10-21T16:22:07.881Z" }, - { url = "https://files.pythonhosted.org/packages/45/bb/ca038cf420f405971f19821c8c15bcbc875505f6ffadafe9ffd77871dc4c/grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f", size = 3984727, upload-time = "2025-10-21T16:22:10.032Z" }, - { url = "https://files.pythonhosted.org/packages/41/80/84087dc56437ced7cdd4b13d7875e7439a52a261e3ab4e06488ba6173b0a/grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8", size = 4702799, upload-time = "2025-10-21T16:22:12.709Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/06/8a/3d098f35c143a89520e568e6539cc098fcd294495910e359889ce8741c84/grpcio-1.78.0.tar.gz", hash = "sha256:7382b95189546f375c174f53a5fa873cef91c4b8005faa05cc5b3beea9c4f1c5", size = 12852416, upload-time = "2026-02-06T09:57:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/f4/7384ed0178203d6074446b3c4f46c90a22ddf7ae0b3aee521627f54cfc2a/grpcio-1.78.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f9ab915a267fc47c7e88c387a3a28325b58c898e23d4995f765728f4e3dedb97", size = 5913985, upload-time = "2026-02-06T09:55:26.832Z" }, + { url = "https://files.pythonhosted.org/packages/81/ed/be1caa25f06594463f685b3790b320f18aea49b33166f4141bfdc2bfb236/grpcio-1.78.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3f8904a8165ab21e07e58bf3e30a73f4dffc7a1e0dbc32d51c61b5360d26f43e", size = 11811853, upload-time = "2026-02-06T09:55:29.224Z" }, + { url = "https://files.pythonhosted.org/packages/24/a7/f06d151afc4e64b7e3cc3e872d331d011c279aaab02831e40a81c691fb65/grpcio-1.78.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:859b13906ce098c0b493af92142ad051bf64c7870fa58a123911c88606714996", size = 6475766, upload-time = "2026-02-06T09:55:31.825Z" }, + { url = "https://files.pythonhosted.org/packages/8a/a8/4482922da832ec0082d0f2cc3a10976d84a7424707f25780b82814aafc0a/grpcio-1.78.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b2342d87af32790f934a79c3112641e7b27d63c261b8b4395350dad43eff1dc7", size = 7170027, upload-time = "2026-02-06T09:55:34.7Z" }, + { url = "https://files.pythonhosted.org/packages/54/bf/f4a3b9693e35d25b24b0b39fa46d7d8a3c439e0a3036c3451764678fec20/grpcio-1.78.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12a771591ae40bc65ba67048fa52ef4f0e6db8279e595fd349f9dfddeef571f9", size = 6690766, upload-time = "2026-02-06T09:55:36.902Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/521875265cc99fe5ad4c5a17010018085cae2810a928bf15ebe7d8bcd9cc/grpcio-1.78.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:185dea0d5260cbb2d224c507bf2a5444d5abbb1fa3594c1ed7e4c709d5eb8383", size = 7266161, upload-time = "2026-02-06T09:55:39.824Z" }, + { url = "https://files.pythonhosted.org/packages/05/86/296a82844fd40a4ad4a95f100b55044b4f817dece732bf686aea1a284147/grpcio-1.78.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:51b13f9aed9d59ee389ad666b8c2214cc87b5de258fa712f9ab05f922e3896c6", size = 8253303, upload-time = "2026-02-06T09:55:42.353Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/e4/ea3c0caf5468537f27ad5aab92b681ed7cc0ef5f8c9196d3fd42c8c2286b/grpcio-1.78.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fd5f135b1bd58ab088930b3c613455796dfa0393626a6972663ccdda5b4ac6ce", size = 7698222, upload-time = "2026-02-06T09:55:44.629Z" }, + { url = "https://files.pythonhosted.org/packages/d7/47/7f05f81e4bb6b831e93271fb12fd52ba7b319b5402cbc101d588f435df00/grpcio-1.78.0-cp312-cp312-win32.whl", hash = "sha256:94309f498bcc07e5a7d16089ab984d42ad96af1d94b5a4eb966a266d9fcabf68", size = 4066123, upload-time = "2026-02-06T09:55:47.644Z" }, + { url = "https://files.pythonhosted.org/packages/ad/e7/d6914822c88aa2974dbbd10903d801a28a19ce9cd8bad7e694cbbcf61528/grpcio-1.78.0-cp312-cp312-win_amd64.whl", hash = "sha256:9566fe4ababbb2610c39190791e5b829869351d14369603702e890ef3ad2d06e", size = 4797657, upload-time = "2026-02-06T09:55:49.86Z" }, + { url = "https://files.pythonhosted.org/packages/05/a9/8f75894993895f361ed8636cd9237f4ab39ef87fd30db17467235ed1c045/grpcio-1.78.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:ce3a90455492bf8bfa38e56fbbe1dbd4f872a3d8eeaf7337dc3b1c8aa28c271b", size = 5920143, upload-time = "2026-02-06T09:55:52.035Z" }, + { url = "https://files.pythonhosted.org/packages/55/06/0b78408e938ac424100100fd081189451b472236e8a3a1f6500390dc4954/grpcio-1.78.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:2bf5e2e163b356978b23652c4818ce4759d40f4712ee9ec5a83c4be6f8c23a3a", size = 11803926, upload-time = "2026-02-06T09:55:55.494Z" }, + { url = "https://files.pythonhosted.org/packages/88/93/b59fe7832ff6ae3c78b813ea43dac60e295fa03606d14d89d2e0ec29f4f3/grpcio-1.78.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8f2ac84905d12918e4e55a16da17939eb63e433dc11b677267c35568aa63fc84", size = 6478628, upload-time = "2026-02-06T09:55:58.533Z" }, + { url = "https://files.pythonhosted.org/packages/ed/df/e67e3734527f9926b7d9c0dde6cd998d1d26850c3ed8eeec81297967ac67/grpcio-1.78.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b58f37edab4a3881bc6c9bca52670610e0c9ca14e2ea3cf9debf185b870457fb", size = 7173574, upload-time = "2026-02-06T09:56:01.786Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/cc03fffb07bfba982a9ec097b164e8835546980aec25ecfa5f9c1a47e022/grpcio-1.78.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:735e38e176a88ce41840c21bb49098ab66177c64c82426e24e0082500cc68af5", size = 6692639, upload-time = "2026-02-06T09:56:04.529Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/289c32e301b85bdb67d7ec68b752155e674ee3ba2173a1858f118e399ef3/grpcio-1.78.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2045397e63a7a0ee7957c25f7dbb36ddc110e0cfb418403d110c0a7a68a844e9", size = 7268838, upload-time = "2026-02-06T09:56:08.397Z" }, + { url = "https://files.pythonhosted.org/packages/0e/79/1be93f32add280461fa4773880196572563e9c8510861ac2da0ea0f892b6/grpcio-1.78.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9f136fbafe7ccf4ac7e8e0c28b31066e810be52d6e344ef954a3a70234e1702", size = 8251878, upload-time = "2026-02-06T09:56:10.914Z" }, + { url = "https://files.pythonhosted.org/packages/65/65/793f8e95296ab92e4164593674ae6291b204bb5f67f9d4a711489cd30ffa/grpcio-1.78.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:748b6138585379c737adc08aeffd21222abbda1a86a0dca2a39682feb9196c20", size = 7695412, upload-time = "2026-02-06T09:56:13.593Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/9f/1e233fe697ecc82845942c2822ed06bb522e70d6771c28d5528e4c50f6a4/grpcio-1.78.0-cp313-cp313-win32.whl", hash = "sha256:271c73e6e5676afe4fc52907686670c7cea22ab2310b76a59b678403ed40d670", size = 4064899, upload-time = "2026-02-06T09:56:15.601Z" }, + { url = "https://files.pythonhosted.org/packages/4d/27/d86b89e36de8a951501fb06a0f38df19853210f341d0b28f83f4aa0ffa08/grpcio-1.78.0-cp313-cp313-win_amd64.whl", hash = "sha256:f2d4e43ee362adfc05994ed479334d5a451ab7bc3f3fee1b796b8ca66895acb4", size = 4797393, upload-time = "2026-02-06T09:56:17.882Z" }, +] + +[[package]] +name = "grpcio-status" +version = "1.78.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/cd/89ce482a931b543b92cdd9b2888805518c4620e0094409acb8c81dd4610a/grpcio_status-1.78.0.tar.gz", hash = "sha256:a34cfd28101bfea84b5aa0f936b4b423019e9213882907166af6b3bddc59e189", size = 13808, upload-time = "2026-02-06T10:01:48.034Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/8a/1241ec22c41028bddd4a052ae9369267b4475265ad0ce7140974548dc3fa/grpcio_status-1.78.0-py3-none-any.whl", hash = "sha256:b492b693d4bf27b47a6c32590701724f1d3b9444b36491878fb71f6208857f34", size = 14523, upload-time = "2026-02-06T10:01:32.584Z" }, ] [[package]] @@ -1003,7 +1205,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.3.5" +version = "1.4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -1017,21 +1219,21 @@ dependencies = [ { name = "typer-slim" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/67/e9/2658cb9bc4c72a67b7f87650e827266139befaf499095883d30dabc4d49f/huggingface_hub-1.3.5.tar.gz", hash = "sha256:8045aca8ddab35d937138f3c386c6d43a275f53437c5c64cdc9aa8408653b4ed", size = 627456, upload-time = "2026-01-29T10:34:19.687Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/fc/eb9bc06130e8bbda6a616e1b80a7aa127681c448d6b49806f61db2670b61/huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5", size = 642156, upload-time = "2026-02-06T09:20:03.013Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/84/a579b95c46fe8e319f89dc700c087596f665141575f4dcf136aaa97d856f/huggingface_hub-1.3.5-py3-none-any.whl", hash = "sha256:fe332d7f86a8af874768452295c22cd3f37730fb2463cf6cc3295e26036f8ef9", size = 536675, upload-time = "2026-01-29T10:34:17.713Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ae/2f6d96b4e6c5478d87d606a1934b5d436c4a2bce6bb7c6fdece891c128e3/huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18", size = 553326, upload-time = "2026-02-06T09:20:00.728Z" }, ] [[package]] name = "id" -version = "1.5.0" +version = "1.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "requests" }, + { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/11/102da08f88412d875fa2f1a9a469ff7ad4c874b0ca6fed0048fe385bdb3d/id-1.5.0.tar.gz", hash = "sha256:292cb8a49eacbbdbce97244f47a97b4c62540169c976552e497fd57df0734c1d", size = 15237, upload-time = "2024-12-04T19:53:05.575Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/04/c2156091427636080787aac190019dc64096e56a23b7364d3c1764ee3a06/id-1.6.1.tar.gz", hash = 
"sha256:d0732d624fb46fd4e7bc4e5152f00214450953b9e772c182c1c22964def1a069", size = 18088, upload-time = "2026-02-04T16:19:41.26Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/cb/18326d2d89ad3b0dd143da971e77afd1e6ca6674f1b1c3df4b6bec6279fc/id-1.5.0-py3-none-any.whl", hash = "sha256:f1434e1cef91f2cbb8a4ec64663d5a23b9ed43ef44c4c957d02583d61714c658", size = 13611, upload-time = "2024-12-04T19:53:03.02Z" }, + { url = "https://files.pythonhosted.org/packages/42/77/de194443bf38daed9452139e960c632b0ef9f9a5dd9ce605fdf18ca9f1b1/id-1.6.1-py3-none-any.whl", hash = "sha256:f5ec41ed2629a508f5d0988eda142e190c9c6da971100612c4de9ad9f9b237ca", size = 14689, upload-time = "2026-02-04T16:19:40.051Z" }, ] [[package]] @@ -1129,45 +1331,45 @@ wheels = [ [[package]] name = "jiter" -version = "0.12.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" }, - { url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" }, - { url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" }, - { url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" }, - { url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" }, - { url = "https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" }, - { url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" }, - { url = 
"https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" }, - { url = "https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" }, - { url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" }, - { url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" }, - { url = "https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" }, - { url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" }, - { url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" }, - { url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" }, - { url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" }, - { url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" }, - { url = "https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" }, - { url = 
"https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" }, - { url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" }, - { url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" }, - { url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" }, - { url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" }, - { url = "https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" }, - { url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" }, - { url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" }, - { url = "https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" }, - { url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" }, - { url = 
"https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" }, - { url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" }, - { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" }, - { url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" }, - { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" }, - { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" }, +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" }, + { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" }, + { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" }, + { url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" }, + { url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" }, + { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" }, + { url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" }, + { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" }, + { url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" }, + { url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" }, + { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" }, + { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, 
upload-time = "2026-02-02T12:36:18.351Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" }, + { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" }, + { url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" }, + { url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" }, + { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" }, + { url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" }, + { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" }, + { url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" }, + { url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" }, 
+ { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" }, + { url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" }, + { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" }, + { url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" }, + { url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" }, + { url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" }, + { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" }, + { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" }, + { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" }, ] [[package]] @@ -1313,6 +1515,7 @@ dependencies = [ { name = "kubernetes" }, { name = "litellm" }, { name = "llama-stack" }, + { name = "llama-stack-api" }, { name = "llama-stack-client" }, { name = "openai" }, { name = "prometheus-client" }, @@ -1361,11 +1564,14 @@ llslibdev = [ { name = "aiosqlite" }, { name = "autoevals" }, { name = "blobfile" }, + { name = "chardet" }, { name = "datasets" }, { name = "emoji" }, { name = "faiss-cpu" }, { name = "fire" }, + { name = "google-cloud-aiplatform" }, { name = "langdetect" }, + { name = "litellm" }, { name = "mcp" }, { name = "nltk" }, { name = 
"numpy" }, @@ -1374,9 +1580,11 @@ llslibdev = [ { name = "opentelemetry-sdk" }, { name = "peft" }, { name = "psutil" }, + { name = "psycopg2-binary" }, { name = "pythainlp" }, { name = "requests" }, { name = "sentence-transformers" }, + { name = "sqlite-vec" }, { name = "torch", version = "2.9.0", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "sys_platform == 'darwin'" }, { name = "torch", version = "2.9.0+cpu", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "sys_platform != 'darwin'" }, { name = "transformers" }, @@ -1401,6 +1609,7 @@ requires-dist = [ { name = "kubernetes", specifier = ">=30.1.0" }, { name = "litellm", specifier = ">=1.75.5.post1" }, { name = "llama-stack", specifier = "==0.4.3" }, + { name = "llama-stack-api", specifier = "==0.4.4" }, { name = "llama-stack-client", specifier = "==0.4.3" }, { name = "openai", specifier = ">=1.99.9" }, { name = "prometheus-client", specifier = ">=0.22.1" }, @@ -1449,12 +1658,15 @@ llslibdev = [ { name = "aiosqlite", specifier = ">=0.21.0" }, { name = "autoevals", specifier = ">=0.0.129" }, { name = "blobfile", specifier = ">=3.0.0" }, + { name = "chardet", specifier = ">=5.2.0" }, { name = "datasets", specifier = ">=3.6.0" }, { name = "emoji", specifier = ">=2.1.0" }, { name = "faiss-cpu", specifier = ">=1.11.0" }, { name = "fire", specifier = ">=0.7.0" }, + { name = "google-cloud-aiplatform", specifier = ">=1.130.0" }, { name = "langdetect", specifier = ">=1.0.9" }, - { name = "mcp", specifier = ">=1.9.4" }, + { name = "litellm", specifier = ">=1.81.0" }, + { name = "mcp", specifier = ">=1.23.0" }, { name = "nltk", specifier = ">=3.8.1" }, { name = "numpy", specifier = "==2.3.5" }, { name = "opentelemetry-exporter-otlp", specifier = ">=1.34.1" }, @@ -1462,9 +1674,11 @@ llslibdev = [ { name = "opentelemetry-sdk", specifier = ">=1.34.1" }, { name = "peft", specifier = ">=0.15.2" }, { name = "psutil", specifier = ">=7.0.0" }, + { name = "psycopg2-binary", specifier = ">=2.9.10" }, { name = "pythainlp", specifier = ">=3.0.10" }, { name = "requests", specifier = ">=2.32.4" }, { name = "sentence-transformers", specifier = ">=5.0.0" }, + { name = "sqlite-vec", specifier = ">=0.1.6" }, { name = "torch", specifier = "==2.9.0", index = "https://download.pytorch.org/whl/cpu", conflict = { package = "lightspeed-stack", group = "llslibdev" } }, { name = "transformers", specifier = ">=4.34.0" }, { name = "tree-sitter", specifier = ">=0.24.0" }, @@ -1473,7 +1687,7 @@ llslibdev = [ [[package]] name = "litellm" -version = "1.81.6" +version = "1.81.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1489,9 +1703,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2e/f3/194a2dca6cb3eddb89f4bc2920cf5e27542256af907c23be13c61fe7e021/litellm-1.81.6.tar.gz", hash = "sha256:f02b503dfb7d66d1c939f82e4db21aeec1d6e2ed1fe3f5cd02aaec3f792bc4ae", size = 13878107, upload-time = "2026-02-01T04:02:27.36Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/8f/2a08f3d86fd008b4b02254649883032068378a8551baed93e8d9dcbbdb5d/litellm-1.81.9.tar.gz", hash = "sha256:a2cd9bc53a88696c21309ef37c55556f03c501392ed59d7f4250f9932917c13c", size = 16276983, upload-time = "2026-02-07T21:14:24.473Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/05/3516cc7386b220d388aa0bd833308c677e94eceb82b2756dd95e06f6a13f/litellm-1.81.6-py3-none-any.whl", hash = 
"sha256:573206ba194d49a1691370ba33f781671609ac77c35347f8a0411d852cf6341a", size = 12224343, upload-time = "2026-02-01T04:02:23.704Z" }, + { url = "https://files.pythonhosted.org/packages/0b/8b/672fc06c8a2803477e61e0de383d3c6e686e0f0fc62789c21f0317494076/litellm-1.81.9-py3-none-any.whl", hash = "sha256:24ee273bc8a62299fbb754035f83fb7d8d44329c383701a2bd034f4fd1c19084", size = 14433170, upload-time = "2026-02-07T21:14:21.469Z" }, ] [[package]] @@ -1985,7 +2199,7 @@ wheels = [ [[package]] name = "openai" -version = "2.16.0" +version = "2.17.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1997,9 +2211,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b1/6c/e4c964fcf1d527fdf4739e7cc940c60075a4114d50d03871d5d5b1e13a88/openai-2.16.0.tar.gz", hash = "sha256:42eaa22ca0d8ded4367a77374104d7a2feafee5bd60a107c3c11b5243a11cd12", size = 629649, upload-time = "2026-01-27T23:28:02.579Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/a2/677f22c4b487effb8a09439fb6134034b5f0a39ca27df8b95fac23a93720/openai-2.17.0.tar.gz", hash = "sha256:47224b74bd20f30c6b0a6a329505243cb2f26d5cf84d9f8d0825ff8b35e9c999", size = 631445, upload-time = "2026-02-05T16:27:40.953Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/16/83/0315bf2cfd75a2ce8a7e54188e9456c60cec6c0cf66728ed07bd9859ff26/openai-2.16.0-py3-none-any.whl", hash = "sha256:5f46643a8f42899a84e80c38838135d7038e7718333ce61396994f887b09a59b", size = 1068612, upload-time = "2026-01-27T23:28:00.356Z" }, + { url = "https://files.pythonhosted.org/packages/44/97/284535aa75e6e84ab388248b5a323fc296b1f70530130dee37f7f4fbe856/openai-2.17.0-py3-none-any.whl", hash = "sha256:4f393fd886ca35e113aac7ff239bcd578b81d8f104f5aedc7d3693eb2af1d338", size = 1069524, upload-time = "2026-02-05T16:27:38.941Z" }, ] [[package]] @@ -2191,11 +2405,11 @@ wheels = [ [[package]] name = "parse" -version = "1.20.2" +version = "1.21.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4f/78/d9b09ba24bb36ef8b83b71be547e118d46214735b6dfb39e4bfde0e9b9dd/parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce", size = 29391, upload-time = "2024-06-11T04:41:57.34Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/76/2770399accc7b922c288533a88a10e9baa50a1aec76789a8c255a9c6c060/parse-1.21.0.tar.gz", hash = "sha256:937725d51330ffec9c7a26fdb5623baa135d8ba8ed78817ea9523538844e3ce4", size = 29649, upload-time = "2026-02-05T18:33:41.085Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/31/ba45bf0b2aa7898d81cbbfac0e88c267befb59ad91a19e36e1bc5578ddb1/parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558", size = 20126, upload-time = "2024-06-11T04:41:55.057Z" }, + { url = "https://files.pythonhosted.org/packages/3e/b6/3fee2205ce1333eaa85fdf8500de4e412bbc112d77c9b0045cc8d5a6fcec/parse-1.21.0-py2.py3-none-any.whl", hash = "sha256:6d81f7bae0ab25fd72818375c4a9c71c8705256bfc42e8725be609cf8b904aed", size = 20277, upload-time = "2026-02-05T18:33:39.673Z" }, ] [[package]] @@ -2341,24 +2555,32 @@ wheels = [ [[package]] name = "polyleven" -version = "0.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5c/a9/5fcf2c4d77270d9f8cd5e3d1c878ca7ffcab22debc27c16d9c67288632de/polyleven-0.9.0.tar.gz", hash = 
"sha256:299a93766761b5e5fb4092388f3dc6401224fd436c05f11c4ee48b262587e8da", size = 6274, upload-time = "2025-02-26T08:25:21.812Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ce/ef/558fe641a3e80a1525b2958b61a42fe966ea4a933c589993d650982d5363/polyleven-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9c905fa0862c1f3e27e948a713fb86a26ce1659f1d90b1b4aff04a8890213b", size = 7332, upload-time = "2025-02-26T08:24:25.949Z" }, - { url = "https://files.pythonhosted.org/packages/ee/08/ed89a4f97d8ae158e49996f96cef27aa84c376ba91e659e5b75060dc7d45/polyleven-0.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7058bea0da4893ebb8bedd9f638ec4e026c150e29b7b7385db5c157742d0ff11", size = 22749, upload-time = "2025-02-26T08:24:27.888Z" }, - { url = "https://files.pythonhosted.org/packages/fe/5a/293585bbf05af28184ad63098e9505fd5e590baae20f771b733847576b9e/polyleven-0.9.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b99fcfc48c1eaacc4a46dd9d22dc98de111120c66b56df14257f276b762bd591", size = 21060, upload-time = "2025-02-26T08:24:29.802Z" }, - { url = "https://files.pythonhosted.org/packages/4f/17/038034494567e74f7b0a1452d31070fe9abc6294b8cda69831b3795e2190/polyleven-0.9.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:29ef7db85a7bb01be9372461bc8d8993d4817dfcea702e4d2b8f0d9c43415ebe", size = 21654, upload-time = "2025-02-26T08:24:31.354Z" }, - { url = "https://files.pythonhosted.org/packages/c8/49/6bc884ecd64d6635d7c7c77948f524280c459a476001560bee17930e6b3f/polyleven-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:288bfe0a0040421c52a5dc312b55c47812a72fb9cd7e6d19859ac2f9f11f350f", size = 20518, upload-time = "2025-02-26T08:24:32.628Z" }, - { url = "https://files.pythonhosted.org/packages/8c/16/d164c846a673504eeb461519c7f2877af6c216c73ac94ac7e9e8de7e903f/polyleven-0.9.0-cp312-cp312-win32.whl", hash = "sha256:7260fa32fff7194e06b4221e0a6d2ba2decd4e4dc51f7f8cddbf365649326ee4", size = 11298, upload-time = "2025-02-26T08:24:33.645Z" }, - { url = "https://files.pythonhosted.org/packages/f8/ef/96069efc5f959c19c53d44a561d52d5588d87ed7f4f5c481b34926114afc/polyleven-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:4db8b16aac237dbf644a0e4323c3ba0907dab6adecd2a345bf2fa92301d7fb2d", size = 10674, upload-time = "2025-02-26T08:24:35.48Z" }, - { url = "https://files.pythonhosted.org/packages/a9/42/ddd88c802aec287d678537844a0ea77ab8152a0cfe7fafe9736da256fdbf/polyleven-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45cea2885c61bda9711244a51aed068f9a55f1d776d4caad6c574a3f401945ae", size = 7342, upload-time = "2025-02-26T08:24:36.706Z" }, - { url = "https://files.pythonhosted.org/packages/06/e4/d3b3ea3a931117f0dc79f095c0523cad533773290e66b4d719dfbc6c54f2/polyleven-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62b039e9dc8fa53ad740de02d168a7e9d0edce3734b2927f40fe851b328b766f", size = 22691, upload-time = "2025-02-26T08:24:37.752Z" }, - { url = "https://files.pythonhosted.org/packages/ef/5c/61faf77ed4de6ee2b7f137db12d4d49b96d71e03e2157a06352d54cd4d39/polyleven-0.9.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a0c1ecd2dc356fd94edc80e18a30ad28e93ccc840127e765b83ad60426b2d5", size = 21018, upload-time = "2025-02-26T08:24:41.034Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/ef/6404cbd3934a77bf7776173366e8e39846a6bd0128e2cf9714753dc0b512/polyleven-0.9.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:20576da0c8000bd1c4a07cee43db9169b7d094f5dcc03b20775506d07c56f4fb", size = 21671, upload-time = "2025-02-26T08:24:42.193Z" }, - { url = "https://files.pythonhosted.org/packages/4d/39/d20098cef731f42b84b58af676910333a68782060298326849489d4bf884/polyleven-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ba356ce9e7e7e8ddf4eff17eb39df5b822cb8899450c6d289a22249b78c9a5f4", size = 20542, upload-time = "2025-02-26T08:24:43.661Z" }, - { url = "https://files.pythonhosted.org/packages/b6/2d/4b47edc0467ded497d95cb1fdb13dc138b4abf01f53597bcf47c01e111ea/polyleven-0.9.0-cp313-cp313-win32.whl", hash = "sha256:244d759986486252121061d727a642d3505cbdd9e6616467b42935e662a9fa61", size = 11299, upload-time = "2025-02-26T08:24:44.69Z" }, - { url = "https://files.pythonhosted.org/packages/47/c9/b4584ddbd246e221f17fb90bd902653ca1564d1e60b8c87fe4ed46f30b54/polyleven-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f671df664924b3ec14195be7bf778d5f71811989e59a3f9547f8066cefc596f", size = 10676, upload-time = "2025-02-26T08:24:45.779Z" }, +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/42/17/9280f189a81b9efa0c79da7518e433061c5044a1ac2b59a02b7e1794101b/polyleven-0.10.0.tar.gz", hash = "sha256:00e8aeaccd5e40a6e0685e1a4bdc404fbf009b498597373bc3a83ebb41b360ee", size = 6245, upload-time = "2026-02-03T08:38:22.047Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/38/073ba3d487beee498ebcc1e3f295fc84e791d536768f9352f2a8d888899a/polyleven-0.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d0e2cac82d683bccebed36b1d85d2833c0b0db46c78c8e2212763b3f93943fd3", size = 7366, upload-time = "2026-02-03T08:37:10.18Z" }, + { url = "https://files.pythonhosted.org/packages/2c/76/3df923ce1f28a093e3538098f3a7aa2b9b43f9ef6a3ad81264d3c90a0504/polyleven-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ad04871576432e00129c3a8c5408eb0ecbf0321883a8332accf6c8011e9c003", size = 7438, upload-time = "2026-02-03T08:37:11.353Z" }, + { url = "https://files.pythonhosted.org/packages/e9/df/7394891d85769e7f0bfbb035185c44092615bd7babe9613be74f7bf6564a/polyleven-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04f321bf1d3e4de4095393356caa399e0b3ae9f4610908b9c09b45cbbf3c09fe", size = 20785, upload-time = "2026-02-03T08:37:12.242Z" }, + { url = "https://files.pythonhosted.org/packages/36/33/04538816336d8cbce69106f04b3e36b7c092147257a62b6943ef45784a53/polyleven-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:675db0544b1e4fc08a78c7c777b40c56c1a6a8a1b538bd177610c3cdc3c6934e", size = 22917, upload-time = "2026-02-03T08:37:13.202Z" }, + { url = "https://files.pythonhosted.org/packages/49/5a/d525c6f83e37ad4ad75577d4cd80d062fc65cda11101d94d28283c425c30/polyleven-0.10.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfbb44da427c66040b668fa67ebfa5d87a21919a5ba1771c4ea67affd492a26", size = 21231, upload-time = "2026-02-03T08:37:14.161Z" }, + { url = "https://files.pythonhosted.org/packages/dd/67/0170fe46071cb009605168c6ed7303ba1ff7f3a28d58bb8b0f3a90611bce/polyleven-0.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea673445dee5a2cfe303edb96541d7a5b359070b57357ecd14a1b63b39abb370", size = 
20504, upload-time = "2026-02-03T08:37:15.842Z" }, + { url = "https://files.pythonhosted.org/packages/ce/cf/50248c4ee64085c019396ebbeff6e5a0f322cc80c6b25b038c9bada790ca/polyleven-0.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab8ac71db516befeda8f4ab61875c938b1c67315f2872fa262f0ba3856de6c73", size = 21821, upload-time = "2026-02-03T08:37:16.916Z" }, + { url = "https://files.pythonhosted.org/packages/76/19/43998ff24ac7d187caafac5feab00b12a9887d381177b4a05fb41f979d95/polyleven-0.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:547e9b4197230e2fae8110d0ac57f5de8d65dbf0aa94f4882fe5faf3f8e7c700", size = 20689, upload-time = "2026-02-03T08:37:17.961Z" }, + { url = "https://files.pythonhosted.org/packages/75/b5/57a1447fe5e7ad38752bc3d8688296fa86e126ee1154463115218e101860/polyleven-0.10.0-cp312-cp312-win32.whl", hash = "sha256:41c8d61a8c742921a7ebc286e2d5dee03cd6659e22702e573329de84f4abfe98", size = 11589, upload-time = "2026-02-03T08:37:19.298Z" }, + { url = "https://files.pythonhosted.org/packages/04/54/56768ecfe0ffc7bb522b403b6fb312e2a5f5c95507ecdbe29f5c93315013/polyleven-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:0e50a96ec039b0e6ebddad4e16968b5ba48e47279c1b8049c4cffddea06306b0", size = 10798, upload-time = "2026-02-03T08:37:22.136Z" }, + { url = "https://files.pythonhosted.org/packages/6f/96/e2cac97db366a5e8e3771eb784651f9bfb4ddc1b22b9db90e7ceec116f8c/polyleven-0.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:595d8d0cd934e1e4042abbff9838b3a673eda6d269c3fa5727eec313c3df3d60", size = 9396, upload-time = "2026-02-03T08:37:23.159Z" }, + { url = "https://files.pythonhosted.org/packages/de/87/6a5139102cb93abb1fd7b84639061815dfaed64df09666d6618a67a3e079/polyleven-0.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7fb473bfa1a68597cae78d14a4cb5fce75f619b978d5469f4bfb15e1b43f6214", size = 7368, upload-time = "2026-02-03T08:37:24.082Z" }, + { url = "https://files.pythonhosted.org/packages/bb/25/4a98834a8d60f6194d0e990fc08c60a8a29eab29eb70fb234020a442519a/polyleven-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f5fa2018efac4b243f1f62bd81d8cc830245407258940119c26a4f95ab301a", size = 7449, upload-time = "2026-02-03T08:37:25.08Z" }, + { url = "https://files.pythonhosted.org/packages/da/65/36f489f78734a3c735d6cba3fec1c5e802272dde28965109ae504d8813c0/polyleven-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d83ddc05fba8b75cd23404bc218a3fea888778f061300e220a705e7b303f24c", size = 20746, upload-time = "2026-02-03T08:37:26.002Z" }, + { url = "https://files.pythonhosted.org/packages/95/a4/0ccb1707b68093bb42596c9a3cce4bf2ddea1f3eaef60203c69df42608cc/polyleven-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a4e6e89ff4f1e73dd3e160a3b87df0f7600513fefe8ea436cc66fd9e4719c3", size = 22858, upload-time = "2026-02-03T08:37:26.939Z" }, + { url = "https://files.pythonhosted.org/packages/a0/5e/9ca68c7e1fa78a9fdea07d71cfa4819a79e636f3ee4548c29e5d5d56c933/polyleven-0.10.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c9d0148436ce0ca86675a0d79c40e135166447e102b1ed1465ced2e1eb589f5", size = 21190, upload-time = "2026-02-03T08:37:28.952Z" }, + { url = "https://files.pythonhosted.org/packages/c0/33/d37ddff59af19ccf0c039cd44d3b54f330c1ea0a9fb7f983d8cb92539401/polyleven-0.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a33c50d15ae3bf7c10b18df8fcea8ce2d67286f81219bcf47976a9c61c8297c", size = 20535, 
upload-time = "2026-02-03T08:37:30.427Z" }, + { url = "https://files.pythonhosted.org/packages/99/ae/27132c2f05b7143e86a7a2141c6b389191214a45507eaff92c3b7e887a2c/polyleven-0.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:644cd3730f43580cf90557fa1a149ea0e09571c2ddef9d9c3071ea0bd3b00b4a", size = 21838, upload-time = "2026-02-03T08:37:31.398Z" }, + { url = "https://files.pythonhosted.org/packages/9b/22/a13f4c20a4fcf8f6f24a29b7acd10527c6f2c869e510e78a5ae088b65d73/polyleven-0.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ea096c4cc8e8f089f082fe1caefb152267b138eb314d378103fee63418490cc3", size = 20713, upload-time = "2026-02-03T08:37:32.363Z" }, + { url = "https://files.pythonhosted.org/packages/40/18/9697494f24d3599b646a2f828a44a8224e6ad3188150a1297f6324c48c7f/polyleven-0.10.0-cp313-cp313-win32.whl", hash = "sha256:3f4b30a8054b9c31f477a15d727dc944bc7450a7d84f6c90e30d354da90a4388", size = 11589, upload-time = "2026-02-03T08:37:33.368Z" }, + { url = "https://files.pythonhosted.org/packages/0a/3c/ea272a2d893b16dcea9bc1721ed882f26a5d3b4cb4e68d22f15c604bb6d2/polyleven-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:3a3a3e9e5f7733f64b8aea794beb757e20cca8b7a90a52d8dfe02e973746e457", size = 10800, upload-time = "2026-02-03T08:37:34.842Z" }, + { url = "https://files.pythonhosted.org/packages/52/e4/4dee260066f1c6172c09f2671c3f05cd80210f229ef064ec7b4812347507/polyleven-0.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:22b4ed4fc73f28e7aa44a4e6146bf88dcb3d0a175f08c0978f3a486f0009bce2", size = 9394, upload-time = "2026-02-03T08:37:36.277Z" }, ] [[package]] @@ -2438,14 +2660,14 @@ wheels = [ [[package]] name = "proto-plus" -version = "1.27.0" +version = "1.27.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/89/9cbe2f4bba860e149108b683bc2efec21f14d5f7ed6e25562ad86acbc373/proto_plus-1.27.0.tar.gz", hash = "sha256:873af56dd0d7e91836aee871e5799e1c6f1bda86ac9a983e0bb9f0c266a568c4", size = 56158, upload-time = "2025-12-16T13:46:25.729Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/02/8832cde80e7380c600fbf55090b6ab7b62bd6825dbedde6d6657c15a1f8e/proto_plus-1.27.1.tar.gz", hash = "sha256:912a7460446625b792f6448bade9e55cd4e41e6ac10e27009ef71a7f317fa147", size = 56929, upload-time = "2026-02-02T17:34:49.035Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cd/24/3b7a0818484df9c28172857af32c2397b6d8fcd99d9468bd4684f98ebf0a/proto_plus-1.27.0-py3-none-any.whl", hash = "sha256:1baa7f81cf0f8acb8bc1f6d085008ba4171eaf669629d1b6d1673b21ed1c0a82", size = 50205, upload-time = "2025-12-16T13:46:24.76Z" }, + { url = "https://files.pythonhosted.org/packages/5d/79/ac273cbbf744691821a9cca88957257f41afe271637794975ca090b9588b/proto_plus-1.27.1-py3-none-any.whl", hash = "sha256:e4643061f3a4d0de092d62aa4ad09fa4756b2cbb89d4627f3985018216f9fefc", size = 50480, upload-time = "2026-02-02T17:34:47.339Z" }, ] [[package]] @@ -2526,14 +2748,14 @@ wheels = [ [[package]] name = "pyaml" -version = "25.7.0" +version = "26.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c4/01/41f63d66a801a561c9e335523516bd5f761bc43cc61f8b75918306bf2da8/pyaml-25.7.0.tar.gz", hash = "sha256:e113a64ec16881bf2b092e2beb84b7dcf1bd98096ad17f5f14e8fb782a75d99b", size = 29814, upload-time = "2025-07-10T18:44:51.824Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/38/fb/2b9590512a9d7763620d87171c7531d5295678ce96e57393614b91da8998/pyaml-26.2.1.tar.gz", hash = "sha256:489dd82997235d4cfcf76a6287fce2f075487d77a6567c271e8d790583690c68", size = 30653, upload-time = "2026-02-06T13:49:30.769Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/ee/a878f2ad010cbccb311f947f0f2f09d38f613938ee28c34e60fceecc75a1/pyaml-25.7.0-py3-none-any.whl", hash = "sha256:ce5d7867cc2b455efdb9b0448324ff7b9f74d99f64650f12ca570102db6b985f", size = 26418, upload-time = "2025-07-10T18:44:50.679Z" }, + { url = "https://files.pythonhosted.org/packages/5d/f3/1f8651f23101e6fae41d0d504414c9722b0140bf0fc6acf87ac52e18aa41/pyaml-26.2.1-py3-none-any.whl", hash = "sha256:6261c2f0a2f33245286c794ad6ec234be33a73d2b05427079fd343e2812a87cf", size = 27211, upload-time = "2026-02-06T13:49:29.652Z" }, ] [[package]] @@ -3118,15 +3340,15 @@ wheels = [ [[package]] name = "rich" -version = "14.3.1" +version = "14.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/84/4831f881aa6ff3c976f6d6809b58cdfa350593ffc0dc3c58f5f6586780fb/rich-14.3.1.tar.gz", hash = "sha256:b8c5f568a3a749f9290ec6bddedf835cec33696bfc1e48bcfecb276c7386e4b8", size = 230125, upload-time = "2026-01-24T21:40:44.847Z" } +sdist = { url = "https://files.pythonhosted.org/packages/74/99/a4cab2acbb884f80e558b0771e97e21e939c5dfb460f488d19df485e8298/rich-14.3.2.tar.gz", hash = "sha256:e712f11c1a562a11843306f5ed999475f09ac31ffb64281f73ab29ffdda8b3b8", size = 230143, upload-time = "2026-02-01T16:20:47.908Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/87/2a/a1810c8627b9ec8c57ec5ec325d306701ae7be50235e8fd81266e002a3cc/rich-14.3.1-py3-none-any.whl", hash = "sha256:da750b1aebbff0b372557426fb3f35ba56de8ef954b3190315eb64076d6fb54e", size = 309952, upload-time = "2026-01-24T21:40:42.969Z" }, + { url = "https://files.pythonhosted.org/packages/ef/45/615f5babd880b4bd7d405cc0dc348234c5ffb6ed1ea33e152ede08b2072d/rich-14.3.2-py3-none-any.whl", hash = "sha256:08e67c3e90884651da3239ea668222d19bea7b589149d8014a21c633420dbb69", size = 309963, upload-time = "2026-02-01T16:20:46.078Z" }, ] [[package]] @@ -3195,28 +3417,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2e/06/f71e3a86b2df0dfa2d2f72195941cd09b44f87711cb7fa5193732cb9a5fc/ruff-0.14.14.tar.gz", hash = "sha256:2d0f819c9a90205f3a867dbbd0be083bee9912e170fd7d9704cc8ae45824896b", size = 4515732, upload-time = "2026-01-22T22:30:17.527Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/89/20a12e97bc6b9f9f68343952da08a8099c57237aef953a56b82711d55edd/ruff-0.14.14-py3-none-linux_armv6l.whl", hash = "sha256:7cfe36b56e8489dee8fbc777c61959f60ec0f1f11817e8f2415f429552846aed", size = 10467650, upload-time = "2026-01-22T22:30:08.578Z" }, - { url = "https://files.pythonhosted.org/packages/a3/b1/c5de3fd2d5a831fcae21beda5e3589c0ba67eec8202e992388e4b17a6040/ruff-0.14.14-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6006a0082336e7920b9573ef8a7f52eec837add1265cc74e04ea8a4368cd704c", size = 10883245, upload-time = "2026-01-22T22:30:04.155Z" }, - { url = "https://files.pythonhosted.org/packages/b8/7c/3c1db59a10e7490f8f6f8559d1db8636cbb13dccebf18686f4e3c9d7c772/ruff-0.14.14-py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:026c1d25996818f0bf498636686199d9bd0d9d6341c9c2c3b62e2a0198b758de", size = 10231273, upload-time = "2026-01-22T22:30:34.642Z" }, - { url = "https://files.pythonhosted.org/packages/a1/6e/5e0e0d9674be0f8581d1f5e0f0a04761203affce3232c1a1189d0e3b4dad/ruff-0.14.14-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f666445819d31210b71e0a6d1c01e24447a20b85458eea25a25fe8142210ae0e", size = 10585753, upload-time = "2026-01-22T22:30:31.781Z" }, - { url = "https://files.pythonhosted.org/packages/23/09/754ab09f46ff1884d422dc26d59ba18b4e5d355be147721bb2518aa2a014/ruff-0.14.14-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c0f18b922c6d2ff9a5e6c3ee16259adc513ca775bcf82c67ebab7cbd9da5bc8", size = 10286052, upload-time = "2026-01-22T22:30:24.827Z" }, - { url = "https://files.pythonhosted.org/packages/c8/cc/e71f88dd2a12afb5f50733851729d6b571a7c3a35bfdb16c3035132675a0/ruff-0.14.14-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1629e67489c2dea43e8658c3dba659edbfd87361624b4040d1df04c9740ae906", size = 11043637, upload-time = "2026-01-22T22:30:13.239Z" }, - { url = "https://files.pythonhosted.org/packages/67/b2/397245026352494497dac935d7f00f1468c03a23a0c5db6ad8fc49ca3fb2/ruff-0.14.14-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:27493a2131ea0f899057d49d303e4292b2cae2bb57253c1ed1f256fbcd1da480", size = 12194761, upload-time = "2026-01-22T22:30:22.542Z" }, - { url = "https://files.pythonhosted.org/packages/5b/06/06ef271459f778323112c51b7587ce85230785cd64e91772034ddb88f200/ruff-0.14.14-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ff589aab3f5b539e35db38425da31a57521efd1e4ad1ae08fc34dbe30bd7df", size = 12005701, upload-time = "2026-01-22T22:30:20.499Z" }, - { url = "https://files.pythonhosted.org/packages/41/d6/99364514541cf811ccc5ac44362f88df66373e9fec1b9d1c4cc830593fe7/ruff-0.14.14-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc12d74eef0f29f51775f5b755913eb523546b88e2d733e1d701fe65144e89b", size = 11282455, upload-time = "2026-01-22T22:29:59.679Z" }, - { url = "https://files.pythonhosted.org/packages/ca/71/37daa46f89475f8582b7762ecd2722492df26421714a33e72ccc9a84d7a5/ruff-0.14.14-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb8481604b7a9e75eff53772496201690ce2687067e038b3cc31aaf16aa0b974", size = 11215882, upload-time = "2026-01-22T22:29:57.032Z" }, - { url = "https://files.pythonhosted.org/packages/2c/10/a31f86169ec91c0705e618443ee74ede0bdd94da0a57b28e72db68b2dbac/ruff-0.14.14-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:14649acb1cf7b5d2d283ebd2f58d56b75836ed8c6f329664fa91cdea19e76e66", size = 11180549, upload-time = "2026-01-22T22:30:27.175Z" }, - { url = "https://files.pythonhosted.org/packages/fd/1e/c723f20536b5163adf79bdd10c5f093414293cdf567eed9bdb7b83940f3f/ruff-0.14.14-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e8058d2145566510790eab4e2fad186002e288dec5e0d343a92fe7b0bc1b3e13", size = 10543416, upload-time = "2026-01-22T22:30:01.964Z" }, - { url = "https://files.pythonhosted.org/packages/3e/34/8a84cea7e42c2d94ba5bde1d7a4fae164d6318f13f933d92da6d7c2041ff/ruff-0.14.14-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e651e977a79e4c758eb807f0481d673a67ffe53cfa92209781dfa3a996cf8412", size = 10285491, upload-time = "2026-01-22T22:30:29.51Z" }, - { url = "https://files.pythonhosted.org/packages/55/ef/b7c5ea0be82518906c978e365e56a77f8de7678c8bb6651ccfbdc178c29f/ruff-0.14.14-py3-none-musllinux_1_2_i686.whl", 
hash = "sha256:cc8b22da8d9d6fdd844a68ae937e2a0adf9b16514e9a97cc60355e2d4b219fc3", size = 10733525, upload-time = "2026-01-22T22:30:06.499Z" }, - { url = "https://files.pythonhosted.org/packages/6a/5b/aaf1dfbcc53a2811f6cc0a1759de24e4b03e02ba8762daabd9b6bd8c59e3/ruff-0.14.14-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:16bc890fb4cc9781bb05beb5ab4cd51be9e7cb376bf1dd3580512b24eb3fda2b", size = 11315626, upload-time = "2026-01-22T22:30:36.848Z" }, - { url = "https://files.pythonhosted.org/packages/2c/aa/9f89c719c467dfaf8ad799b9bae0df494513fb21d31a6059cb5870e57e74/ruff-0.14.14-py3-none-win32.whl", hash = "sha256:b530c191970b143375b6a68e6f743800b2b786bbcf03a7965b06c4bf04568167", size = 10502442, upload-time = "2026-01-22T22:30:38.93Z" }, - { url = "https://files.pythonhosted.org/packages/87/44/90fa543014c45560cae1fffc63ea059fb3575ee6e1cb654562197e5d16fb/ruff-0.14.14-py3-none-win_amd64.whl", hash = "sha256:3dde1435e6b6fe5b66506c1dff67a421d0b7f6488d466f651c07f4cab3bf20fd", size = 11630486, upload-time = "2026-01-22T22:30:10.852Z" }, - { url = "https://files.pythonhosted.org/packages/9e/6a/40fee331a52339926a92e17ae748827270b288a35ef4a15c9c8f2ec54715/ruff-0.14.14-py3-none-win_arm64.whl", hash = "sha256:56e6981a98b13a32236a72a8da421d7839221fa308b223b9283312312e5ac76c", size = 10920448, upload-time = "2026-01-22T22:30:15.417Z" }, +version = "0.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c8/39/5cee96809fbca590abea6b46c6d1c586b49663d1d2830a751cc8fc42c666/ruff-0.15.0.tar.gz", hash = "sha256:6bdea47cdbea30d40f8f8d7d69c0854ba7c15420ec75a26f463290949d7f7e9a", size = 4524893, upload-time = "2026-02-03T17:53:35.357Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/88/3fd1b0aa4b6330d6aaa63a285bc96c9f71970351579152d231ed90914586/ruff-0.15.0-py3-none-linux_armv6l.whl", hash = "sha256:aac4ebaa612a82b23d45964586f24ae9bc23ca101919f5590bdb368d74ad5455", size = 10354332, upload-time = "2026-02-03T17:52:54.892Z" }, + { url = "https://files.pythonhosted.org/packages/72/f6/62e173fbb7eb75cc29fe2576a1e20f0a46f671a2587b5f604bfb0eaf5f6f/ruff-0.15.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dcd4be7cc75cfbbca24a98d04d0b9b36a270d0833241f776b788d59f4142b14d", size = 10767189, upload-time = "2026-02-03T17:53:19.778Z" }, + { url = "https://files.pythonhosted.org/packages/99/e4/968ae17b676d1d2ff101d56dc69cf333e3a4c985e1ec23803df84fc7bf9e/ruff-0.15.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d747e3319b2bce179c7c1eaad3d884dc0a199b5f4d5187620530adf9105268ce", size = 10075384, upload-time = "2026-02-03T17:53:29.241Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/9843c6044ab9e20af879c751487e61333ca79a2c8c3058b15722386b8cae/ruff-0.15.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:650bd9c56ae03102c51a5e4b554d74d825ff3abe4db22b90fd32d816c2e90621", size = 10481363, upload-time = "2026-02-03T17:52:43.332Z" }, + { url = "https://files.pythonhosted.org/packages/55/d9/4ada5ccf4cd1f532db1c8d44b6f664f2208d3d93acbeec18f82315e15193/ruff-0.15.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6664b7eac559e3048223a2da77769c2f92b43a6dfd4720cef42654299a599c9", size = 10187736, upload-time = "2026-02-03T17:53:00.522Z" }, + { url = "https://files.pythonhosted.org/packages/86/e2/f25eaecd446af7bb132af0a1d5b135a62971a41f5366ff41d06d25e77a91/ruff-0.15.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6f811f97b0f092b35320d1556f3353bf238763420ade5d9e62ebd2b73f2ff179", size = 10968415, upload-time = "2026-02-03T17:53:15.705Z" }, + { url = "https://files.pythonhosted.org/packages/e7/dc/f06a8558d06333bf79b497d29a50c3a673d9251214e0d7ec78f90b30aa79/ruff-0.15.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:761ec0a66680fab6454236635a39abaf14198818c8cdf691e036f4bc0f406b2d", size = 11809643, upload-time = "2026-02-03T17:53:23.031Z" }, + { url = "https://files.pythonhosted.org/packages/dd/45/0ece8db2c474ad7df13af3a6d50f76e22a09d078af63078f005057ca59eb/ruff-0.15.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:940f11c2604d317e797b289f4f9f3fa5555ffe4fb574b55ed006c3d9b6f0eb78", size = 11234787, upload-time = "2026-02-03T17:52:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d9/0e3a81467a120fd265658d127db648e4d3acfe3e4f6f5d4ea79fac47e587/ruff-0.15.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcbca3d40558789126da91d7ef9a7c87772ee107033db7191edefa34e2c7f1b4", size = 11112797, upload-time = "2026-02-03T17:52:49.274Z" }, + { url = "https://files.pythonhosted.org/packages/b2/cb/8c0b3b0c692683f8ff31351dfb6241047fa873a4481a76df4335a8bff716/ruff-0.15.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9a121a96db1d75fa3eb39c4539e607f628920dd72ff1f7c5ee4f1b768ac62d6e", size = 11033133, upload-time = "2026-02-03T17:53:33.105Z" }, + { url = "https://files.pythonhosted.org/packages/f8/5e/23b87370cf0f9081a8c89a753e69a4e8778805b8802ccfe175cc410e50b9/ruff-0.15.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5298d518e493061f2eabd4abd067c7e4fb89e2f63291c94332e35631c07c3662", size = 10442646, upload-time = "2026-02-03T17:53:06.278Z" }, + { url = "https://files.pythonhosted.org/packages/e1/9a/3c94de5ce642830167e6d00b5c75aacd73e6347b4c7fc6828699b150a5ee/ruff-0.15.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afb6e603d6375ff0d6b0cee563fa21ab570fd15e65c852cb24922cef25050cf1", size = 10195750, upload-time = "2026-02-03T17:53:26.084Z" }, + { url = "https://files.pythonhosted.org/packages/30/15/e396325080d600b436acc970848d69df9c13977942fb62bb8722d729bee8/ruff-0.15.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:77e515f6b15f828b94dc17d2b4ace334c9ddb7d9468c54b2f9ed2b9c1593ef16", size = 10676120, upload-time = "2026-02-03T17:53:09.363Z" }, + { url = "https://files.pythonhosted.org/packages/8d/c9/229a23d52a2983de1ad0fb0ee37d36e0257e6f28bfd6b498ee2c76361874/ruff-0.15.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6f6e80850a01eb13b3e42ee0ebdf6e4497151b48c35051aab51c101266d187a3", size = 11201636, upload-time = "2026-02-03T17:52:57.281Z" }, + { url = "https://files.pythonhosted.org/packages/6f/b0/69adf22f4e24f3677208adb715c578266842e6e6a3cc77483f48dd999ede/ruff-0.15.0-py3-none-win32.whl", hash = "sha256:238a717ef803e501b6d51e0bdd0d2c6e8513fe9eec14002445134d3907cd46c3", size = 10465945, upload-time = "2026-02-03T17:53:12.591Z" }, + { url = "https://files.pythonhosted.org/packages/51/ad/f813b6e2c97e9b4598be25e94a9147b9af7e60523b0cb5d94d307c15229d/ruff-0.15.0-py3-none-win_amd64.whl", hash = "sha256:dd5e4d3301dc01de614da3cdffc33d4b1b96fb89e45721f1598e5532ccf78b18", size = 11564657, upload-time = "2026-02-03T17:52:51.893Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b0/2d823f6e77ebe560f4e397d078487e8d52c1516b331e3521bc75db4272ca/ruff-0.15.0-py3-none-win_arm64.whl", hash = "sha256:c480d632cc0ca3f0727acac8b7d053542d9e114a462a145d0b00e7cd658c515a", size = 10865753, upload-time = 
"2026-02-03T17:53:03.014Z" }, ] [[package]] @@ -3358,11 +3579,11 @@ wheels = [ [[package]] name = "setuptools" -version = "80.10.2" +version = "81.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/76/95/faf61eb8363f26aa7e1d762267a8d602a1b26d4f3a1e758e92cb3cb8b054/setuptools-80.10.2.tar.gz", hash = "sha256:8b0e9d10c784bf7d262c4e5ec5d4ec94127ce206e8738f29a437945fbc219b70", size = 1200343, upload-time = "2026-01-25T22:38:17.252Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/1c/73e719955c59b8e424d015ab450f51c0af856ae46ea2da83eba51cc88de1/setuptools-81.0.0.tar.gz", hash = "sha256:487b53915f52501f0a79ccfd0c02c165ffe06631443a886740b91af4b7a5845a", size = 1198299, upload-time = "2026-02-06T21:10:39.601Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/94/b8/f1f62a5e3c0ad2ff1d189590bfa4c46b4f3b6e49cef6f26c6ee4e575394d/setuptools-80.10.2-py3-none-any.whl", hash = "sha256:95b30ddfb717250edb492926c92b5221f7ef3fbcc2b07579bcd4a27da21d0173", size = 1064234, upload-time = "2026-01-25T22:38:15.216Z" }, + { url = "https://files.pythonhosted.org/packages/e1/e3/c164c88b2e5ce7b24d667b9bd83589cf4f3520d97cad01534cd3c4f55fdb/setuptools-81.0.0-py3-none-any.whl", hash = "sha256:fdd925d5c5d9f62e4b74b30d6dd7828ce236fd6ed998a08d81de62ce5a6310d6", size = 1062021, upload-time = "2026-02-06T21:10:37.175Z" }, ] [[package]] @@ -3437,6 +3658,18 @@ asyncio = [ { name = "greenlet" }, ] +[[package]] +name = "sqlite-vec" +version = "0.1.6" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ed/aabc328f29ee6814033d008ec43e44f2c595447d9cccd5f2aabe60df2933/sqlite_vec-0.1.6-py3-none-macosx_10_6_x86_64.whl", hash = "sha256:77491bcaa6d496f2acb5cc0d0ff0b8964434f141523c121e313f9a7d8088dee3", size = 164075, upload-time = "2024-11-20T16:40:29.847Z" }, + { url = "https://files.pythonhosted.org/packages/a7/57/05604e509a129b22e303758bfa062c19afb020557d5e19b008c64016704e/sqlite_vec-0.1.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fdca35f7ee3243668a055255d4dee4dea7eed5a06da8cad409f89facf4595361", size = 165242, upload-time = "2024-11-20T16:40:31.206Z" }, + { url = "https://files.pythonhosted.org/packages/f2/48/dbb2cc4e5bad88c89c7bb296e2d0a8df58aab9edc75853728c361eefc24f/sqlite_vec-0.1.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b0519d9cd96164cd2e08e8eed225197f9cd2f0be82cb04567692a0a4be02da3", size = 103704, upload-time = "2024-11-20T16:40:33.729Z" }, + { url = "https://files.pythonhosted.org/packages/80/76/97f33b1a2446f6ae55e59b33869bed4eafaf59b7f4c662c8d9491b6a714a/sqlite_vec-0.1.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux1_x86_64.whl", hash = "sha256:823b0493add80d7fe82ab0fe25df7c0703f4752941aee1c7b2b02cec9656cb24", size = 151556, upload-time = "2024-11-20T16:40:35.387Z" }, + { url = "https://files.pythonhosted.org/packages/6a/98/e8bc58b178266eae2fcf4c9c7a8303a8d41164d781b32d71097924a6bebe/sqlite_vec-0.1.6-py3-none-win_amd64.whl", hash = "sha256:c65bcfd90fa2f41f9000052bcb8bb75d38240b2dae49225389eca6c3136d3f0c", size = 281540, upload-time = "2024-11-20T16:40:37.296Z" }, +] + [[package]] name = "sse-starlette" version = "3.2.0" @@ -3452,15 +3685,15 @@ wheels = [ [[package]] name = "starlette" -version = "0.50.0" +version = "0.52.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/68/79977123bb7be889ad680d79a40f339082c1978b5cfcf62c2d8d196873ac/starlette-0.52.1.tar.gz", hash = "sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933", size = 2653702, upload-time = "2026-01-18T13:34:11.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, + { url = "https://files.pythonhosted.org/packages/81/0d/13d1d239a25cbfb19e740db83143e95c772a1fe10202dda4b76792b114dd/starlette-0.52.1-py3-none-any.whl", hash = "sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74", size = 74272, upload-time = "2026-01-18T13:34:09.188Z" }, ] [[package]] @@ -3484,6 +3717,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, ] +[[package]] +name = "tenacity" +version = "9.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/c6/ee486fd809e357697ee8a44d3d69222b344920433d3b6666ccd9b374630c/tenacity-9.1.4.tar.gz", hash = "sha256:adb31d4c263f2bd041081ab33b498309a57c77f9acf2db65aadf0898179cf93a", size = 49413, upload-time = "2026-02-07T10:45:33.841Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/c1/eb8f9debc45d3b7918a32ab756658a0904732f75e555402972246b0b8e71/tenacity-9.1.4-py3-none-any.whl", hash = "sha256:6095a360c919085f28c6527de529e76a06ad89b23659fa881ae0649b867a9d55", size = 28926, upload-time = "2026-02-07T10:45:32.24Z" }, +] + [[package]] name = "termcolor" version = "3.3.0" @@ -3649,22 +3891,21 @@ wheels = [ [[package]] name = "tqdm" -version = "4.67.2" +version = "4.67.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/27/89/4b0001b2dab8df0a5ee2787dcbe771de75ded01f18f1f8d53dedeea2882b/tqdm-4.67.2.tar.gz", hash = "sha256:649aac53964b2cb8dec76a14b405a4c0d13612cb8933aae547dd144eacc99653", size = 169514, upload-time = "2026-01-30T23:12:06.555Z" } +sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f5/e2/31eac96de2915cf20ccaed0225035db149dfb9165a9ed28d4b252ef3f7f7/tqdm-4.67.2-py3-none-any.whl", hash = "sha256:9a12abcbbff58b6036b2167d9d3853042b9d436fe7330f06ae047867f2f8e0a7", size = 78354, upload-time = "2026-01-30T23:12:04.368Z" }, + { url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = 
"sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" }, ] [[package]] name = "transformers" -version = "5.0.0" +version = "5.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "filelock" }, { name = "huggingface-hub" }, { name = "numpy" }, { name = "packaging" }, @@ -3675,9 +3916,9 @@ dependencies = [ { name = "tqdm" }, { name = "typer-slim" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bc/79/845941711811789c85fb7e2599cea425a14a07eda40f50896b9d3fda7492/transformers-5.0.0.tar.gz", hash = "sha256:5f5634efed6cf76ad068cc5834c7adbc32db78bbd6211fb70df2325a9c37dec8", size = 8424830, upload-time = "2026-01-26T10:46:46.813Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/1d/a7d91500a6c02ec76058bc9e65fcdec1bdb8882854dec8e4adf12d0aa8b0/transformers-5.1.0.tar.gz", hash = "sha256:c60d6180e5845ea1b4eed38d7d1b06fcc4cc341c6b7fa5c1dc767d7e25fe0139", size = 8531810, upload-time = "2026-02-05T15:41:42.932Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/52/f3/ac976fa8e305c9e49772527e09fbdc27cc6831b8a2f6b6063406626be5dd/transformers-5.0.0-py3-none-any.whl", hash = "sha256:587086f249ce64c817213cf36afdb318d087f790723e9b3d4500b97832afd52d", size = 10142091, upload-time = "2026-01-26T10:46:43.88Z" }, + { url = "https://files.pythonhosted.org/packages/b7/66/57042d4b0f1ede8046d7ae6409bf3640df996e9cbc3fe20467aa29badc54/transformers-5.1.0-py3-none-any.whl", hash = "sha256:de534b50c9b2ce6217fc56421075a1734241fb40704fdc90f50f6a08fc533d59", size = 10276537, upload-time = "2026-02-05T15:41:40.358Z" }, ] [[package]] @@ -3704,7 +3945,7 @@ wheels = [ [[package]] name = "trl" -version = "0.27.1" +version = "0.27.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "accelerate" }, @@ -3712,9 +3953,9 @@ dependencies = [ { name = "packaging" }, { name = "transformers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2a/85/e0151f2bc006722c032fad942d442ac3cfe1e25b770fca3a6c50e599a89c/trl-0.27.1.tar.gz", hash = "sha256:9d502626c3ac1d32cdc7d8978c742de31bfc11135b4d15be1d83909632dcb75c", size = 449005, upload-time = "2026-01-24T03:33:56.977Z" } +sdist = { url = "https://files.pythonhosted.org/packages/af/71/a470db929cb51f7f3f6ee33ccded1e88ee9ec83fe6070d4f6e48940bef3a/trl-0.27.2.tar.gz", hash = "sha256:b0a5b3ba4c28cf3736647b77925feccee8c542c509f63f4f5df070f3abe602df", size = 446950, upload-time = "2026-02-03T18:07:06.386Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/08/49/6b03bdbb26c4f4f962624014fe7ae4ea91834286f4387ad0d3748bf21c6f/trl-0.27.1-py3-none-any.whl", hash = "sha256:641843c8556516c39896113b79c9b0b668236670b3eae3697107117c75cc65eb", size = 532873, upload-time = "2026-01-24T03:33:55.195Z" }, + { url = "https://files.pythonhosted.org/packages/a8/f7/21105a8b2bae423f9f7455aa2f4938ab3359c81e4b20b8436ac9255ebec9/trl-0.27.2-py3-none-any.whl", hash = "sha256:05fbaa257d5d55bc659d8470c4d189eb046c18332d34dbe679e595bd5d6553cc", size = 530912, upload-time = "2026-02-03T18:07:04.941Z" }, ] [[package]] @@ -3834,11 +4075,11 @@ wheels = [ [[package]] name = "wcwidth" -version = "0.5.3" +version = "0.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c2/62/a7c072fbfefb2980a00f99ca994279cb9ecf310cb2e6b2a4d2a28fe192b3/wcwidth-0.5.3.tar.gz", hash = "sha256:53123b7af053c74e9fe2e92ac810301f6139e64379031f7124574212fb3b4091", size = 157587, upload-time = 
"2026-01-31T03:52:10.92Z" } +sdist = { url = "https://files.pythonhosted.org/packages/35/a2/8e3becb46433538a38726c948d3399905a4c7cabd0df578ede5dc51f0ec2/wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159", size = 159684, upload-time = "2026-02-06T19:19:40.919Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/c1/d73f12f8cdb1891334a2ccf7389eed244d3941e74d80dd220badb937f3fb/wcwidth-0.5.3-py3-none-any.whl", hash = "sha256:d584eff31cd4753e1e5ff6c12e1edfdb324c995713f75d26c29807bb84bf649e", size = 92981, upload-time = "2026-01-31T03:52:09.14Z" }, + { url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" }, ] [[package]] @@ -3850,6 +4091,37 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, ] +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = 
"https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = 
"2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + [[package]] name = "wheel" version = "0.46.3"