From 21c6158658e79fc89e4e3cc75fc3e8062b3ed4fb Mon Sep 17 00:00:00 2001 From: Lou Kratz <219901029+loukratz-bv@users.noreply.github.com> Date: Tue, 13 Jan 2026 15:26:16 -0500 Subject: [PATCH] doc: Add Howtos, Gallery, redo tutorials --- .../create-gallery-example.chatmode.md | 178 +++ .../create-howto-example.chatmode.md | 259 ++++ .github/workflows/pr-checks.yml | 2 +- DOCUMENTATION_ROADMAP.md | 243 ++-- docs/.pages | 3 +- docs/Concepts/.pages | 5 - docs/Concepts/Core/application.md | 2 - docs/Concepts/Core/authorization-provider.md | 48 - docs/Concepts/Core/flow.md | 109 -- docs/Concepts/Core/indexes.md | 57 - docs/Concepts/Core/memory.md | 48 - docs/Concepts/Core/model.md | 53 - docs/Concepts/Core/telemetry.md | 19 - docs/Concepts/Core/tool.md | 63 - docs/Concepts/Core/variable.md | 53 - docs/Concepts/Overview/flow-control.md | 271 ---- docs/Concepts/Steps/agent.md | 54 - docs/Concepts/Steps/decoder.md | 53 - docs/Concepts/Steps/index.md | 42 - docs/Concepts/Steps/llm-inference.md | 62 - docs/Concepts/Steps/prompt-template.md | 49 - docs/Concepts/Steps/search.md | 56 - docs/Concepts/mental-model-and-philosophy.md | 363 +++++ docs/Gallery/dataflow_pipelines.md | 80 ++ docs/Gallery/dataflow_pipelines.mermaid | 45 + docs/Gallery/simple_chatbot.md | 36 + .../simple_chatbot.mermaid} | 22 +- .../configure_aws_authentication.md | 60 + .../use_api_key_authentication.md | 40 + .../load_multiple_inputs_from_files.md | 62 + .../pass_inputs_on_the_cli.md | 52 + .../serve_with_auto_reload.md | 26 + .../Data Processing/adjust_concurrency.md | 41 + .../Data Processing/cache_step_results.md | 71 + .../How To/Data Processing/decode_json_xml.md | 24 + .../Data Processing/explode_collections.md | 40 + docs/How To/Data Processing/gather_results.md | 68 + .../Data Processing/read_data_from_files.md | 35 + .../Data Processing/read_sql_databases.md | 47 + .../Data Processing/write_data_to_file.md | 40 + .../call_large_language_models.md | 51 + .../How To/Invoke 
Models/create_embeddings.md | 49 + .../reuse_prompts_with_templates.md | 39 + .../Language Features/include_qtype_yaml.md | 45 + .../include_raw_text_from_other_files.md | 47 + .../reference_entities_by_id.md | 51 + .../use_environment_variables.md | 47 + .../trace_calls_with_open_telemetry.md | 49 + .../validate_qtype_yaml.md | 35 + .../visualize_application_architecture.md | 61 + .../visualize_example.mermaid} | 23 +- docs/How To/Qtype Server/flow_as_ui.png | Bin 0 -> 84343 bytes .../Qtype Server/serve_flows_as_apis.md | 40 + docs/How To/Qtype Server/serve_flows_as_ui.md | 42 + .../use_conversational_interfaces.md | 59 + .../use_variables_with_ui_hints.md | 47 + .../bind_tool_inputs_and_outputs.md | 48 + ...reate_tools_from_openapi_specifications.md | 89 ++ .../create_tools_from_python_modules.md | 90 ++ docs/Reference/.pages | 4 - docs/Reference/Examples/rag.mmd | 58 - docs/Reference/Examples/simple-chat.md | 39 - docs/Reference/cli.md | 293 ++++ docs/Reference/{Python API => }/plugins.md | 0 docs/Reference/semantic-validation-rules.md | 179 +++ docs/Tutorials/01-first-qtype-application.md | 96 +- docs/Tutorials/02-conversational-chatbot.md | 247 +--- docs/Tutorials/03-structured-data.md | 481 +++++++ .../03-tools-and-function-calling.md | 460 ------ .../Tutorials/04-data-processing-pipelines.md | 334 ----- .../04-tools-and-function-calling.md | 483 +++++++ docs/Tutorials/05-multi-flow-applications.md | 281 ---- docs/Tutorials/06-rag-document-system.md | 1234 ----------------- docs/Tutorials/complete_example_ui.png | Bin 70114 -> 0 bytes docs/Tutorials/example_chat.png | Bin 0 -> 223800 bytes docs/Tutorials/index.md | 3 +- docs/Tutorials/multi_flow.mmd | 58 - docs/Tutorials/old-agent-with-tools.md | 281 ---- docs/Tutorials/rag_chat_ui.png | Bin 454903 -> 0 bytes docs/{Tutorials => }/example_ui.png | Bin docs/index.md | 2 +- docs/{How-To Guides => legacy_how_tos}/.pages | 0 .../Configuration/modular-yaml.md | 0 .../Configuration/phoenix_projects.png | Bin 
.../Configuration/phoenix_traces.png | Bin .../Configuration/reference-by-id.md | 0 .../Configuration/telemetry-setup.md | 0 .../Data Types/custom-types.md | 0 .../Data Types/domain-types.md | 0 .../Debugging/visualize-apps.md | 3 - .../Tools/api-tools.md | 0 .../Tools/python-tools.md | 0 docs/stylesheets/extra.css | 27 + .../aws_authentication.qtype.yaml | 63 + .../hello_world_chat.qtype.yaml | 0 .../simple_chatbot.qtype.yaml | 40 + examples/data_processing/batch_inputs.csv | 5 + .../batch_processing.qtype.yaml | 54 + .../cache_step_results.qtype.yaml | 78 ++ .../collect_results.qtype.yaml | 55 + examples/data_processing/create_sample_db.py | 129 ++ .../dataflow_pipelines.qtype.yaml | 108 ++ .../data_processing/decode_json.qtype.yaml | 23 + .../data_processing/explode_items.qtype.yaml | 25 + examples/data_processing/read_file.qtype.yaml | 67 + examples/data_processing/reviews.db | Bin 0 -> 8192 bytes .../data_processing/sample_documents.jsonl | 5 + .../create_embeddings.qtype.yaml | 28 + .../invoke_models/simple_llm_call.qtype.yaml | 32 + .../language_features/include_raw.qtype.yaml | 27 + examples/language_features/story_prompt.txt | 6 + .../language_features/ui_hints.qtype.yaml | 52 + .../data_analysis_with_telemetry.qtype.yaml | 0 .../bedrock/hello_world.qtype.yaml | 0 .../bedrock/hello_world_chat.qtype.yaml | 0 ...hello_world_chat_with_telemetry.qtype.yaml | 0 .../hello_world_chat_with_thinking.qtype.yaml | 0 .../bedrock/hello_world_completion.qtype.yaml | 0 ...ello_world_completion_with_auth.qtype.yaml | 0 .../bedrock/simple_agent_chat.qtype.yaml | 0 .../chat_with_langfuse.qtype.yaml | 0 examples/{ => legacy}/data/customers.csv | 0 .../{ => legacy}/data_processor.qtype.yaml | 0 .../echo/debug_example.qtype.yaml | 0 examples/{ => legacy}/echo/prompt.qtype.yaml | 0 examples/{ => legacy}/echo/readme.md | 0 examples/{ => legacy}/echo/test.qtype.yaml | 0 examples/{ => legacy}/echo/video.qtype.yaml | 0 .../field_extractor_example.qtype.yaml | 0 
.../multi_flow_example.qtype.yaml | 0 .../legacy/openai/hello_world_chat.qtype.yaml | 43 + ...hello_world_chat_with_telemetry.qtype.yaml | 0 examples/{ => legacy}/qtype_plugin_example.py | 0 examples/{ => legacy}/rag.qtype.yaml | 0 examples/{ => legacy}/sample_data.txt | 0 .../{ => legacy}/time_utilities.qtype.yaml | 0 examples/{ => legacy}/vertex/README.md | 0 .../vertex/hello_world_chat.qtype.yaml | 0 .../vertex/hello_world_completion.qtype.yaml | 0 ...ello_world_completion_with_auth.qtype.yaml | 0 .../trace_with_opentelemetry.qtype.yaml | 40 + examples/tools_integration/sample_utils.py | 35 + examples/tutorials/01_hello_world.qtype.yaml | 48 + .../02_conversational_chat.qtype.yaml | 37 + .../tutorials/03_structured_data.qtype.yaml | 130 ++ .../04_tools_and_function_calling.qtype.yaml | 89 ++ mcp-approach.md | 263 ++++ mkdocs.yml | 3 + pyproject.toml | 4 +- .../application/converters/tools_from_api.py | 74 +- qtype/base/types.py | 7 +- qtype/commands/generate.py | 4 +- qtype/dsl/loader.py | 6 +- qtype/semantic/visualize.py | 57 +- tests/dsl/test_dsl_loader.py | 6 +- tests/dsl/test_env_var_substitution.py | 145 ++ uv.lock | 16 +- 157 files changed, 5909 insertions(+), 4251 deletions(-) create mode 100644 .github/chatmodes/create-gallery-example.chatmode.md create mode 100644 .github/chatmodes/create-howto-example.chatmode.md delete mode 100644 docs/Concepts/.pages delete mode 100644 docs/Concepts/Core/application.md delete mode 100644 docs/Concepts/Core/authorization-provider.md delete mode 100644 docs/Concepts/Core/flow.md delete mode 100644 docs/Concepts/Core/indexes.md delete mode 100644 docs/Concepts/Core/memory.md delete mode 100644 docs/Concepts/Core/model.md delete mode 100644 docs/Concepts/Core/telemetry.md delete mode 100644 docs/Concepts/Core/tool.md delete mode 100644 docs/Concepts/Core/variable.md delete mode 100644 docs/Concepts/Overview/flow-control.md delete mode 100644 docs/Concepts/Steps/agent.md delete mode 100644 docs/Concepts/Steps/decoder.md 
delete mode 100644 docs/Concepts/Steps/index.md delete mode 100644 docs/Concepts/Steps/llm-inference.md delete mode 100644 docs/Concepts/Steps/prompt-template.md delete mode 100644 docs/Concepts/Steps/search.md create mode 100644 docs/Concepts/mental-model-and-philosophy.md create mode 100644 docs/Gallery/dataflow_pipelines.md create mode 100644 docs/Gallery/dataflow_pipelines.mermaid create mode 100644 docs/Gallery/simple_chatbot.md rename docs/{Tutorials/chat_with_telemetry.mermaid => Gallery/simple_chatbot.mermaid} (54%) create mode 100644 docs/How To/Authentication/configure_aws_authentication.md create mode 100644 docs/How To/Authentication/use_api_key_authentication.md create mode 100644 docs/How To/Command Line Usage/load_multiple_inputs_from_files.md create mode 100644 docs/How To/Command Line Usage/pass_inputs_on_the_cli.md create mode 100644 docs/How To/Command Line Usage/serve_with_auto_reload.md create mode 100644 docs/How To/Data Processing/adjust_concurrency.md create mode 100644 docs/How To/Data Processing/cache_step_results.md create mode 100644 docs/How To/Data Processing/decode_json_xml.md create mode 100644 docs/How To/Data Processing/explode_collections.md create mode 100644 docs/How To/Data Processing/gather_results.md create mode 100644 docs/How To/Data Processing/read_data_from_files.md create mode 100644 docs/How To/Data Processing/read_sql_databases.md create mode 100644 docs/How To/Data Processing/write_data_to_file.md create mode 100644 docs/How To/Invoke Models/call_large_language_models.md create mode 100644 docs/How To/Invoke Models/create_embeddings.md create mode 100644 docs/How To/Invoke Models/reuse_prompts_with_templates.md create mode 100644 docs/How To/Language Features/include_qtype_yaml.md create mode 100644 docs/How To/Language Features/include_raw_text_from_other_files.md create mode 100644 docs/How To/Language Features/reference_entities_by_id.md create mode 100644 docs/How To/Language Features/use_environment_variables.md 
create mode 100644 docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md create mode 100644 docs/How To/Observability & Debugging/validate_qtype_yaml.md create mode 100644 docs/How To/Observability & Debugging/visualize_application_architecture.md rename docs/{Reference/Examples/chat_with_telemetry.mmd => How To/Observability & Debugging/visualize_example.mermaid} (53%) create mode 100644 docs/How To/Qtype Server/flow_as_ui.png create mode 100644 docs/How To/Qtype Server/serve_flows_as_apis.md create mode 100644 docs/How To/Qtype Server/serve_flows_as_ui.md create mode 100644 docs/How To/Qtype Server/use_conversational_interfaces.md create mode 100644 docs/How To/Qtype Server/use_variables_with_ui_hints.md create mode 100644 docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md create mode 100644 docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md create mode 100644 docs/How To/Tools & Integration/create_tools_from_python_modules.md delete mode 100644 docs/Reference/.pages delete mode 100644 docs/Reference/Examples/rag.mmd delete mode 100644 docs/Reference/Examples/simple-chat.md create mode 100644 docs/Reference/cli.md rename docs/Reference/{Python API => }/plugins.md (100%) create mode 100644 docs/Reference/semantic-validation-rules.md create mode 100644 docs/Tutorials/03-structured-data.md delete mode 100644 docs/Tutorials/03-tools-and-function-calling.md delete mode 100644 docs/Tutorials/04-data-processing-pipelines.md create mode 100644 docs/Tutorials/04-tools-and-function-calling.md delete mode 100644 docs/Tutorials/05-multi-flow-applications.md delete mode 100644 docs/Tutorials/06-rag-document-system.md delete mode 100644 docs/Tutorials/complete_example_ui.png create mode 100644 docs/Tutorials/example_chat.png delete mode 100644 docs/Tutorials/multi_flow.mmd delete mode 100644 docs/Tutorials/old-agent-with-tools.md delete mode 100644 docs/Tutorials/rag_chat_ui.png rename docs/{Tutorials => }/example_ui.png 
(100%) rename docs/{How-To Guides => legacy_how_tos}/.pages (100%) rename docs/{How-To Guides => legacy_how_tos}/Configuration/modular-yaml.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Configuration/phoenix_projects.png (100%) rename docs/{How-To Guides => legacy_how_tos}/Configuration/phoenix_traces.png (100%) rename docs/{How-To Guides => legacy_how_tos}/Configuration/reference-by-id.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Configuration/telemetry-setup.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Data Types/custom-types.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Data Types/domain-types.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Debugging/visualize-apps.md (98%) rename docs/{How-To Guides => legacy_how_tos}/Tools/api-tools.md (100%) rename docs/{How-To Guides => legacy_how_tos}/Tools/python-tools.md (100%) create mode 100644 docs/stylesheets/extra.css create mode 100644 examples/authentication/aws_authentication.qtype.yaml rename examples/{openai => conversational_ai}/hello_world_chat.qtype.yaml (100%) create mode 100644 examples/conversational_ai/simple_chatbot.qtype.yaml create mode 100644 examples/data_processing/batch_inputs.csv create mode 100644 examples/data_processing/batch_processing.qtype.yaml create mode 100644 examples/data_processing/cache_step_results.qtype.yaml create mode 100644 examples/data_processing/collect_results.qtype.yaml create mode 100644 examples/data_processing/create_sample_db.py create mode 100644 examples/data_processing/dataflow_pipelines.qtype.yaml create mode 100644 examples/data_processing/decode_json.qtype.yaml create mode 100644 examples/data_processing/explode_items.qtype.yaml create mode 100644 examples/data_processing/read_file.qtype.yaml create mode 100644 examples/data_processing/reviews.db create mode 100644 examples/data_processing/sample_documents.jsonl create mode 100644 examples/invoke_models/create_embeddings.qtype.yaml create mode 100644 
examples/invoke_models/simple_llm_call.qtype.yaml create mode 100644 examples/language_features/include_raw.qtype.yaml create mode 100644 examples/language_features/story_prompt.txt create mode 100644 examples/language_features/ui_hints.qtype.yaml rename examples/{ => legacy}/bedrock/data_analysis_with_telemetry.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world_chat.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world_chat_with_telemetry.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world_chat_with_thinking.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world_completion.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/hello_world_completion_with_auth.qtype.yaml (100%) rename examples/{ => legacy}/bedrock/simple_agent_chat.qtype.yaml (100%) rename examples/{ => legacy}/chat_with_langfuse.qtype.yaml (100%) rename examples/{ => legacy}/data/customers.csv (100%) rename examples/{ => legacy}/data_processor.qtype.yaml (100%) rename examples/{ => legacy}/echo/debug_example.qtype.yaml (100%) rename examples/{ => legacy}/echo/prompt.qtype.yaml (100%) rename examples/{ => legacy}/echo/readme.md (100%) rename examples/{ => legacy}/echo/test.qtype.yaml (100%) rename examples/{ => legacy}/echo/video.qtype.yaml (100%) rename examples/{ => legacy}/field_extractor_example.qtype.yaml (100%) rename examples/{ => legacy}/multi_flow_example.qtype.yaml (100%) create mode 100644 examples/legacy/openai/hello_world_chat.qtype.yaml rename examples/{ => legacy}/openai/hello_world_chat_with_telemetry.qtype.yaml (100%) rename examples/{ => legacy}/qtype_plugin_example.py (100%) rename examples/{ => legacy}/rag.qtype.yaml (100%) rename examples/{ => legacy}/sample_data.txt (100%) rename examples/{ => legacy}/time_utilities.qtype.yaml (100%) rename examples/{ => legacy}/vertex/README.md (100%) rename examples/{ => legacy}/vertex/hello_world_chat.qtype.yaml (100%) 
rename examples/{ => legacy}/vertex/hello_world_completion.qtype.yaml (100%) rename examples/{ => legacy}/vertex/hello_world_completion_with_auth.qtype.yaml (100%) create mode 100644 examples/observability_debugging/trace_with_opentelemetry.qtype.yaml create mode 100644 examples/tools_integration/sample_utils.py create mode 100644 examples/tutorials/01_hello_world.qtype.yaml create mode 100644 examples/tutorials/02_conversational_chat.qtype.yaml create mode 100644 examples/tutorials/03_structured_data.qtype.yaml create mode 100644 examples/tutorials/04_tools_and_function_calling.qtype.yaml create mode 100644 mcp-approach.md create mode 100644 tests/dsl/test_env_var_substitution.py diff --git a/.github/chatmodes/create-gallery-example.chatmode.md b/.github/chatmodes/create-gallery-example.chatmode.md new file mode 100644 index 00000000..47a1a37c --- /dev/null +++ b/.github/chatmodes/create-gallery-example.chatmode.md @@ -0,0 +1,178 @@ +--- +description: Create a new example application for the QType documentation gallery +tools: ['vscode', 'execute', 'read', 'edit', 'search', 'web', 'agent', 'todo'] +--- + +# Create Gallery Example + +You are helping create example applications for the QType documentation gallery. Each gallery example provides users with an example reference of an AI application. + +## File Structure + +Each example requires two files: + +1. **Documentation**: `docs/Gallery/{Category}/{name}.md` +2. 
**QType YAML**: `examples/{category}/{name}.qtype.yaml` + +Where: +- `{Category}` is title case with spaces (e.g., "Conversational AI") +- `{category}` is lowercase with underscores (e.g., "conversational_ai") +- `{name}` is lowercase with underscores (e.g., "simple_chatbot") + +## Example Categories + +From DOCUMENTATION_ROADMAP.md: +- **Conversational AI**: Simple Chatbot, Multi-Turn Reasoning Agent, Customer Support Bot +- **RAG & Document Processing**: Complete RAG System, Semantic Search Q&A, Multi-Modal Document Analysis, Hybrid Search System +- **Data Processing**: ETL Pipeline, Batch Document Classification, Structured Data Extraction, CSV Processing at Scale +- **Multi-Agent Systems**: Research Assistant, Collaborative Agents +- **Specialized**: Evaluation & Judging, Content Moderation + +## Workflow + +### Step 1: Create QType YAML + +Create the example application in `examples/{category}/{name}.qtype.yaml`: + +**Minimalism is Critical**: +- Only include what's necessary to demonstrate the specific concept +- Don't add extra steps, types, or variables unless they're essential +- Keep the YAML as concise as possible while remaining functional +- If the example can work without a feature, remove it + +**Reference Syntax**: +- **NEVER use `$ref` syntax** - QType uses simple string references by ID +- ✅ Correct: `model: nova` or `inputs: [user_message]` +- ❌ Wrong: `model: {$ref: nova}` or `inputs: [{$ref: user_message}]` +- References work by name lookup, not by JSON pointer + +**Common Mistakes to Avoid**: +- Don't create custom types unless the example specifically demonstrates custom types +- Don't add collection/aggregation steps unless demonstrating data processing +- Don't add extra variables that aren't used in the example +- Don't include `type: Application` field (it's inferred) +- Don't over-comment - only add comments for non-obvious logic + +**Best Practices**: +- Use AWS Bedrock as the default model provider (amazon.nova-lite-v1:0) +- Use domain 
types (ChatMessage, RAGSearchResult, etc.) when appropriate +- Keep inference_params minimal (temperature, max_tokens only when relevant) +- Use descriptive IDs that make the flow clear + +### Step 2: Generate Mermaid Diagram + +After the YAML is approved, run: +```bash +uv run qtype visualize -nd examples/{category}/{name}.qtype.yaml -o "docs/Gallery/{Category}/{name}.mermaid" +``` + +### Step 3: Create Documentation + +Create `docs/Gallery/{Category}/{name}.md` following this exact structure: + +```markdown +# {Example Name} + +## Overview + +{2-3 sentence description of what the example does and demonstrates} + +## Architecture + +--8<-- "Gallery/{Category}/{name}.mermaid" + +## Complete Code + +\`\`\`yaml +--8<-- "../examples/{category}/{name}.qtype.yaml" +\`\`\` + +## Key Features + +- **{Schema Object/Type}**: {What it does and how it works briefly} +- **{Schema Object/Type}**: {What it does and how it works briefly} +... + +## Running the Example + +\`\`\`bash +# Start the server (if applicable) +qtype serve examples/{category}/{name}.qtype.yaml + +# Or run directly +qtype run examples/{category}/{name}.qtype.yaml +\`\`\` + +## Learn More + +- Tutorial: [Link to related tutorial](../../Tutorials/{tutorial}.md) {only if exists} +- How-To: [Link to related how-to](../../How-To%20Guides/{guide}.md) {only if exists} +- Example: [Link to related example](../../Gallery/{guide}.md) {only if exists} +``` + +## Critical Rules for Documentation + +### Snippet Paths +- **Mermaid**: `"Gallery/{Category}/{name}.mermaid"` (relative to docs root) +- **YAML**: `"../examples/{category}/{name}.qtype.yaml"` (relative to docs root) +- Paths are NOT relative to the current .md file +- Do NOT use `./` or `../../../` patterns + +### Key Features Section + +**MUST BE**: Schema objects, types, parameters, or attributes defined in the QType DSL +- ✅ Good: "Conversational Interface", "Memory", "ChatMessage Type", "LLMInference Step", "system_message parameter" +- ❌ Bad: "Model 
Abstraction", "Type Safety", "Simple Architecture", "Declarative Step Definition" + +**Process for writing feature descriptions**: +1. Dig into the codebase to understand the feature (use grep_search, read_file on qtype/dsl/, qtype/semantic/, qtype/interpreter/) +2. Read the model definitions, field descriptions, and executor implementations +3. Write a concise description that explains: + - What the feature is (schema object, type, parameter) + - What it does (its purpose/behavior) + - How it works (mechanism/implementation detail) +4. Keep it brief (1 sentence, max 2) + +### Example Key Features: + +```markdown +- **Conversational Interface**: Flow interface type that automatically accumulates chat messages in `conversation_history` and passes them to LLM steps for context-aware responses +- **Memory**: Conversation history buffer with `token_limit` that stores messages and automatically flushes oldest content when limit is exceeded +- **ChatMessage Type**: Built-in domain type with `role` field (user/assistant/system) and `blocks` list for structured multi-modal content +- **LLMInference Step**: Executes model inference with optional `system_message` prepended to conversation and `memory` reference for persistent context across turns +- **Model Configuration**: Model resource with provider-specific `inference_params` including `temperature` (randomness) and `max_tokens` (response length limit) +``` + +## Links Section + +Only link to documentation that already exists. Do NOT create placeholder links for: +- Tutorials that haven't been written +- How-To guides that don't exist +- Reference docs not yet created + +Check the docs/ directory structure to verify files exist before linking. 
+ +## Commands Reference + +```bash +# Validate the YAML +uv run qtype validate examples/{category}/{name}.qtype.yaml + +# Test the application +uv run qtype run examples/{category}/{name}.qtype.yaml + +# Generate visualization +uv run qtype visualize -nd examples/{category}/{name}.qtype.yaml -o "docs/Gallery/{Category}/{name}.mermaid" + +# Serve the application +uv run qtype serve examples/{category}/{name}.qtype.yaml +``` + +## QType-Specific Guidelines + +Follow all guidelines from `.github/copilot-instructions.md`: +- Use `uv run` for all commands +- Follow PEP8 and project style guidelines +- Use AWS Bedrock models by default +- Keep examples minimal and focused +- Include type hints where appropriate in comments diff --git a/.github/chatmodes/create-howto-example.chatmode.md b/.github/chatmodes/create-howto-example.chatmode.md new file mode 100644 index 00000000..3c7e5f8d --- /dev/null +++ b/.github/chatmodes/create-howto-example.chatmode.md @@ -0,0 +1,259 @@ +--- +description: Create a new How-To guide for the QType documentation +tools: ['vscode', 'execute', 'read', 'edit', 'search', 'web', 'agent', 'todo'] +--- + +# Create How-To Guide + +You are helping create How-To guides for QType documentation. Each How-To provides a focused, task-oriented solution to a specific problem. 
+ +## File Structure + +Each How-To requires one file: + +**Documentation**: `docs/How To/{Category}/{name}.md` + +Where: +- `{Category}` is title case with spaces (e.g., "Data Processing") +- `{name}` is lowercase with underscores (e.g., "adjust_concurrency") + +## How-To Categories + +From DOCUMENTATION_ROADMAP.md: + +- **Language Features**: Environment variables, references, includes, list types, session inputs +- **Command Line Usage**: CLI inputs, file loading +- **Data Processing**: SQL, files (CSV/JSON/text), documents, fan-out, aggregation, debugging +- **Invoke Models**: Prompts, LLMs, embeddings, model parameters, memory, provider switching +- **Authentication**: API keys, bearer tokens, OAuth2, AWS, Google Vertex, secret management +- **Observability & Debugging**: Validation, visualization, Phoenix, Langfuse, OpenTelemetry +- **Data & Types**: Built-in types, domain types, custom types, FieldExtractor, Construct +- **Tools & Integration**: OpenAPI specifications, Python modules, tool bindings +- **Retrieval Augmented Generation (RAG)**: Document conversion, chunking, embedding, vector/document indexes, search, filtering, reranking +- **Conversational Interfaces**: Flow interfaces, memory, conversation history, session persistence +- **Interfaces**: HTTP API, serve, interactive UI, variable UI hints +- **Extension & Advanced**: CLI plugins, step caching, custom step types + +## How-To Structure + +```markdown +# {Task as Question} + +{1-2 sentence overview of the problem / approach} + +### QType YAML + +\`\`\`yaml +# Only the relevant YAML snippet, not complete app +{minimal snippet demonstrating the solution} +\`\`\` + +### Explanation + +- **{Schema Object/Type/Parameter}**: {1 line what it does} +- **{Schema Object/Type/Parameter}**: {1 line what it does} +- **{Schema Object/Type/Parameter}**: {1 line what it does} + +## Complete Example + +\`\`\`yaml +# Optional: include only if a full working example adds value +# Otherwise, omit this section 
entirely +# ALWAYS use snippet syntax to include from examples/ directory: +--8<-- "../examples/{category}/{example_name}.qtype.yaml" +\`\`\` + +## See Also + +- [Related How-To](../{Category}/{guide}.md) +- [Component Reference](../../components/{Component}.md) +- [Tutorial](../../Tutorials/{tutorial}.md) +- [Example](../../Gallery/{Category}/{example}.md) +``` + +## Critical Design Principles + +### Brevity Over Completeness +- **How-Tos are NOT tutorials** - they solve one specific problem +- Show ONLY the relevant snippet, not a complete application +- If the snippet is 5-10 lines, that's perfect +- Omit `application:`, `flows:`, etc. unless directly relevant +- Focus on the configuration that answers the question + +### Question-Oriented Titles +- ✅ "Add Vector Search to an Application" +- ✅ "Configure Chunking Strategies" +- ✅ "Rerank Search Results" +- ❌ "Vector Search" (too broad) +- ❌ "How to Use Chunking" (redundant "How to") + +### Minimal Explanation +- Use bullet points only +- Each bullet: **{Thing}**: {What it does in 1 line} +- No prose paragraphs +- No architecture discussions +- Just enough to understand the snippet + +### Complete Example = Optional +- Only include if the snippet alone isn't self-contained +- Must be a minimal, runnable YAML file stored in `examples/` directory +- ALWAYS use snippet syntax `--8<-- "../examples/{path}"` to include the file +- Create example files in appropriate subdirectories (e.g., `examples/language_features/`, `examples/data_processing/`) +- Prefer referencing existing examples in Gallery instead of creating new ones + +## Workfl0: Create Example Files (if needed) + +If a Complete Example section is needed: +1. Create working example in `examples/{category}/` directory +2. Validate with `uv run qtype validate examples/{category}/{name}.qtype.yaml` (Copilot execution) +3. Test with `uv run qtype run examples/{category}/{name}.qtype.yaml` (Copilot execution) +4. 
Use snippet syntax in documentation to include the file +5. **Important**: In the documentation itself, show commands as `qtype ...` (without `uv run`) + +### Step ow + +### Step 1: Understand the Task + +Identify: +- What specific problem does this solve? +- What's the minimal configuration needed? +- What schema objects/types/parameters are involved? + +### Step 2: Create Minimal Snippet + +Extract ONLY the relevant portion: +- If showing a step configuration, show just that step +- If showing model configuration, show just the model resource +- If showing authentication, show just the auth_providers section +- Comment only non-obvious parts + +**Reference Syntax**: +- **NEVER use `$ref` syntax** - QType uses simple string references by ID +- ✅ Correct: `model: nova` or `inputs: [user_message]` +- ❌ Wrong: `model: {$ref: nova}` or `inputs: [{$ref: user_message}]` + +### Step 3: Research Schema Objects + +For the Explanation section: +1. Use grep_search, read_file on qtype/dsl/, qtype/semantic/, qtype/interpreter/ +2. Read model definitions, field descriptions, executor implementations +3. Write concise bullets explaining: + - What each schema object/parameter is + - What it does (purpose/behavior) + - How it works (mechanism) + +### Step 4: AInclude Raw Text from Other Files" + +```markdown +# Include Raw Text from Other Files + +Load external text files into your YAML configuration using the `!include_raw` directive, useful for keeping prompts, templates, and long text content in separate files. 
+ +### QType YAML + +\`\`\`yaml +steps: + - id: generate_story + type: PromptTemplate + template: !include_raw story_prompt.txt + inputs: + - theme + - tone + outputs: + - story +\`\`\` + +**story_prompt.txt:** +\`\`\`txt +--8<-- "../examples/language_features/story_prompt.txt" +\`\`\` + +### Explanation + +- **!include_raw**: YAML tag that loads the contents of an external file as a raw string +- **Relative paths**: File paths are resolved relative to the YAML file's location +- **Template substitution**: The loaded text can contain variable placeholders (e.g., `{theme}`, `{tone}`) that are substituted at runtime +- **Use cases**just Concurrency" (snippet-only, no complete example) + +```markdown +# Adjust Concurrency + +Control parallel execution of steps to optimize throughput and resource usage using the `concurrency` parameter on steps that implement `ConcurrentStepMixin` or `BatchableStepMixin`. + +### QType YAML + +\`\`\`yaml +steps: + - type: LLMInference + id: classify + model: nova + concurrency: 10 # Process up to 10 items in parallel + inputs: [document] + prompt: "Classify this document: {{document}}" +\`\`\` + +### Explanation + +- **concurrency**: Maximum number of concurrent executions for this step (default: 5) +- **ConcurrentStepMixin**: Steps that can process multiple items in parallel (LLMInference, InvokeTool, VectorSearch) +- **BatchableStepMixin**: Steps that can batch API calls for efficiency (InvokeEmbedding, IndexUpsert) + +## See Also + +- [LLMInference Reference](../../components/LLMInference.md) +- [Example: Data Processing Pipeline](../../Gallery/Data%20Processing/batch_classification + - type: VectorIndex + id: doc_index + embedding_model: titan_embed + dimension: 1024 + +steps: + - type: VectorSearch + id: search + index: doc_index + inputs: [query] + top_k: 5 # Return top 5 results + score_threshold: 0.7 # Minimum similarity score +\`\`\` + +### Explanation + +- **VectorIndex**: In-memory vector store with HNSW algorithm for 
similarity search +- **embedding_model**: EmbeddingModel resource used to generate query embeddings +- **dimension**: Vector dimension, must match embedding model output +- **top_k**: Number of results to return, ranked by similarity score +- **score_threshold**: Minimum cosine similarity (0-1) for results + +## See Also + +- [VectorIndex Reference](../../components/VectorIndex.md) +- [VectorSearch Reference](../../components/VectorSearch.md) +- [Example: RAG System](../../Gallery/RAG%20%26%20Document%20Processing/rag_system.md) +``` + +## Commands Reference + +**In Documentation** (user-facing): +```bash +# Validate snippet syntax (if creating complete example) +qtype validate examples/howto/{name}.qtype.yaml + +# Test complete example +qtype run examples/howto/{name}.qtype.yaml +``` + +**For Copilot Execution** (actual terminal commands must use `uv run`): +```bash +# Copilot must prefix all qtype commands with 'uv run' +uv run qtype validate examples/howto/{name}.qtype.yaml +uv run qtype run examples/howto/{name}.qtype.yaml +``` + +## QType-Specific Guidelines + +Follow all guidelines from `.github/copilot-instructions.md`: +- Use `uv run` for all commands +- Follow PEP8 and project style guidelines +- Use AWS Bedrock models by default in examples +- Keep snippets minimal and focused +- Reference existing schema objects accurately diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 3b033ff6..2f39fb15 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -97,4 +97,4 @@ jobs: # Test schema generation uv run qtype generate schema -o /tmp/test-schema.json # Test validation if example exists - uv run qtype validate examples/bedrock/hello_world.qtype.yaml \ No newline at end of file + uv run qtype validate examples/conversational_ai/simple_chatbot.qtype.yaml \ No newline at end of file diff --git a/DOCUMENTATION_ROADMAP.md b/DOCUMENTATION_ROADMAP.md index 5109eac7..0d5d8057 100644 --- 
a/DOCUMENTATION_ROADMAP.md +++ b/DOCUMENTATION_ROADMAP.md @@ -1,20 +1,18 @@ # Tutorials -- [ ] Your First QType Application (15 min) -- [ ] Configuration and Authentication (20 min) - All the examples need AWS/OpenAI credentials, but auth is never taught - It's implicitly used but never explained - A beginner would be stuck at "how do I configure my API key?" -- [ ] Building a Stateful Chatbot (20 min) -- [ ] Working with Types and Structured Data (25 min) -- [ ] Adding Tools to Your Application (25 min) +- [x] Your First QType Application + auth (15 min) +- [x] Building a Stateful Chatbot (20 min) +- [x] Working with Types and Structured Data (25 min) +- [x] Adding Tools to Your Application (25 min) - [ ] Building an AI Agent (30 min) +- [ ] Organize applications with modular YAML (includes and references) -# Patterns & Examples +# Example Gallery +Structure: ┌─────────────────────────────────┐ -│ Pattern Name: "RAG System" │ +│ Example Name: "RAG System" │ ├─────────────────────────────────┤ │ • Visual diagram │ │ • 2-3 sentence description │ @@ -26,114 +24,151 @@ │ → Reference: VectorIndex docs │ └─────────────────────────────────┘ -**Conversational AI** -- [ ] ⚡ Simple Chatbot -- [ ] ⚡ Multi-Turn Reasoning Agent -- [ ] ⚡ Customer Support Bot - -**RAG & Document Processing** -- [ ] ⚡ Complete RAG System (ingestion + chat) -- [ ] ⚡ Semantic Search Q&A -- [ ] ⚡ Multi-Modal Document Analysis +- [x] ⚡ Simple Chatbot +- [x] ⚡ Dataflow Pipeline for LLM Calls +- [ ] ⚡ Retrieval Augmented Generation (RAG) +- [ ] ⚡ Q&A With Semantic Re-Ranking - [ ] ⚡ Hybrid Search System - -**Data Processing** -- [ ] ⚡ ETL Pipeline -- [ ] ⚡ Batch Document Classification -- [ ] ⚡ Structured Data Extraction -- [ ] ⚡ CSV Processing at Scale - -**Multi-Agent Systems** - [ ] ⚡ Research Assistant - [ ] ⚡ Collaborative Agents - -**Specialized** - [ ] ⚡ Evaluation & Judging -- [ ] ⚡ Content Moderation # How Tos -**Getting Started** -- [ ] Configure authentication and API keys -- [ ] Set up 
environment variables and secrets -- [ ] Adjust model parameters (memory, temperature, token, system messages) +``` +# {Task as Question} -**Configuration & Organization** -- [ ] Organize applications with modular YAML (includes and references) -- [ ] Reference entities by ID -- [ ] Use session inputs for sticky variables -- [ ] Manage secrets with Secret Manager +{1-2 sentence overview of the problem / approach} -**Observability & Debugging** -- [ ] Validate YAML specifications -- [ ] Visualize application architecture -- [ ] Add telemetry (Phoenix, Langfuse, Prometheus) +### QType YAML -**Data Sources** -- [ ] Read from SQL databases -- [ ] Read from files (CSV, JSON, text) -- [ ] Load Documents +\`\`\`yaml +# Only the relevant YAML snippet, not complete app +{minimal snippet demonstrating the solution} +\`\`\` -**Data & Types** -- [ ] Work with domain types (ChatMessage, RAGDocument, etc.) -- [ ] Define custom types -- [ ] Extract structured data with FieldExtractor -- [ ] Transform data with Construct -- [ ] Aggregate data from multiple records - -**Document Processing** -- [ ] Convert documents to text (PDF, DOCX) -- [ ] Split documents into chunks -- [ ] Embed document chunks -- [ ] Configure chunking strategies - -**Search & Retrieval** -- [ ] Add vector search to an application -- [ ] Perform document search (full-text) -- [ ] Configure search filters -- [ ] Rerank search results -- [ ] Implement hybrid search strategies - -**Index Management** -- [ ] Create and configure vector indexes -- [ ] Create and configure document indexes -- [ ] Upsert data into indexes +### Explanation -**Tools & Integration** -- [ ] Create tools from OpenAPI specifications -- [ ] Create tools from Python modules -- [ ] Configure tool parameters and outputs +- **{Schema Object/Type/Parameter}**: {1 line what it does} +- **{Schema Object/Type/Parameter}**: {1 line what it does} +- **{Schema Object/Type/Parameter}**: {1 line what it does} + +## Complete Example -**Flows & 
Orchestration** -- [ ] Organize multi-flow applications -- [ ] Invoke flows from other flows -- [ ] Share resources across flows +\`\`\`yaml +# Optional: include only if a full working example adds value +# Otherwise, omit this section entirely +# Use a snippet to include from the examples/ directory +\`\`\` -**Data Output** -- [ ] Write results to files -- [ ] Batch write operations +## See Also -**Performance & Optimization** -- [ ] Configure concurrency for parallel processing -- [ ] Enable caching to reduce costs -- [ ] Batch process large datasets +- [Related How-To](../{Category}/{guide}.md) +- [Component Reference](../../components/{Component}.md) +- [Tutorial](../../Tutorials/{tutorial}.md) +- [Example](../../Gallery/{Category}/{example}.md) +``` + +**Language Features** +- [x] Use Environment Variables +- [x] Reference Entities by ID +- [x] Include Raw Text from Other Files +- [x] Include QType Yaml +- [ ] Use List Types +- [ ] Use Session Inputs for Sticky Variables + +**Command Line Usage** +- [x] Pass Inputs On The CLI +- [x] Load Multiple Inputs from Files + +**Data Processing** +- [x] Read Data from SQL databases +- [x] Read Data from files +- [ ] Read Data from Document Sources +- [x] Write Data to a File +- [x] Adjust Concurrency +- [ ] Configure Batch Processing +- [ ] Invoke Other Flows # Depends on https://github.com/bazaarvoice/qtype/issues/112 +- [x] Cache Step Results +- [x] Explode Collections for Fan-Out Processing +- [x] Gather Results into a List +- [ ] Aggregate results? 
+- [ ] Use Echo for Debugging +- [x] Decode JSON/XML to Structured Data + +**Invoke Models** +- [x] Reuse Prompts with Templates +- [x] Call Large Language Models +- [x] Create Embeddings +- [x] Configure Model Parameters (temperature, max_tokens) +- [ ] Use Memory for Conversational Context +- [ ] Switch Between Model Providers + +**Authentication** +- [x] Use API Key Authentication +- [ ] Use Bearer Token Authentication +- [ ] Use OAuth2 Authentication +- [x] Configure AWS Authentication (Access Keys, Profile, Role) +- [ ] Configure Google Vertex Authentication +- [ ] Manage Secrets with Secret Manager + +**Observability & Debugging** +- [x] Trace Calls with Open Telemetry +- [x] Validate Qtype YAML +- [x] Visualize Application Architecture + +**Data & Types** +- [ ] Use Built-In Types (`text`, `number`, `boolean`, `bytes`) +- [ ] Use Built-In Domain Types (ChatMessage, RAGDocument, RAGChunk, RAGSearchResult, Embedding, AggregateStats) +- [ ] Define Custom Types +- [ ] Extract Structured Data with FieldExtractor (JSONPath) +- [ ] Transform Data with Construct +- [ ] Work with List Types + +**Tools & Integration** +- [x] Create Tools from OpenAPI Specifications +- [x] Create Tools from Python Modules +- [x] Bind Tool Inputs and Outputs + +**Qtype Server** +- [x] Serve Flows as APIs +- [x] Serve Flows as UI +- [x] Use Conversational Interfaces +- [x] Serve Applications with Auto-Reload +- [x] Use Variables with UI Hints + +**Chat Specific** +- [ ] Configure Memory Token Limits +- [ ] Use Conversation History in Prompts +- [ ] Persist Session Inputs Across Turns + + +**Retrieval Augmented Generation (RAG)** +- [ ] Convert Documents to Text (PDF, DOCX) +- [ ] Split Documents into Chunks (Configure Chunk Size and Overlap) +- [ ] Embed Document Chunks +- [ ] Populate a Vector Index +- [ ] Populate a Document Index +- [ ] Search a Document Index (Full-Text) +- [ ] Search a Vector Index (Semantic) +- [ ] Filter Search Results +- [ ] Rerank Search Results with Bedrock 
Reranker +- [ ] Implement Hybrid Search Strategies +- [ ] Configure Vector Index Parameters (HNSW) +- [ ] Upsert Data into Indexes -**Deployment** -- [ ] Serve applications via HTTP API -- [ ] Deploy with Docker **Extension & Advanced** -- [ ] Create custom step types -- [ ] Write QType plugins +- [ ] Write CLI Plugins +- [ ] Configure Step Caching with Version/Namespace -# Explanation +# Concepts **Mental Model & Philosophy** -- [ ] ⚡ What is QType? (elevator pitch and purpose) -- [ ] ⚡ Core mental model: flows, steps, variables, and data flow -- [ ] ⚡ Design constraints and assumptions -- [ ] ⚡ What QType is NOT (non-goals and anti-patterns) -- [ ] ⚡ When to use QType vs alternatives +- [x] ⚡ What is QType? (elevator pitch and purpose) +- [x] ⚡ Core mental model: flows, steps, variables, and data flow +- [x] ⚡ What QType is NOT (non-goals and anti-patterns) +- [x] ⚡ When to use QType vs alternatives **Architecture & Design** - [ ] QType architecture: DSL → Semantic → Interpreter layers @@ -166,20 +201,13 @@ - [ ] Memory management and token limits **Validation & Rules** -- [ ] ⚡ Semantic validation rules explained -- [ ] ⚡ Why each validation rule exists -- [ ] ⚡ How to satisfy validation requirements +- [x] ⚡ Semantic validation rules by model entity **Decision Guides** - [ ] Vector search vs Document search vs SQL search (when to use each) - [ ] When to use InvokeTool vs Agent - [ ] When to use batching vs concurrent execution -**Common Mistakes & Anti-Patterns** -- [ ] ⚡ Common pitfalls and how to avoid them -- [ ] ⚡ Anti-patterns and why they fail -- [ ] ⚡ Debugging common misconceptions - # Reference **CLI Commands** @@ -187,9 +215,8 @@ - `qtype validate` - Validate YAML specs - `qtype serve` - Serve as HTTP API - `qtype visualize` - Generate architecture diagrams -- `qtype generate schema` - Generate JSON schema -- `qtype convert api` - Convert OpenAPI to tools -- `qtype convert module` - Convert Python modules to tools +- `qtype generate` - Generate JSON 
schema +- `qtype convert` - Convert OpenAPI or Python modules to tools **YAML Specification** - [ ] Application structure diff --git a/docs/.pages b/docs/.pages index cae75006..aa470df8 100644 --- a/docs/.pages +++ b/docs/.pages @@ -1,7 +1,8 @@ nav: - index.md - Tutorials - - How-To Guides + - Gallery + - How To - Concepts - Reference - Contributing diff --git a/docs/Concepts/.pages b/docs/Concepts/.pages deleted file mode 100644 index 78ca9445..00000000 --- a/docs/Concepts/.pages +++ /dev/null @@ -1,5 +0,0 @@ -title: Concepts -nav: - - Overview - - Core - - Steps diff --git a/docs/Concepts/Core/application.md b/docs/Concepts/Core/application.md deleted file mode 100644 index b07ce2bf..00000000 --- a/docs/Concepts/Core/application.md +++ /dev/null @@ -1,2 +0,0 @@ ---8<-- "components/Application.md" - diff --git a/docs/Concepts/Core/authorization-provider.md b/docs/Concepts/Core/authorization-provider.md deleted file mode 100644 index 96bf748a..00000000 --- a/docs/Concepts/Core/authorization-provider.md +++ /dev/null @@ -1,48 +0,0 @@ -# AuthorizationProvider - -AuthorizationProvider defines how QType components authenticate with external APIs and services. It provides a centralized, reusable way to configure authentication credentials and methods for models, tools, indexes, and other components that need to access external resources. - -By centralizing authentication configuration, AuthorizationProvider enables secure credential management, supports multiple authentication methods (API keys, OAuth2, Bearer tokens, AWS credentials), and allows the same credentials to be reused across multiple components. 
- -## Key Principles - -### Type Discriminator - -All authorization providers must include a `type` field for proper schema validation: -- `type: APIKeyAuthProvider` for API key authentication -- `type: BearerTokenAuthProvider` for bearer token authentication -- `type: OAuth2AuthProvider` for OAuth2 authentication -- `type: AWSAuthProvider` for AWS credentials - -### Centralized Definition & Reference by ID - -Authorization providers are defined at the application level and referenced by ID: - -```yaml -authorization_providers: - - type: APIKeyAuthProvider - id: openai_auth - api_key: ${OPENAI_API_KEY} - -models: - - type: Model - id: gpt4 - provider: openai - auth: openai_auth # References by ID -``` - -## Rules and Behaviors - -- **Unique IDs**: Each authorization provider must have a unique `id` within the application. Duplicate authorization provider IDs will result in a validation error. -- **Required Type**: The `type` field is mandatory and specifies the authentication method. -- **Method-Specific Fields**: Different authentication types require different field combinations (see component docs for details). -- **Reference by Components**: Can be referenced by Models, Tools, Indexes, and TelemetrySink by their ID string. -- **Environment Variable Support**: Credential fields support environment variable substitution using `${VARIABLE_NAME}` syntax for secure credential management. - ---8<-- "components/AuthorizationProvider.md" - -## Related Concepts - -AuthorizationProvider is referenced by [Model](model.md), [Tool](tool.md), [Index](indexes.md), and other components that need external API access. - -## Example Usage diff --git a/docs/Concepts/Core/flow.md b/docs/Concepts/Core/flow.md deleted file mode 100644 index 9c9a0da6..00000000 --- a/docs/Concepts/Core/flow.md +++ /dev/null @@ -1,109 +0,0 @@ -# Flow - -A flow defines a sequence of [Steps](../Steps/index.md) that work together to accomplish a specific task or workflow. 
Flows are the primary orchestration mechanism in QType, allowing you to chain multiple operations such as LLM inference, tool calls, and data processing into coherent, reusable workflows. - -Flows can be invoked from other flows using the InvokeFlow step, enabling composability where common patterns can be extracted into reusable flow components. - -## Key Principles - -### Explicit Variable Declarations - -All variables used within a flow **must be declared** in the `variables` section of the flow. This creates a clear "data contract" for the flow, making it easier to understand what data flows through each step. - -```yaml -flows: - - type: Flow - id: my_flow - variables: - - id: user_query - type: text - - id: response - type: text - # ... steps reference these variables -``` - -### Reference by ID - -Steps reference variables by their ID (as strings). The variable must be declared in the flow's `variables` section. - -```yaml -steps: - - id: my_step - type: LLMInference - inputs: - - user_query # References the variable declared above - outputs: - - response -``` - -## Rules and Behaviors - -- **Unique IDs**: Each flow must have a unique `id` within the application. Duplicate flow IDs will result in a validation error. -- **Required Steps**: Flows must contain at least one step. Empty flows will result in a validation error. -- **Required Variables**: All variables used in step inputs/outputs must be declared in the flow's `variables` section. -- **Input Specification**: The `inputs` field lists which variables serve as the flow's inputs (by referencing their IDs). -- **Output Specification**: The `outputs` field lists which variables serve as the flow's outputs (by referencing their IDs). -- **Step References**: Steps can be referenced by ID (string) or embedded as inline objects within the flow definition. -- **Sequential Execution**: Steps within a flow are executed in the order they appear in the `steps` list. 
- ---8<-- "components/Flow.md" - -## Flow Interface - -Flows define their interaction pattern using the `interface` field, which specifies how the flow should be hosted and what kind of user experience it provides. - -### Complete Flows - -Complete flows (`type: Complete`) are stateless executions that accept input values and produce output values. Think of them like data pipelines or functions. - -```yaml -flows: - - type: Flow - id: my_flow - interface: - type: Complete - variables: - - id: input_data - type: text - - id: output_data - type: text - inputs: - - input_data - outputs: - - output_data -``` - -**Interpreter Behavior**: The interpreter hosts each complete flow as an HTTP endpoint where you can POST the input values and receive the output values. - -### Conversational Flows - -Conversational flows (`type: Conversational`) enable interactive, multi-turn conversations. They have specific requirements: - -**Requirements:** -* Must have at least one input variable of type `ChatMessage` -* Must have exactly one output variable of type `ChatMessage` - -**Session Management**: The `interface.session_inputs` field allows you to specify which variables should persist across conversation turns: - -```yaml -flows: - - type: Flow - id: doc_chat_flow - interface: - type: Conversational - session_inputs: - - document_content # This variable persists across turns - variables: - - id: document_content - type: text - - id: user_message - type: ChatMessage - - id: ai_response - type: ChatMessage -``` - -**Interpreter Behavior**: -- The interpreter hosts conversational flows as endpoints ending in `/chat` -- Supports Vercel's [ai-sdk](https://ai-sdk.dev/) -- The UI automatically detects conversational flows and provides a chat interface -- Conversational flows can be stateful or stateless depending on whether LLM inference steps use [Memory](memory.md) \ No newline at end of file diff --git a/docs/Concepts/Core/indexes.md b/docs/Concepts/Core/indexes.md deleted file mode 
100644 index 7855f5d0..00000000 --- a/docs/Concepts/Core/indexes.md +++ /dev/null @@ -1,57 +0,0 @@ -# Index - -An index represents a searchable data structure that enables retrieval operations within QType applications. Indexes provide the foundation for Retrieval Augmented Generation (RAG) patterns, semantic search, and knowledge retrieval workflows by allowing applications to search through large collections of documents, embeddings, or structured data. - -Indexes are defined at the application level and referenced by search steps that need to query data. - -## Key Principles - -### Type Discriminator - -All indexes must include a `type` field for proper schema validation: -- `type: VectorIndex` for vector/embedding similarity search -- `type: DocumentIndex` for text-based document search - -### Centralized Definition & Reference by ID - -Indexes are defined at the application level and referenced by ID: - -```yaml -indexes: - - type: VectorIndex - id: my_vector_db - name: embeddings_collection - embedding_model: text_embedder - args: - host: localhost - port: 6333 - -flows: - - type: Flow - id: search_flow - steps: - - type: VectorSearch - index: my_vector_db # References by ID -``` - -## Rules and Behaviors - -- **Unique IDs**: Each index must have a unique `id` within the application. Duplicate index IDs will result in a validation error. -- **Required Name**: The `name` field specifies the actual index, collection, or table name in the external system. -- **Authentication Support**: Indexes can reference an `AuthorizationProvider` by ID for secure access to external search systems. -- **Flexible Configuration**: The `args` field allows index-specific configuration and connection parameters for different backends. -- **Embedding Model Requirement**: `VectorIndex` requires an `embedding_model` reference to vectorize queries and match the embedding space of stored documents. 
- ---8<-- "components/Index.md" - -## Index Types - ---8<-- "components/VectorIndex.md" - ---8<-- "components/DocumentIndex.md" - -## Related Concepts - -Indexes are used by search [Steps](../Steps/index.md) and require [Model](model.md) configurations (especially embedding models for vector indexes). They may also reference [AuthorizationProvider](authorization-provider.md) for secure access. - -## Example Usage diff --git a/docs/Concepts/Core/memory.md b/docs/Concepts/Core/memory.md deleted file mode 100644 index 6eb0de03..00000000 --- a/docs/Concepts/Core/memory.md +++ /dev/null @@ -1,48 +0,0 @@ -# Memory - -Memory in QType provides persistent storage for conversation history and contextual state data across multiple steps or conversation turns. It enables applications to maintain context between interactions, allowing for more coherent and context-aware conversations in chatbots, agents, and multi-turn workflows. - -Memory configurations are defined at the application level and referenced by steps that need to maintain state. - -## Key Principles - -### Centralized Definition - -Memory objects are defined once at the application level and can be shared across multiple steps: - -```yaml -id: my_app -memories: - - id: chat_memory - token_limit: 50000 - chat_history_token_ratio: 0.7 - -flows: - - type: Flow - id: chat_flow - steps: - - type: LLMInference - model: gpt4 - memory: chat_memory # References by ID -``` - -### Reference by ID - -Steps reference memory configurations by their ID (as a string), not by embedding the memory object inline. - -## Rules and Behaviors - -- **Unique IDs**: Each memory block must have a unique `id` within the application. Duplicate memory IDs will result in a validation error. -- **Token Management**: Memory automatically manages token limits to prevent exceeding model context windows. When the token limit is reached, older content is flushed based on the `token_flush_size`. 
-- **Chat History Ratio**: The `chat_history_token_ratio` determines what portion of the total memory should be reserved for chat history versus other contextual data. -- **Default Values**: Memory has sensible defaults - 100,000 token limit, 70% chat history ratio, and 3,000 token flush size. -- **Shared Memory**: Multiple steps can reference the same memory ID to share conversational context. - ---8<-- "components/Memory.md" - -## Related Concepts - -Memory is primarily used by LLM-based steps like [LLMInference](../Steps/llm-inference.md) and [Agent](../Steps/agent.md) to maintain conversational context. - -## Example Usage - diff --git a/docs/Concepts/Core/model.md b/docs/Concepts/Core/model.md deleted file mode 100644 index 81d1cec2..00000000 --- a/docs/Concepts/Core/model.md +++ /dev/null @@ -1,53 +0,0 @@ -# Model - -A model represents a generative AI model configuration that can be used for inference tasks such as text generation, chat, or embeddings. Models define how to connect to and configure specific AI providers like OpenAI, Anthropic, AWS Bedrock, or others. - -Each model must have a unique `id` and specify a `provider`. Models are defined at the application level and can be referenced by steps like `LLMInference`, `Agent`, or `InvokeEmbedding`. - -## Key Principles - -### Type Discriminator - -All models must include a `type` field for proper schema validation: -- `type: Model` for standard generative models -- `type: EmbeddingModel` for embedding/vectorization models - -### Referencing Models - -Steps reference models by their ID: - -```yaml -models: - - type: Model - id: gpt4 - provider: openai - model_id: gpt-4-turbo - -flows: - - type: Flow - id: my_flow - steps: - - type: LLMInference - model: gpt4 # References the model by ID -``` - -## Rules and Behaviors - -- **Unique IDs**: Each model must have a unique `id` within the application. Duplicate model IDs will result in a validation error. 
-- **Model ID Resolution**: If `model_id` is not specified, the model's `id` field is used as the model identifier for the provider. -- **Provider Requirement**: The `provider` field is required and specifies which AI service to use (e.g., "openai", "anthropic", "aws-bedrock"). -- **Authentication**: Models can reference an `AuthorizationProvider` by ID or as a string reference for API authentication. -- **Inference Parameters**: The `inference_params` dictionary allows customization of model behavior (temperature, max_tokens, etc.). - -## Model Types - ---8<-- "components/Model.md" - ---8<-- "components/EmbeddingModel.md" - -## Related Concepts - -Models can reference [AuthorizationProvider](authorization-provider.md) for secure API access. - -## Example Usage - diff --git a/docs/Concepts/Core/telemetry.md b/docs/Concepts/Core/telemetry.md deleted file mode 100644 index 2e4beb73..00000000 --- a/docs/Concepts/Core/telemetry.md +++ /dev/null @@ -1,19 +0,0 @@ -# Telemetry - -Telemetry provides comprehensive observability and monitoring capabilities for QType applications, enabling developers to track performance, debug issues, and gain insights into application behavior. It captures metrics, traces, and logs across all components including steps, flows, model interactions, and tool executions. - -Telemetry is essential for production deployments, allowing teams to monitor application health, optimize performance, identify bottlenecks, and troubleshoot issues in real-time or through historical analysis. - -Only one telemetry sink can be configured per application. For multiple destinations, use a telemetry aggregator like OpenTelemetry Collector. - -## Component Definition - ---8<-- "components/TelemetrySink.md" - - -## Related Concepts - -Telemetry observes all QType components including [Steps](../Steps/index.md), [Flows](flow.md), [Models](model.md), [Tools](tool.md), and [Memory](memory.md). 
It integrates with [AuthorizationProvider](authorization-provider.md) for secure data export and provides insights for optimizing [Variable](variable.md) data flow. - -## Example Usage - diff --git a/docs/Concepts/Core/tool.md b/docs/Concepts/Core/tool.md deleted file mode 100644 index 4be9bddd..00000000 --- a/docs/Concepts/Core/tool.md +++ /dev/null @@ -1,63 +0,0 @@ -# Tool - -A tool represents an external capability that can be invoked to perform specific tasks, integrate with external services, or execute custom functions. Tools provide the bridge between QType applications and the outside world, enabling workflows to interact with APIs, databases, file systems, and custom business logic. - -Tools are defined at the application level and can be: -- Invoked directly using the InvokeTool step -- Made available to [Agents](../Steps/agent.md) for autonomous decision-making - -## Key Principles - -### Type Discriminator - -All tools must include a `type` field for proper schema validation: -- `type: PythonFunctionTool` for Python function calls -- `type: APITool` for HTTP API endpoints - -### Centralized Definition - -Tools are defined once at the application level and referenced by ID: - -```yaml -tools: - - type: PythonFunctionTool - id: my_calculator - name: calculate - description: Performs calculations - function_name: calculate - module_path: my_tools.calculator - inputs: - expression: - type: text - optional: false - outputs: - result: - type: text - optional: false - -flows: - - type: Flow - id: my_flow - steps: - - type: InvokeTool - tool: my_calculator # References by ID -``` - -## Rules and Behaviors - -- **Dual Usage**: Tools can be used as standalone steps (via InvokeTool) or provided to agents for autonomous invocation -- **Authentication Support**: Tools can reference [AuthorizationProvider](authorization-provider.md) by ID for secure access to external resources -- **Input/Output Parameters**: Tools define their interface through input and output 
parameter dictionaries -- **Type Safety**: Tool parameters are validated against their declared types -- **Reusability**: Tools defined once can be used across multiple flows or agents - -## Tool Types - ---8<-- "components/APITool.md" - ---8<-- "components/PythonFunctionTool.md" - -## Related Concepts - -Tools integrate with [AuthorizationProvider](authorization-provider.md) for secure access, can be used as [Steps](../Steps/index.md) in [Flows](flow.md), and are essential for [Agent](../Steps/agent.md) capabilities. They consume and produce [Variables](variable.md) for data flow and may interact with [Models](model.md) and [Indexes](indexes.md). - diff --git a/docs/Concepts/Core/variable.md b/docs/Concepts/Core/variable.md deleted file mode 100644 index daac973a..00000000 --- a/docs/Concepts/Core/variable.md +++ /dev/null @@ -1,53 +0,0 @@ -# Variable - -Variables are the fundamental data containers in QType. All data flowing between [Steps](../Steps/index.md) must be explicitly declared as variables. This "declare before use" principle creates clear data contracts and enables static validation. - -## Variable Scoping - -Variables are scoped to the [Flow](flow.md) where they are declared. Each flow's `variables` section lists all variables available within that flow. - -```yaml -flows: - - type: Flow - id: my_flow - variables: - - id: user_input - type: text - - id: processed_output - type: text -``` - -## Variable Declaration - -Each variable must have: -- **Unique ID**: Used to reference the variable throughout the flow. Must be unique within the flow's scope. -- **Type**: Specifies the data type (primitive, domain-specific, or custom type). 
- -## Referencing Variables - -Steps reference variables by their ID (as a string): - -```yaml -steps: - - id: my_step - type: LLMInference - inputs: - - user_input # References the variable declared above - outputs: - - processed_output -``` - -The validator ensures that all referenced variables are declared in the flow's `variables` section. - ---8<-- "components/Variable.md" - ---8<-- "components/PrimitiveTypeEnum.md" - -## Domain Specific Types - -Domain specific types are included for common use cases (chat bots, RAG, etc) - - ---8<-- "components/ChatMessage.md" ---8<-- "components/ChatContent.md" ---8<-- "components/Embedding.md" diff --git a/docs/Concepts/Overview/flow-control.md b/docs/Concepts/Overview/flow-control.md deleted file mode 100644 index e9edd40e..00000000 --- a/docs/Concepts/Overview/flow-control.md +++ /dev/null @@ -1,271 +0,0 @@ -# Understand Flows and Variables - -QType flows orchestrate multi-step processing by connecting steps through shared variables. Understanding how variables flow between steps is essential for building effective AI workflows. - -## Understanding Flows - ---8<-- "components/Flow.md" - -A **Flow** executes steps in sequence, automatically passing data between them through variables. The key concept is **variable binding** - how step outputs become inputs for subsequent steps. - -```yaml -flows: - - id: simple_flow - steps: - - id: step1 - outputs: - - id: shared_data - type: text - - id: step2 - inputs: - - shared_data # Automatically receives output from step1 -``` - -## Variable Flow Between Steps - -Variables are the primary way data moves between steps. When a step produces an output variable, any subsequent step can use it as input by referencing its ID. 
- -### Basic Variable Passing - -```yaml -flows: - - id: data_pipeline - steps: - # Step 1: Create some data - - id: create_data - template: "User input: {user_question}" - inputs: - - id: user_question - type: text - outputs: - - id: formatted_input # This variable is now available - type: text - - # Step 2: Process the data - - id: process_data - model: gpt-4 - system_message: "Process this input thoughtfully." - inputs: - - formatted_input # References output from step1 - outputs: - - id: ai_response - type: text - - # Step 3: Format final output - - id: format_result - template: "Final result: {ai_response}" - inputs: - - ai_response # References output from step2 - outputs: - - id: final_output - type: text -``` - -### Variable Reuse - -Variables remain available throughout the flow, so later steps can access outputs from any earlier step: - -```yaml -flows: - - id: reuse_example - steps: - - id: step1 - outputs: - - id: original_data - type: text - - - id: step2 - outputs: - - id: processed_data - type: text - - - id: step3 - inputs: - - original_data # From step1 - - processed_data # From step2 - template: | - Original: {original_data} - Processed: {processed_data} -``` - -## Variable Types and Compatibility - -Variables have types that must be compatible between steps: - -```yaml -flows: - - id: type_example - steps: - - id: text_producer - outputs: - - id: text_data - type: text # Produces text - - - id: text_consumer - inputs: - - text_data # Expects text - compatible! 
- - - id: chat_step - inputs: - - id: message_input - type: ChatMessage # Different type - # text_data cannot be used here without conversion -``` - -## Multi-Step Flow Examples - -### Three-Step Processing Chain - -```yaml -id: processing_chain - -# Define reusable components at the application level -auths: - - id: openai_auth - type: api_key - api_key: ${OPENAI_KEY} - -models: - - id: gpt-4 - provider: openai - auth: openai_auth # Reference auth by ID - -flows: - - id: analyze_and_respond - steps: - # Step 1: Prepare the input - - id: prepare_input - template: | - Analyze this user question: {raw_question} - inputs: - - id: raw_question - type: text - outputs: - - id: prepared_prompt - type: text - - # Step 2: Get AI analysis - - id: analyze - model: gpt-4 - system_message: "Provide detailed analysis." - inputs: - - prepared_prompt - outputs: - - id: analysis_result - type: text - - # Step 3: Format the final response - - id: format_response - template: | - ## Analysis Results - - {analysis_result} - - *Generated in response to: {raw_question}* - inputs: - - analysis_result - - raw_question # Reusing from step 1 - outputs: - - id: final_response - type: text -``` - -## Common Variable Patterns - -### Variable Naming Conventions - -Use clear, descriptive names that indicate the data's purpose and lifecycle: - -```yaml -# ✅ Good variable names -flows: - - id: clear_naming - steps: - - id: extract_entities - outputs: - - id: extracted_entities # Clear what it contains - type: array - - - id: validate_entities - inputs: - - extracted_entities - outputs: - - id: validated_entities # Clear transformation - type: array - - - id: format_final_output - inputs: - - validated_entities - outputs: - - id: formatted_entity_report # Clear final purpose - type: text -``` - -### Multiple Input Variables - -Steps can use multiple variables from different earlier steps: - -```yaml -flows: - - id: multi_input_example - steps: - - id: get_user_data - outputs: - - id: user_info - 
type: text - - - id: get_preferences - outputs: - - id: user_preferences - type: text - - - id: personalize_response - template: | - Based on your info: {user_info} - And your preferences: {user_preferences} - Here's a personalized response... - inputs: - - user_info # From step 1 - - user_preferences # From step 2 - outputs: - - id: personalized_message - type: text -``` - -### Variable Scope and Lifecycle - -Variables are available from when they're created until the flow ends: - -```yaml -flows: - - id: variable_lifecycle - steps: - - id: early_step - outputs: - - id: early_data - type: text - # early_data is now available to all following steps - - - id: middle_step - inputs: - - early_data # Can use early_data - outputs: - - id: middle_data - type: text - # middle_data is now also available - - - id: late_step - inputs: - - early_data # Still available - - middle_data # Also available - # Can use variables from any earlier step -``` - -### Debugging Tips - -1. **Check variable names carefully** - Case-sensitive and must match exactly -2. **Verify step order** - Variables must be created before they're used -3. **Confirm types match** - Use conversion steps when needed -4. **Use descriptive names** - Easier to track data flow - -Understanding variable flow between steps is the key to building effective QType applications. Focus on clear variable names, proper ordering, and type compatibility to create reliable multi-step workflows. diff --git a/docs/Concepts/Steps/agent.md b/docs/Concepts/Steps/agent.md deleted file mode 100644 index 9b6bae74..00000000 --- a/docs/Concepts/Steps/agent.md +++ /dev/null @@ -1,54 +0,0 @@ -# Agent - -Agent is a specialized LLMInference step that combines language model capabilities with tool access, enabling AI assistants that can perform actions, make API calls, execute functions, and interact with external systems. Agents represent autonomous AI entities that can reason about tasks and use available tools to accomplish objectives. 
- -Agents extend the basic LLMInference functionality by providing access to a curated set of tools, allowing the AI to move beyond text generation to actual task execution and problem-solving. - -## Key Principles - -### Tool References by ID - -Agents reference tools by their IDs (as strings): - -```yaml -tools: - - type: PythonFunctionTool - id: calculator - name: calculate - # ... tool config - -flows: - - type: Flow - id: agent_flow - steps: - - type: Agent - id: my_agent - model: gpt4 - tools: - - calculator # References tool by ID -``` - -### Inherits from LLMInference - -Agents have all the capabilities of LLMInference steps, including: -- Model reference by ID -- Memory integration by ID -- System message support -- Explicit variable declarations for inputs/outputs - -## Rules and Behaviors - -- **Inherits LLMInference**: All LLMInference rules and behaviors apply (required model, explicit variables, memory integration, etc.). -- **Required Model**: Must reference a model ID defined in the application. -- **Optional Tools**: The `tools` field lists tool IDs available to the agent (defaults to empty list). -- **Tool Integration**: References tools by ID string (tools must be defined in the application). -- **Decision Making**: The agent autonomously decides which tools to use based on the input and conversation context. -- **Multi-Step Execution**: Can perform multiple tool calls and reasoning steps within a single agent invocation. - ---8<-- "components/Agent.md" - -## Related Concepts - -Agent steps extend [LLMInference](llm-inference.md) and require [Model](../Core/model.md) configurations and [Tool](../Core/tool.md) access. They may use [Memory](../Core/memory.md) for persistent context and can be orchestrated within [Flows](../Core/flow.md). 
- -## Example Usage diff --git a/docs/Concepts/Steps/decoder.md b/docs/Concepts/Steps/decoder.md deleted file mode 100644 index 9475525c..00000000 --- a/docs/Concepts/Steps/decoder.md +++ /dev/null @@ -1,53 +0,0 @@ -# Decoder - -Decoder steps parse and extract structured data from unstructured text, converting raw content into organized formats like JSON, XML, or custom data structures. They enable applications to transform natural language responses, documents, and other text sources into machine-readable formats for further processing. - -Decoders are essential for building robust data pipelines that can handle the variability of AI-generated content and extract actionable information from diverse text sources. - -## Rules and Behaviors - -- **Format Specification**: Decoders must specify the target output format (JSON, XML, CSV, etc.) -- **Schema Validation**: Output can be validated against predefined schemas to ensure data quality -- **Error Handling**: Decoders handle malformed input gracefully with configurable error strategies -- **Type Conversion**: Automatic conversion of extracted values to appropriate data types -- **Flexible Parsing**: Support for both strict and lenient parsing modes -- **Pattern Matching**: Can use regular expressions or custom patterns for extraction -- **Fallback Strategies**: Configurable behavior when parsing fails (return empty, use defaults, raise error) - -## Component Definition - ---8<-- "components/Decoder.md" - -## Configuration Options - -## Format Types - -### JSON Format -- **Purpose**: Extract JSON objects from text -- **Use Cases**: API responses, structured AI outputs, configuration parsing -- **Validation**: Schema-based validation with JSONSchema -- **Error Handling**: Syntax error recovery and partial extraction - -### XML Format -- **Purpose**: Parse XML documents and extract elements -- **Use Cases**: Document processing, legacy system integration, structured markup -- **Features**: Namespace support, XPath 
queries, attribute extraction -- **Output**: Converted to JSON structure or custom format - -### CSV Format -- **Purpose**: Parse comma-separated values -- **Use Cases**: Data imports, spreadsheet processing, tabular data extraction -- **Options**: Custom delimiters, header detection, type inference -- **Output**: Array of objects or structured table format - -### Custom Format -- **Purpose**: Pattern-based extraction using regular expressions -- **Use Cases**: Entity extraction, log parsing, custom data formats -- **Features**: Named capture groups, multiple patterns, validation rules -- **Output**: Dictionary of extracted values - -## Related Concepts - -Decoder steps often process outputs from [LLMInference](llm-inference.md) or [Agent](agent.md) steps, work within [Flows](../Core/flow.md) for data transformation pipelines, and may validate extracted data against [Variable](../Core/variable.md) type definitions. They integrate with other [Steps](index.md) for comprehensive data processing workflows. - -## Example Usage diff --git a/docs/Concepts/Steps/index.md b/docs/Concepts/Steps/index.md deleted file mode 100644 index 9f16e9c6..00000000 --- a/docs/Concepts/Steps/index.md +++ /dev/null @@ -1,42 +0,0 @@ -# Step - -A step represents any executable component that can take inputs and produce outputs within a QType application. Steps are the fundamental building blocks of workflows, providing a consistent interface for operations ranging from simple prompt templates to complex AI agent interactions. - -All steps share common properties (ID, inputs, outputs) while implementing specific behaviors for their domain. Steps can be composed into [Flows](../Core/flow.md) to create sophisticated pipelines, and they can reference each other to build modular, reusable applications. - -## Rules and Behaviors - -- **Unique IDs**: Each step must have a unique `id` within the application. Duplicate step IDs will result in a validation error. 
-- **Abstract Base Class**: Step is an abstract base class - you must use concrete implementations for actual functionality. -- **Input/Output Variables**: Steps define their interface through optional `inputs` and `outputs` lists that specify the data they consume and produce. -- **Variable References**: Input and output variables can be specified as Variable objects or as string references to variables defined elsewhere. -- **Optional Interface**: Both `inputs` and `outputs` are optional - some steps may infer them automatically or have default behaviors. -- **Flow Integration**: All steps can be included in flows and can be referenced by other steps. -- **Polymorphic Usage**: Steps can be used polymorphically - any step type can be used wherever a Step is expected. - -## Component Definition - ---8<-- "components/Step.md" - -## Step Types - -QType provides several categories of steps for different use cases: - -### AI and Language Model Steps -- **[LLMInference](llm-inference.md)** - Direct language model inference with prompts -- **[Agent](agent.md)** - AI agents with tool access and decision-making capabilities -- **[PromptTemplate](prompt-template.md)** - Dynamic prompt generation with variable substitution - -### Tool and Integration Steps -- **[Tool](../Core/tool.md)** - External integrations and function execution (Tools can also be used as steps) - -### Search and Retrieval Steps -- **[Search Steps](search.md)** - Vector similarity search and document search operations - -### Control Flow and Processing Steps -- **[Flow](../Core/flow.md)** - Orchestration of multiple steps (see [Flow concept](../Core/flow.md) for detailed information) -- **[Decoder](decoder.md)** - Structured data parsing and extraction - -## Related Concepts - -Steps are orchestrated by [Flows](../Core/flow.md), may reference [Models](../Core/model.md) for AI operations, can use [Tools](../Core/tool.md) for external integrations, and access [Indexes](../Core/indexes.md) for search 
operations. They also define and consume [Variables](../Core/variable.md) for data flow. diff --git a/docs/Concepts/Steps/llm-inference.md b/docs/Concepts/Steps/llm-inference.md deleted file mode 100644 index fae9cfdf..00000000 --- a/docs/Concepts/Steps/llm-inference.md +++ /dev/null @@ -1,62 +0,0 @@ -# LLMInference - -LLMInference is a step that performs direct language model inference, sending prompts to AI models and capturing their responses. It provides the core interface for integrating large language models into QType workflows, supporting both simple text generation and complex conversational interactions. - -LLMInference steps can maintain conversation context through memory, apply system prompts for role-setting, and process inputs/outputs concurrently when configured. - -## Key Principles - -### Explicit Variable Declaration - -All inputs and outputs must be declared in the flow's `variables` section and referenced by ID: - -```yaml -flows: - - type: Flow - id: my_flow - variables: - - id: user_prompt - type: text - - id: ai_response - type: text - steps: - - type: LLMInference - id: llm_step - model: gpt4 - inputs: - - user_prompt # References declared variable - outputs: - - ai_response -``` - -### Model Reference by ID - -The `model` field references a model by its ID (as a string): - -```yaml -models: - - type: Model - id: gpt4 - provider: openai - -flows: - - steps: - - type: LLMInference - model: gpt4 # String reference to model ID -``` - -## Rules and Behaviors - -- **Required Model**: The `model` field is mandatory and must reference a model ID defined in the application. -- **Required Variables**: All inputs and outputs must be declared in the flow's `variables` section. -- **Memory Integration**: Can optionally reference a Memory object by ID to maintain conversation history and context. -- **System Message**: Optional `system_message` field sets the AI's role and behavior context. 
-- **Concurrency Support**: Supports `concurrency_config` for processing multiple inputs concurrently. - ---8<-- "components/LLMInference.md" - -## Related Concepts - -LLMInference steps require [Model](../Core/model.md) configurations, may use [Memory](../Core/memory.md) for context retention, often consume output from [PromptTemplate](prompt-template.md) steps, and are extended by [Agent](agent.md) steps for tool-enabled interactions. - -## Example Usage diff --git a/docs/Concepts/Steps/prompt-template.md b/docs/Concepts/Steps/prompt-template.md deleted file mode 100644 index c7d65fc1..00000000 --- a/docs/Concepts/Steps/prompt-template.md +++ /dev/null @@ -1,49 +0,0 @@ -# PromptTemplate - -PromptTemplate is a step that generates dynamic prompts by substituting variables into string templates. It enables the creation of reusable prompt patterns that can be customized with different inputs, making it easy to build flexible prompt-driven workflows. - -PromptTemplate steps are particularly useful for preprocessing inputs before sending them to language models, creating consistent prompt formats across different use cases, and building modular prompt libraries. - -## Key Principles - -### Explicit Variable Declaration - -All inputs and outputs must be declared in the flow's `variables` section: - -```yaml -flows: - - type: Flow - id: my_flow - variables: - - id: user_name - type: text - - id: generated_prompt - type: text - steps: - - type: PromptTemplate - id: greeting_prompt - template: "Hello {user_name}, how can I help you today?" - inputs: - - user_name - outputs: - - generated_prompt -``` - -### Variable Substitution - -Uses standard string formatting with curly braces `{variable_name}` for placeholders. The variable names in the template must match the IDs of input variables. - -## Rules and Behaviors - -- **Required Template**: The `template` field is mandatory and contains the string template with variable placeholders. 
-- **Required Variables**: All inputs and outputs must be declared in the flow's `variables` section. -- **Variable Substitution**: Template placeholders must correspond to input variable IDs. -- **Input Flexibility**: Can accept any number of input variables that correspond to template placeholders. - ---8<-- "components/PromptTemplate.md" - -## Related Concepts - -PromptTemplate steps are often used before [LLMInference](llm-inference.md) or [Agent](agent.md) steps to prepare prompts, and they consume [Variables](../Core/variable.md) for dynamic content generation. - -## Example Usage diff --git a/docs/Concepts/Steps/search.md b/docs/Concepts/Steps/search.md deleted file mode 100644 index 381bd052..00000000 --- a/docs/Concepts/Steps/search.md +++ /dev/null @@ -1,56 +0,0 @@ -# Search - -Search steps enable information retrieval from indexed data using different search strategies. QType provides both vector-based semantic search and traditional document search capabilities, allowing applications to find relevant information based on user queries. - -Search steps integrate with [Indexes](../Core/indexes.md) to perform efficient retrieval operations and can be combined with other steps to build sophisticated question-answering and information retrieval workflows. 
- -## Rules and Behaviors - -- **Index Dependency**: Search steps require a valid index reference to perform search operations -- **Query Processing**: Input queries are processed according to the search type (semantic embedding for vector search, text matching for document search) -- **Result Ranking**: Search results are automatically ranked by relevance score -- **Configurable Limits**: Number of returned results can be controlled via configuration parameters -- **Type Safety**: Search steps validate that the referenced index supports the requested search operation -- **Empty Results**: Search steps handle cases where no matching documents are found gracefully -- **Similarity Thresholds**: Vector search can filter results based on minimum similarity scores - -## Vector Search - -Vector search performs semantic similarity matching using embeddings to find conceptually related content. - -### Component Definition - ---8<-- "components/VectorSearch.md" - -### Configuration - -### Key Properties - -- **index**: Reference to a vector index containing embedded documents -- **top_k**: Maximum number of results to return (default: 10) -- **similarity_threshold**: Minimum similarity score for results (0.0-1.0) -- **embedding_model**: Optional override for query embedding generation - -## Document Search - -Document search performs traditional text-based search using keyword matching and full-text search capabilities. 
- -### Component Definition - ---8<-- "components/DocumentSearch.md" - -### Configuration - -### Key Properties - -- **index**: Reference to a document index with full-text search capabilities -- **max_results**: Maximum number of documents to return -- **search_fields**: Specific fields to search within documents -- **boost_factors**: Relevance boosting for specific fields -- **filters**: Additional filtering criteria for search results - -## Related Concepts - -Search steps work with [Indexes](../Core/indexes.md) for data storage and retrieval, may use [Models](../Core/model.md) for embedding generation in vector search, and integrate with [Flows](../Core/flow.md) for complex information retrieval pipelines. Results are typically consumed by other [Steps](index.md) for further processing. - -## Example Usage diff --git a/docs/Concepts/mental-model-and-philosophy.md b/docs/Concepts/mental-model-and-philosophy.md new file mode 100644 index 00000000..1c72be4d --- /dev/null +++ b/docs/Concepts/mental-model-and-philosophy.md @@ -0,0 +1,363 @@ +# Mental Model & Philosophy + +## What is QType? + +**QType is a domain-specific language (DSL) for rapid prototyping of AI applications.** + +QType is a declarative, text-based language that lets you specify *what* your AI application should do, not *how* to do it. You write YAML specifications that describe flows, steps, models, and data transformations, and QType handles the execution. + +**Elevator pitch:** QType turns AI application prototypes from days of Python coding into hours of YAML configuration, without sacrificing maintainability or requiring you to learn yet another GUI tool.
+ +--- + +## Core Mental Model: The Three Primitives + +Understanding QType requires understanding three core concepts and how they relate: + +**Think of QType like this:** + +**Variables** are your data containers (typed boxes) +**Steps** are your transformations (functions on boxes) +**Flows** are your pipelines (sequences of transformations) +**The DSL** is your specification language (what you write) +**The Semantic layer** is your validator (what checks it) +**The Interpreter** is your executor (what runs it) + +**You declare what you want, QType handles how to do it.** + + +### 1. Variables: The Data + +**Variables are typed data containers** that hold values as they move through your application. + +```yaml +variables: + - id: question # A variable named "question" + type: text # It holds text data + - id: answer + type: text + - id: reviews + type: list[text] # Can hold complex types like lists +``` + +**Key insight:** Variables are *declared* upfront, making data flow explicit before runtime. + +**Types matter:** Every variable has a type (primitive like `text`/`int`, domain-specific like `ChatMessage`, or custom types you define). + +--- + +### 2. Steps: The Transformations + +**Steps are individual operations** that take input variables and produce output variables. + +```yaml +steps: + - id: format_prompt + type: PromptTemplate + template: "Answer this: {question}" + inputs: + - question # Consumes the question variable + outputs: + - prompt # Produces a prompt variable +``` + +**Each step:** +- Has a specific type (`PromptTemplate`, `LLMInference`, `InvokeTool`, etc.) +- Declares which variables it reads (`inputs`) +- Declares which variables it produces (`outputs`) +- Performs one focused transformation + +**Key insight:** Steps are pure transformations. Everything is declared, making flows inspectable and debuggable. 
+ +**Step types are extensible:** QType ships with ~25 step types (LLMs, tools, data processing, RAG operations), and you can write custom tools for domain-specific operations. + +--- + +### 3. Flows: The Orchestration + +**Flows are sequences of steps** that form complete processing pipelines. + +```yaml +flows: + - id: answer_question + inputs: + - question # What comes in + outputs: + - answer # What goes out + variables: # All data containers + - id: question + type: text + - id: prompt + type: text + - id: answer + type: text + steps: + - id: format_prompt + type: PromptTemplate + # ... (transforms question → prompt) + - id: get_answer + type: LLMInference + # ... (transforms prompt → answer) +``` + +**Flows are data pipelines:** +- They receive input variables +- Pass them through a sequence of steps +- Each step transforms data from one form to another +- Final outputs are extracted and returned + +**Key insight:** Flows are *stateless* by default - each execution is independent. Use Memory or external storage for stateful applications (like chatbots). This makes flows easy to reason about, test, and parallelize. + +--- + +## The Data Flow Model + +Here's how data moves through a QType application: + +``` +Input Variables + ↓ + Step 1 (transforms A → B) + ↓ + Step 2 (transforms B → C) + ↓ + Step 3 (transforms C → D) + ↓ +Output Variables +``` + +**Linear execution:** Steps run sequentially in declaration order. Each step waits for its inputs to be available. Parallelism is supported for multiple inputs. + +**1-to-many cardinality:** Some steps (like `Explode`) can produce multiple outputs for one input, creating fan-out patterns. Other steps (like `Collect`) aggregate many inputs into one output. This enables batch processing patterns. 
+ +--- + +## Architecture: The Three Layers + +QType is built in three distinct layers, each with a specific responsibility: + +``` +┌─────────────────────────────────────────────┐ +│ CLI Commands │ +│ (validate, run, serve) │ +├─────────────────────────────────────────────┤ +│ Interpreter │ +│ (execution engine) │ +├─────────────────────────────────────────────┤ +│ Semantic │ +│ (linking & validation) │ +├─────────────────────────────────────────────┤ +│ DSL │ +│ (core data models) │ +└─────────────────────────────────────────────┘ +``` + +### Layer 1: DSL (Domain-Specific Language) + +**Responsibility:** Define the data structures that represent QType specifications. + +- Pure Pydantic models +- No business logic, just structure +- Represents YAML as typed Python objects +- References are strings (like `model: "gpt-4"`) + +**Example:** The `Flow` model has `steps: list[Step]`, `variables: list[Variable]`, etc. + +--- + +### Layer 2: Semantic + +**Responsibility:** Transform DSL objects into resolved, validated representations. + +**The pipeline:** + +1. **Parse** - Load YAML and build DSL objects +2. **Link** - Resolve string references to actual objects (`"gpt-4"` → `Model` object) +3. **Resolve** - Build semantic IR (intermediate representation) where all IDs become object references +4. **Check** - Validate semantic rules (no missing variables, types match, etc.) + +**Key insight:** This layer catches errors *before* execution. You get fast feedback without running expensive LLM calls. + +**Symbol table:** During linking, QType builds a map of all components by ID. + +--- + +### Layer 3: Interpreter + +**Responsibility:** Execute flows by running steps with real resources. 
+ +- Creates executors for each step type +- Manages resources (models, indexes, caches) +- Handles streaming and progress tracking +- Emits telemetry events +- Orchestrates async execution + +**Executor pattern:** Each step type has an executor class (`LLMInferenceExecutor`, `InvokeToolExecutor`, etc.) that knows how to run that specific operation. Executors receive `ExecutorContext` with cross-cutting concerns like auth, telemetry, and progress tracking. + +**Key insight:** The interpreter layer is optional - you could generate code from semantic IR, compile to a different runtime, or build alternative execution strategies. The DSL and semantic layers are independent of execution. + +--- + +## The Loading Pipeline + +When you run `qtype validate` or `qtype run`, here's what happens: + +``` +YAML File + ↓ +1. Load (expand env vars, includes) + ↓ +2. Parse (YAML → DSL models) + ↓ +3. Link (resolve ID references) + ↓ +4. Resolve (DSL → Semantic IR) + ↓ +5. Check (validate semantics) + ↓ +6. Execute (run the flow) +``` +Each stage builds on the previous, and errors are caught as early as possible. + +--- + +## Philosophy: Why QType Exists + +### 1. **Code is a Liability** + +Every line of Python code you write is something you have to maintain, debug, and explain to colleagues. QType shifts complexity from *imperative code* (how to do it) to *declarative specification* (what to do). + +**Example:** Instead of writing Python to: +- Initialize an LLM client +- Format prompts +- Handle streaming +- Parse JSON responses +- Construct typed objects +- Log telemetry + +You write YAML that *declares* these operations, and QType handles the implementation. + +--- + +### 2. 
**Modularity and Composability** + +QType applications are built from composable pieces: +- **Flows** can invoke other flows +- **Tools** are reusable functions +- **Types** define domain models +- **Models** and **Memories** are shared resources + +You can build libraries of flows, tools, and types that work together like Lego blocks. + +--- + +### 3. **Traceability and Observability** + +Because everything is declared: +- You can visualize the entire application structure +- Trace data flow through the system +- Emit structured telemetry +- Understand what's happening without reading code + +OpenTelemetry (OTel) observability is supported by default. + +QType makes the *implicit* (hidden in code) *explicit* (visible in YAML). + +--- + +### 4. **Rapid Iteration** + +Changing a QType application is fast: +- Edit YAML +- Validate +- Run + +No recompiling, no virtual environment issues, no import errors. The feedback loop is seconds, not minutes. + +--- + +## What QType Is NOT + +### ❌ Not a Low-Code/No-Code Tool + +QType is not Flowise, Dify, LangFlow, or similar GUI-based agent builders. + +**Why not:** +- **Audience:** QType targets *engineers* who want text-based specifications they can version control, code review, and integrate into CI/CD +- **Control:** GUI tools trade precision and flexibility for convenience. QType gives you full control via explicit configuration, and can connect to APIs or your code. +- **Complexity ceiling:** GUIs work great for simple flows but become unwieldy for complex applications with dozens of components. YAML scales better for large systems + +**When to use GUI tools:** If you're non-technical or building simple demo flows, GUI tools are faster. If you're an engineer building prototype systems, QType is more maintainable. + +--- + +### ❌ Not a General Data Engineering Tool + +QType is not Dagster, Prefect, Airflow, or similar orchestration frameworks.
+ +**Why not:** +- **Focus:** Data engineering tools excel at *data pipelines* (ETL, batch processing, scheduling). QType excels at *AI workflows* (LLM calls, vector search, tool calling, agents) +- **Features:** Dagster has sophisticated scheduling, retries, dependency management, and data lineage. QType has LLM abstractions, type systems for AI data (ChatMessage, RAGDocument), and streaming support +- **Overlap:** Both can process data in pipelines, but the primitives are different + +**When to use data engineering tools:** If your workflow is primarily data transformation, aggregation, and movement without AI components, use Dagster/Airflow. They're better at traditional ETL. + +**When to use QType:** If your workflow involves LLMs, embeddings, vector search, tool calling, or agents, QType gives you purpose-built primitives. You *could* build these in Dagster, but QType makes it easier. + +**Can they coexist?** Yes! Use Dagster to orchestrate data pipelines that feed into QType applications, or use QType flows as Dagster ops for AI-specific processing. 
+ +--- + +## When to Use QType + +### ✅ Use QType When: + +**You're prototyping AI applications** +- Quickly try different LLMs, prompts, and flows +- Iterate on application structure without Python boilerplate +- Get validation feedback instantly + +**You want type-safe AI workflows** +- Explicit data flow with typed variables +- Catch errors before runtime +- Understand what data flows where + +**You're building modular AI systems** +- Reusable flows, tools, and types +- Compose applications from libraries +- Share components across projects + +**You value maintainability** +- YAML specs are easier to review than Python +- Version control shows exactly what changed +- Generate documentation automatically + +**You need observability** +- Built-in telemetry and tracing +- Visualize application structure +- Understand execution patterns + +--- + +### 🤔 Consider Alternatives When: + +**You need complete Python control** +- Complex branching logic +- Dynamic behavior based on runtime conditions +- Integration with Python-specific libraries + +**You're building pure data pipelines** +- No LLM or AI components +- Traditional ETL operations +- Dagster/Airflow are better fits + +**You prefer visual tools** +- GUI-based development +- Non-technical users +- Flowise/Dify are more appropriate + +**Your workflow is extremely simple** +- Single LLM call, no orchestration +- Direct API usage is simpler +- QType adds unnecessary structure + diff --git a/docs/Gallery/dataflow_pipelines.md b/docs/Gallery/dataflow_pipelines.md new file mode 100644 index 00000000..1b745dd9 --- /dev/null +++ b/docs/Gallery/dataflow_pipelines.md @@ -0,0 +1,80 @@ +# LLM Processing Pipelines + +## Overview + +An automated data processing pipeline that reads product reviews from a SQLite database and analyzes each review's sentiment using an LLM. 
This example demonstrates QType's dataflow capabilities with database sources, parallel LLM processing, and streaming results without requiring batch operations. + +## Architecture + +```mermaid +--8<-- "Gallery/dataflow_pipelines.mermaid" +``` + +## Complete Code + +```yaml +--8<-- "../examples/data_processing/dataflow_pipelines.qtype.yaml" +``` + +## Key Features + +- **SQLSource Step**: Database source that executes SQL queries using SQLAlchemy connection strings and emits one message per result row, enabling parallel processing of database records through downstream steps +- **PromptTemplate Step**: Template engine with curly-brace variable substitution (`{product_name}`, `{rating}`) that dynamically generates prompts from message variables for each review +- **LLMInference Step**: Processes each message independently through the language model with automatic parallelization, invoking AWS Bedrock inference for all reviews concurrently +- **Multi-record Flow**: Each database row becomes an independent FlowMessage flowing through the pipeline in parallel, carrying variables (review_id, product_name, rating, review_text) and accumulating new fields (llm_analysis) at each step +- **Message Sink**: The final step accumulates all records and writes them to an output file. + +## Running the Example + +### Setup + +First, create the sample database with product reviews: + +```bash +python examples/data_processing/create_sample_db.py +``` + +This generates a SQLite database with 10 sample product reviews covering various products and sentiments. 
+ +### Run the Pipeline + +Process all reviews and generate the analysis with real-time progress monitoring: + +```bash +qtype run -i '{"output_path":"results.parquet"}' --progress examples/data_processing/dataflow_pipelines.qtype.yaml +``` + +The `--progress` flag displays a live dashboard showing: +- Message throughput for each step (msg/s) +- Success/error counts +- Processing duration with visual progress bars + +Example output: +``` +╭─────────────────────────────────────────────────────────────────────────────── Flow Progress ───────────────────────────────────────────────────────────────────────────────╮ +│ │ +│ Step load_reviews 1.6 msg/s ▁▁▁▁▃▃▃▃▅▅▅▅████████ ✔ 10 succeeded ✖ 0 errors ⟳ - hits ✗ - misses 0:00:06 │ +│ Step create_prompt 1.6 msg/s ▁▁▁▁▃▃▃▃▅▅▅▅████████ ✔ 10 succeeded ✖ 0 errors ⟳ - hits ✗ - misses 0:00:06 │ +│ Step analyze_sentiment 2.0 msg/s ▄▄▄▄▆▆▆▆▅▅▅▅███████▁ ✔ 10 succeeded ✖ 0 errors ⟳ - hits ✗ - misses 0:00:04 │ +│ Step write_results - msg/s ✔ 1 succeeded ✖ 0 errors ⟳ - hits ✗ - misses 0:00:00 │ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +``` + +You'll notice that the output shows 1 message for `write_results` and 10 for the others. That is because it is reporting the number of messages _emitted_ from each step, and `write_results` is a sink that collects all messages. 
+ +The final message of the output will be the result file where the data are written: + +``` +2026-01-16 11:23:35,151 - INFO: ✅ Flow execution completed successfully +2026-01-16 11:23:35,151 - INFO: Processed 1 em +2026-01-16 11:23:35,152 - INFO: +Results: +result_file: results.parquet +``` + +## Learn More + +- Tutorial: [Your First QType Application](../../Tutorials/01_hello_world.md) +- Example: [Simple Chatbot](./simple_chatbot.md) diff --git a/docs/Gallery/dataflow_pipelines.mermaid b/docs/Gallery/dataflow_pipelines.mermaid new file mode 100644 index 00000000..17e929a6 --- /dev/null +++ b/docs/Gallery/dataflow_pipelines.mermaid @@ -0,0 +1,45 @@ +flowchart TD + subgraph APP ["📱 review_analysis_pipeline"] + direction TB + + subgraph FLOW_0 ["🔄 analyze_reviews"] + direction TB + FLOW_0_START@{shape: circle, label: "▶️ Start"} + FLOW_0_S0@{shape: rect, label: "⚙️ load_reviews"} + FLOW_0_S1@{shape: doc, label: "📄 create_prompt"} + FLOW_0_S2@{shape: rounded, label: "✨ analyze_sentiment"} + FLOW_0_S3@{shape: rect, label: "⚙️ write_results"} + FLOW_0_S0 -->|product_name| FLOW_0_S1 + FLOW_0_S0 -->|rating| FLOW_0_S1 + FLOW_0_S0 -->|review_text| FLOW_0_S1 + FLOW_0_S1 -->|analysis_prompt| FLOW_0_S2 + FLOW_0_S0 -->|review_id| FLOW_0_S3 + FLOW_0_S0 -->|product_name| FLOW_0_S3 + FLOW_0_S0 -->|rating| FLOW_0_S3 + FLOW_0_S0 -->|review_text| FLOW_0_S3 + FLOW_0_S2 -->|llm_analysis| FLOW_0_S3 + FLOW_0_START -->|output_path| FLOW_0_S3 + end + + subgraph RESOURCES ["🔧 Shared Resources"] + direction LR + MODEL_NOVA_LITE@{shape: rounded, label: "✨ nova_lite (aws-bedrock)" } + end + + end + + FLOW_0_S2 -.->|uses| MODEL_NOVA_LITE + + %% Styling + classDef appBox fill:none,stroke:#495057,stroke-width:3px + classDef flowBox fill:#e1f5fe,stroke:#0277bd,stroke-width:2px + classDef llmNode fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px + classDef modelNode fill:#e8f5e8,stroke:#2e7d32,stroke-width:2px + classDef authNode fill:#fff3e0,stroke:#ef6c00,stroke-width:2px + classDef telemetryNode 
fill:#fce4ec,stroke:#c2185b,stroke-width:2px + classDef resourceBox fill:#f5f5f5,stroke:#616161,stroke-width:1px + + class APP appBox + class FLOW_0 flowBox + class RESOURCES resourceBox + class TELEMETRY telemetryNode \ No newline at end of file diff --git a/docs/Gallery/simple_chatbot.md b/docs/Gallery/simple_chatbot.md new file mode 100644 index 00000000..f7c5ca32 --- /dev/null +++ b/docs/Gallery/simple_chatbot.md @@ -0,0 +1,36 @@ +# Simple Chatbot + +## Overview + +A friendly conversational chatbot with memory that maintains context across multiple conversation turns. This example demonstrates the minimal setup needed to create a stateful chatbot using AWS Bedrock, perfect for getting started with conversational AI applications. + +## Architecture + +```mermaid +--8<-- "Gallery/simple_chatbot.mermaid" +``` + +## Complete Code + +```yaml +--8<-- "../examples/conversational_ai/simple_chatbot.qtype.yaml" +``` + +## Key Features + +- **Conversational Interface**: This instructs the front-end to create a conversational user experience. 
+- **Memory**: Conversation history buffer with `token_limit` (10,000) that stores messages and automatically flushes oldest content when limit is exceeded +- **ChatMessage Type**: Built-in domain type with `role` field (user/assistant/system) and `blocks` list for structured multi-modal content +- **LLMInference Step**: Executes model inference with optional `system_message` prepended to conversation and `memory` reference for persistent context across turns +- **Model Configuration**: Model resource with provider-specific `inference_params` including `temperature` (randomness) and `max_tokens` (response length limit) + +## Running the Example + +```bash +# Start the chatbot server +qtype serve examples/conversational_ai/simple_chatbot.qtype.yaml +``` + +## Learn More + +- Tutorial: [Building a Stateful Chatbot](../../Tutorials/02_conversational_chat.md) diff --git a/docs/Tutorials/chat_with_telemetry.mermaid b/docs/Gallery/simple_chatbot.mermaid similarity index 54% rename from docs/Tutorials/chat_with_telemetry.mermaid rename to docs/Gallery/simple_chatbot.mermaid index 988f67c3..6938e629 100644 --- a/docs/Tutorials/chat_with_telemetry.mermaid +++ b/docs/Gallery/simple_chatbot.mermaid @@ -1,28 +1,24 @@ flowchart TD - subgraph APP ["📱 Application: hello_world"] + subgraph APP ["📱 simple_chatbot"] direction TB - subgraph FLOW_0 ["💬 Flow: chat_example"] + subgraph FLOW_0 ["🔄 chat_flow"] direction LR - FLOW_0_S0@{shape: rounded, label: "✨ llm_inference_step"} + FLOW_0_START@{shape: circle, label: "▶️ Start"} + FLOW_0_S0@{shape: rounded, label: "✨ generate_response"} + FLOW_0_START -->|user_message| FLOW_0_S0 end subgraph RESOURCES ["🔧 Shared Resources"] direction LR - AUTH_OPENAI_AUTH@{shape: hex, label: "🔐 openai_auth\nAPI_KEY"} - MODEL_GPT_4@{shape: rounded, label: "✨ gpt-4 (openai)" } - MODEL_GPT_4 -.->|uses| AUTH_OPENAI_AUTH - end - - subgraph TELEMETRY ["📊 Observability"] - direction TB - TEL_SINK@{shape: curv-trap, label: "📡 
hello_world_telemetry\nhttp://localhost:6006/v1/traces"} + MODEL_NOVA_LITE@{shape: rounded, label: "✨ nova_lite (aws-bedrock)" } + MEM_CONVERSATION_MEMORY@{shape: win-pane, label: "🧠 conversation_memory (10KT)"} end end - FLOW_0_S0 -.->|uses| MODEL_GPT_4 - FLOW_0_S0 -.->|traces| TEL_SINK + FLOW_0_S0 -.->|uses| MODEL_NOVA_LITE + FLOW_0_S0 -.->|stores| MEM_CONVERSATION_MEMORY %% Styling classDef appBox fill:none,stroke:#495057,stroke-width:3px diff --git a/docs/How To/Authentication/configure_aws_authentication.md b/docs/How To/Authentication/configure_aws_authentication.md new file mode 100644 index 00000000..c1a4e0a5 --- /dev/null +++ b/docs/How To/Authentication/configure_aws_authentication.md @@ -0,0 +1,60 @@ +# Configure AWS Authentication + +AWS Bedrock and other AWS services require authentication, which can be configured using access keys, AWS profiles, or role assumption. + +### QType YAML + +```yaml +auths: + # Method 1: AWS Profile (recommended) + - type: aws + id: aws_profile + profile_name: default + region: us-east-1 + + # Method 2: Access Keys (for CI/CD) + - type: aws + id: aws_keys + access_key_id: AKIAIOSFODNN7EXAMPLE + secret_access_key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + region: us-east-1 + + # Method 3: Role Assumption + - type: aws + id: aws_role + profile_name: base_profile + role_arn: arn:aws:iam::123456789012:role/MyRole + role_session_name: qtype-session + region: us-east-1 + +models: + - type: Model + id: nova + provider: aws-bedrock + model_id: us.amazon.nova-micro-v1:0 + auth: aws_profile +``` + +### Explanation + +- **type: aws**: Declares an AWS authentication provider +- **profile_name**: Uses credentials from `~/.aws/credentials` (recommended for local development) +- **access_key_id / secret_access_key**: Explicit credentials (use environment variables or secret manager) +- **session_token**: Temporary credentials for AWS STS sessions +- **role_arn**: ARN of IAM role to assume (requires base credentials via profile or keys) 
+- **role_session_name**: Session identifier when assuming a role +- **external_id**: External ID for cross-account role assumption +- **region**: AWS region for API calls (e.g., `us-east-1`, `us-west-2`) + +## Complete Example + +```yaml +--8<-- "../examples/authentication/aws_authentication.qtype.yaml" +``` + +## See Also + +- [AWSAuthProvider Reference](../../components/AWSAuthProvider.md) +- [Model Reference](../../components/Model.md) +- [How-To: Use API Key Authentication](use_api_key_authentication.md) +- [How-To: Manage Secrets with Secret Manager](../Authentication/manage_secrets.md) diff --git a/docs/How To/Authentication/use_api_key_authentication.md b/docs/How To/Authentication/use_api_key_authentication.md new file mode 100644 index 00000000..b76c1c41 --- /dev/null +++ b/docs/How To/Authentication/use_api_key_authentication.md @@ -0,0 +1,40 @@ +# Use API Key Authentication + +Authenticate with model providers like OpenAI using API keys, either from environment variables or stored in secret managers. 
+ +### QType YAML + +```yaml +auths: + - type: api_key + id: openai_auth + api_key: ${OPENAI_KEY} + host: https://api.openai.com + +models: + - type: Model + id: gpt-4 + provider: openai + model_id: gpt-4-turbo + auth: openai_auth +``` + +### Explanation + +- **type: api_key**: Specifies this is an API key-based authentication provider +- **api_key**: The API key value, typically loaded from an environment variable using `${VAR_NAME}` syntax +- **host**: Base URL or domain of the provider (optional, some providers infer this) +- **auth**: Reference to the auth provider by its ID when configuring models + +## Complete Example + +```yaml +--8<-- "../examples/tutorials/01_hello_world.qtype.yaml" +``` + +## See Also + +- [APIKeyAuthProvider Reference](../../components/APIKeyAuthProvider.md) +- [Use Environment Variables](../Language%20Features/use_environment_variables.md) +- [Model Reference](../../components/Model.md) +- [Tutorial: Your First QType Application](../../Tutorials/your_first_qtype_application.md) diff --git a/docs/How To/Command Line Usage/load_multiple_inputs_from_files.md b/docs/How To/Command Line Usage/load_multiple_inputs_from_files.md new file mode 100644 index 00000000..d869b010 --- /dev/null +++ b/docs/How To/Command Line Usage/load_multiple_inputs_from_files.md @@ -0,0 +1,62 @@ +# Load Multiple Inputs from Files + +Process multiple inputs in batch by loading data from CSV, JSON, Parquet, or Excel files using the `--input-file` CLI flag, enabling bulk processing without manual JSON construction. + +### CLI Command + +```bash +qtype run app.qtype.yaml --input-file inputs.csv +``` + +### Supported File Formats + +- **CSV**: Columns map to input variable names +- **JSON**: Array of objects or records format +- **Parquet**: Efficient columnar format for large datasets +- **Excel**: `.xlsx` or `.xls` files + +### How It Works + +When you provide `--input-file`, QType: +1. Reads the file into a pandas DataFrame +2. 
Each row becomes one execution of the flow +3. Column names must match flow input variable IDs +4. Processes rows with configured concurrency +5. Returns results as a DataFrame (can be saved with `--output`) + +## Complete Example + +**batch_inputs.csv:** +```csv +--8<-- "../examples/data_processing/batch_inputs.csv" +``` + +**Application:** +```yaml +--8<-- "../examples/data_processing/batch_processing.qtype.yaml" +``` + +**Run the batch:** +```bash +# Process all rows from CSV +qtype run batch_processing.qtype.yaml --input-file batch_inputs.csv + +# Save results to Parquet +qtype run batch_processing.qtype.yaml \ + --input-file batch_inputs.csv \ + --output results.parquet +``` + +### Explanation + +- **--input-file (-I)**: Path to file containing input data (CSV, JSON, Parquet, Excel) +- **Column mapping**: CSV column names must match flow input variable IDs exactly +- **Batch processing**: Each row is processed as a separate flow execution +- **--output (-o)**: Optional path to save results as Parquet file +- **Parallel processing**: Steps that support concurrency will process multiple rows in parallel + +## See Also + + + +- [Example: Dataflow Pipeline](../../Gallery/Data%20Processing/dataflow_pipelines.md) diff --git a/docs/How To/Command Line Usage/pass_inputs_on_the_cli.md b/docs/How To/Command Line Usage/pass_inputs_on_the_cli.md new file mode 100644 index 00000000..6cd8f561 --- /dev/null +++ b/docs/How To/Command Line Usage/pass_inputs_on_the_cli.md @@ -0,0 +1,52 @@ +# Pass Inputs On The CLI + +Provide input values to your QType flows directly from the command line using JSON-formatted input data, enabling dynamic parameterization of applications without modifying YAML files. 
+ +### CLI Usage + +```bash +# Pass a single input variable +qtype run -i '{"user_name":"Alice"}' app.qtype.yaml + +# Pass multiple input variables +qtype run -i '{"model_id":"claude-3", "temperature":0.7}' app.qtype.yaml + +# Pass complex nested structures +qtype run -i '{"config":{"max_tokens":1000,"top_p":0.9}}' app.qtype.yaml + +# Specify which flow to run with inputs +qtype run -f analyze_data -i '{"threshold":0.85}' app.qtype.yaml +``` + +### Explanation + +- **`-i`, `--input`**: Accepts a JSON blob containing key-value pairs where keys match variable names declared in your flow's `inputs` field +- **JSON format**: Must be valid JSON with double quotes for strings, properly escaped special characters +- **Flow inputs**: The variables must match those declared in the flow's `inputs` list or the application's `inputs` list +- **`-f`, `--flow`**: Specifies which flow to run when your application contains multiple flows (defaults to first flow if omitted) + +## Complete Example + +The [LLM Processing Pipelines](../../Gallery/dataflow_pipelines.md) example demonstrates passing the output file path as a CLI input: + +```bash +# Run the pipeline with a custom output path +qtype run -i '{"output_path":"results.parquet"}' \ + --progress \ + examples/data_processing/dataflow_pipelines.qtype.yaml +``` + +The flow declares `output_path` in its inputs: + +```yaml +flows: + - name: analyze_reviews + inputs: + - output_path # Receives value from CLI -i flag +``` + +## See Also + +- [Load Multiple Inputs from Files](load_inputs_from_files.md) +- [Use Session Inputs for Sticky Variables](../Language%20Features/use_session_inputs.md) +- [Example: LLM Processing Pipelines](../../Gallery/dataflow_pipelines.md) diff --git a/docs/How To/Command Line Usage/serve_with_auto_reload.md b/docs/How To/Command Line Usage/serve_with_auto_reload.md new file mode 100644 index 00000000..c523ef84 --- /dev/null +++ b/docs/How To/Command Line Usage/serve_with_auto_reload.md @@ -0,0 +1,26 @@ +# 
Serve Applications with Auto-Reload + +Enable automatic reloading of your application when YAML files change during development using the `--reload` flag. + +### CLI Command + +```bash +qtype serve --reload my_app.qtype.yaml +``` + +### Explanation + +- **--reload**: Watches YAML files for changes and automatically restarts the server +- **Development workflow**: Edit your YAML file, save, and immediately see changes without manual restart +- **Port option**: Combine with `-p`/`--port` to specify server port (default: 8000) + +### Example with Port + +```bash +qtype serve --reload -p 8080 examples/tutorials/01_hello_world.qtype.yaml +``` + +## See Also + +- [Serve Command Reference](../../Reference/CLI.md#serve) +- [Tutorial: Hello World](../../Tutorials/01_hello_world.md) diff --git a/docs/How To/Data Processing/adjust_concurrency.md b/docs/How To/Data Processing/adjust_concurrency.md new file mode 100644 index 00000000..44ce1cbe --- /dev/null +++ b/docs/How To/Data Processing/adjust_concurrency.md @@ -0,0 +1,41 @@ +# Adjust Concurrency + +Control parallel execution of steps to optimize throughput and resource usage using the `concurrency_config` parameter on steps that support concurrent processing. 
+ +### QType YAML + +```yaml +steps: + - type: LLMInference + id: classify + model: nova + concurrency_config: + num_workers: 10 # Process up to 10 items in parallel + inputs: [document] + outputs: [classification] +``` + +### Explanation + +- **concurrency_config**: Configuration object for concurrent processing with `num_workers` parameter +- **num_workers**: Maximum number of concurrent async workers for this step (default: 1) + +### Steps Supporting Concurrency + +The following step types support `concurrency_config`: + +- **LLMInference**: Parallel LLM inference calls +- **InvokeEmbedding**: Parallel embedding generation +- **InvokeTool**: Parallel tool invocations +- **DocToTextConverter**: Parallel document conversion +- **DocumentSplitter**: Parallel document chunking +- **DocumentEmbedder**: Parallel chunk embedding +- **DocumentSearch**: Parallel search queries +- **BedrockReranker**: Parallel reranking operations + +## See Also + +- [LLMInference Reference](../../components/LLMInference.md) +- [InvokeEmbedding Reference](../../components/InvokeEmbedding.md) +- [DocumentEmbedder Reference](../../components/DocumentEmbedder.md) +- [LLM Processing Pipelines](../../Gallery/dataflow_pipelines.md) diff --git a/docs/How To/Data Processing/cache_step_results.md b/docs/How To/Data Processing/cache_step_results.md new file mode 100644 index 00000000..e143c4da --- /dev/null +++ b/docs/How To/Data Processing/cache_step_results.md @@ -0,0 +1,71 @@ +# Cache Step Results + +Avoid redundant computation by caching step results on disk, enabling faster re-runs when processing the same inputs. 
+ +### QType YAML + +```yaml +steps: + - type: LLMInference + id: classify + model: nova + inputs: [prompt] + outputs: [category] + cache_config: + namespace: document_classification # Logical grouping for cached data + version: "1.0" # Change to invalidate cache + on_error: Drop # Don't cache errors (default) + ttl: 3600 # Cache for 1 hour (seconds) + compress: false # Optionally compress cached data +``` + +### Explanation + +- **cache_config**: Enables step-level caching with configuration options +- **namespace**: Logical separation for cache entries (e.g., different projects or data domains) +- **version**: Cache version string - increment to invalidate all cached results for this step +- **on_error**: How to handle errors - `Drop` (don't cache errors, default) or `Cache` (cache error results) +- **ttl**: Time-to-live in seconds before cached entries expire +- **compress**: Whether to compress cached data (saves disk space, adds CPU overhead) + +Cached values are stored in the `.qtype-cache/` directory in your working directory. 
+ +### Monitoring Cache Performance + +Use the `--progress` flag to see cache hits and misses: + +```bash +qtype run app.qtype.yaml --flow my_flow --progress +``` + +First run shows cache misses: +``` +Step classify ✔ 5 succeeded ✖ 0 errors ⟳ 0 hits ✗ 5 misses +``` + +Subsequent runs show cache hits (much faster): +``` +Step classify ✔ 5 succeeded ✖ 0 errors ⟳ 5 hits ✗ 0 misses +``` + +## Complete Example + +```yaml +--8<-- "../examples/data_processing/cache_step_results.qtype.yaml" +``` + +Run the example: +```bash +# First run - cold cache +qtype run examples/data_processing/cache_step_results.qtype.yaml --progress -i '{"file_path": "examples/data_processing/sample_documents.jsonl"}' + +# Second run - warm cache (much faster) +qtype run examples/data_processing/cache_step_results.qtype.yaml --progress -i '{"file_path": "examples/data_processing/sample_documents.jsonl"}' + +``` + +## See Also + +- [LLMInference Reference](../../components/LLMInference.md) +- [Adjust Concurrency](adjust_concurrency.md) +- [Tutorial: Your First QType Application](../../Tutorials/your_first_qtype_application.md) diff --git a/docs/How To/Data Processing/decode_json_xml.md b/docs/How To/Data Processing/decode_json_xml.md new file mode 100644 index 00000000..11d28d21 --- /dev/null +++ b/docs/How To/Data Processing/decode_json_xml.md @@ -0,0 +1,24 @@ +# Decode JSON/XML to Structured Data + +Parse string data in JSON or XML format into structured outputs. This is particularly useful for extracting structured data from llm outputs. 
+ +### QType YAML + +```yaml +--8<-- "../examples/data_processing/decode_json.qtype.yaml" +``` + +### Explanation + +- **Decoder**: Step that parses string data (JSON or XML) into structured outputs +- **format**: The data format to parse - `json` (default) or `xml` +- **inputs**: String variable containing the encoded data to decode +- **outputs**: List of variables to extract from the decoded data (field names must match keys in the JSON/XML) +- **Error handling**: If parsing fails, the step returns an error +- **Markdown cleanup**: Automatically strips markdown code fences (```json, ```xml) if present in the input + +## See Also + +- [Decoder Reference](../../components/Decoder.md) +- [CustomType Reference](../../components/CustomType.md) +- [Tutorial: Working with Types and Structured Data](../../Tutorials/structured_data.md) diff --git a/docs/How To/Data Processing/explode_collections.md b/docs/How To/Data Processing/explode_collections.md new file mode 100644 index 00000000..d06966b1 --- /dev/null +++ b/docs/How To/Data Processing/explode_collections.md @@ -0,0 +1,40 @@ +# Fan-Out Collections with Explode + +Transform a single list input into multiple outputs, one per item, enabling parallel processing of collection elements. 
+ +### QType YAML + +```yaml +steps: + - type: Explode + id: fan_out + inputs: + - items # Variable of type list[T] + outputs: + - item # Variable of type T +``` + +### Explanation + +- **Explode**: Takes a single list and yields one output message per item +- **inputs**: Must be a single variable of type `list[T]` +- **outputs**: Single variable of the item type `T` (unwrapped from the list) +- **Fan-out pattern**: Each item is processed independently by downstream steps + +## Complete Example + +```yaml +--8<-- "../examples/data_processing/explode_items.qtype.yaml" +``` + +**Run it:** +```bash +qtype run examples/data_processing/explode_items.qtype.yaml \ + -i '{"items": ["apple", "banana", "cherry"]}' +``` + +## See Also + +- [Aggregate Data using Collect](./aggregate_data.md) +- [Explode Reference](../../components/Explode.md) +- [Adjust Concurrency](./adjust_concurrency.md) diff --git a/docs/How To/Data Processing/gather_results.md b/docs/How To/Data Processing/gather_results.md new file mode 100644 index 00000000..4d811fc7 --- /dev/null +++ b/docs/How To/Data Processing/gather_results.md @@ -0,0 +1,68 @@ +# Gather Results into a List + +Combine fan-out processing results into a single list while preserving variables that have the same value across all messages (common ancestors). 
+ +### QType YAML + +```yaml +variables: + - id: processed_product + type: text + - id: all_processed + type: list[text] + +steps: + - type: Collect + id: aggregate + inputs: [processed_product] + outputs: [all_processed] +``` + +### Explanation + +- **Collect**: Gathers all input values from multiple messages into a single list output +- **Common ancestors**: Only variables that have the exact same value across ALL input messages are preserved in the output message +- **Fan-out pattern**: Typically used after `Explode` to reverse the fan-out and aggregate results +- **Single output**: Always produces exactly one output message containing the aggregated list + +### Understanding Common Ancestors + +If you have these three messages flowing into `Collect`: + +``` +Message 1: {category: "Electronics", region: "US", product: "Phone", processed: "Processed: Phone"} +Message 2: {category: "Electronics", region: "US", product: "Laptop", processed: "Processed: Laptop"} +Message 3: {category: "Electronics", region: "US", product: "Tablet", processed: "Processed: Tablet"} +``` + +The `Collect` step will output: + +``` +{category: "Electronics", region: "US", all_processed: ["Processed: Phone", "Processed: Laptop", "Processed: Tablet"]} +``` + +Note that `product` is **not preserved** because it has different values across the messages. Only `category` and `region` (which are identical in all three messages) are included as common ancestors. 
+ +## Complete Example + +```yaml +--8<-- "../examples/data_processing/collect_results.qtype.yaml" +``` + +Run the example: + +```bash +qtype run examples/data_processing/collect_results.qtype.yaml \ + -i '{"category": "Electronics", "region": "US", "products": ["Phone", "Laptop", "Tablet"]}' +``` + +Output: +``` +all_processed: ['Processed: Phone', 'Processed: Laptop', 'Processed: Tablet'] +``` + +## See Also + +- [Explode Collections for Fan-Out Processing](explode_collections.md) +- [Collect Reference](../../components/Collect.md) +- [Explode Reference](../../components/Explode.md) diff --git a/docs/How To/Data Processing/read_data_from_files.md b/docs/How To/Data Processing/read_data_from_files.md new file mode 100644 index 00000000..c142d502 --- /dev/null +++ b/docs/How To/Data Processing/read_data_from_files.md @@ -0,0 +1,35 @@ +# Read Data from Files + +Load structured data from files using FileSource, which supports CSV, JSON, JSONL, and Parquet formats with automatic format detection based on file extension. + +### QType YAML + +```yaml +steps: + - id: read_data + type: FileSource + path: batch_inputs.csv + outputs: + - query + - topic +``` + +### Explanation + +- **FileSource**: Step that reads structured data from files using fsspec-compatible URIs +- **path**: File path (relative to YAML file or absolute), supports local files and cloud storage (s3://, gs://, etc.) 
+- **outputs**: Column names from the file to extract as variables (must match actual column names) +- **Format detection**: Automatically determined by file extension (.csv, .json, .jsonl, .parquet) +- **Streaming**: Emits one FlowMessage per row, enabling downstream steps to process data in parallel + +## Complete Example + +```yaml +--8<-- "../examples/data_processing/read_file.qtype.yaml" +``` + +## See Also + +- [FileSource Reference](../../components/FileSource.md) +- [Aggregate Reference](../../components/Aggregate.md) +- [Example: Batch Processing](../../Gallery/Data%20Processing/batch_processing.md) diff --git a/docs/How To/Data Processing/read_sql_databases.md b/docs/How To/Data Processing/read_sql_databases.md new file mode 100644 index 00000000..de2962fe --- /dev/null +++ b/docs/How To/Data Processing/read_sql_databases.md @@ -0,0 +1,47 @@ +# Read Data from SQL Databases + +Query relational databases and process results row-by-row using the `SQLSource` step, which supports any database accessible via SQLAlchemy connection strings. 
+ +### QType YAML + +```yaml +steps: + - type: SQLSource + id: load_reviews + connection: "sqlite:///data/reviews.db" + query: | + SELECT + review_id, + product_name, + rating, + review_text + FROM product_reviews + WHERE rating >= 4 + ORDER BY review_id + outputs: + - review_id + - product_name + - rating + - review_text +``` + +### Explanation + +- **SQLSource**: Step type that executes SQL queries and emits one message per database row +- **connection**: SQLAlchemy-format connection string (e.g., `sqlite:///path.db`, `postgresql://user:pass@host/db`) +- **query**: SQL query to execute; column names must match output variable IDs +- **outputs**: Variables to populate from query result columns (order must match SELECT clause) +- **auth**: Optional reference to AuthorizationProvider for database credentials + +## Complete Example + +```yaml +--8<-- "../examples/data_processing/dataflow_pipelines.qtype.yaml" +``` + +## See Also + +- [SQLSource Reference](../../components/SQLSource.md) +- [FileSource Reference](../../components/FileSource.md) +- [Tutorial: Working with Types and Structured Data](../../Tutorials/working_with_types_and_structured_data.md) +- [Example: Dataflow Pipeline](../../Gallery/Data%20Processing/dataflow_pipelines.md) diff --git a/docs/How To/Data Processing/write_data_to_file.md b/docs/How To/Data Processing/write_data_to_file.md new file mode 100644 index 00000000..74b916f0 --- /dev/null +++ b/docs/How To/Data Processing/write_data_to_file.md @@ -0,0 +1,40 @@ +# Write Data to a File + +Write flow data to files using the `FileWriter` step, which accumulates all messages and outputs data in Parquet format using fsspec-compatible URIs. 
+ +### QType YAML + +```yaml +steps: + - type: FileWriter + id: write_results + path: output_path # Variable containing file path + inputs: + - review_id + - product_name + - rating + - llm_analysis + - output_path + outputs: + - result_file +``` + +### Explanation + +- **FileWriter**: Batches all incoming messages and writes them as a single Parquet file +- **path**: fsspec-compatible URI (can be a `ConstantPath`, Variable reference, or string) for the output file location +- **inputs**: Variables from FlowMessages to include as columns in the output file +- **outputs**: Variable containing the path where data was written (useful for passing to downstream steps) +- **batch_config**: Optional configuration for batch size. This defaults to max_int (i.e., processes all messages into one file). If you change it, you will get multiple files. + +## Complete Example +See the [LLM Processing Pipelines](../../Gallery/dataflow_pipelines.md) gallery example. + + + +## See Also + +- [FileWriter Reference](../../components/FileWriter.md) +- [Read Data from Files](read_data_from_files.md) +- [Read SQL Databases](read_sql_databases.md) +- [LLM Processing Pipelines](../../Gallery/dataflow_pipelines.md) diff --git a/docs/How To/Invoke Models/call_large_language_models.md b/docs/How To/Invoke Models/call_large_language_models.md new file mode 100644 index 00000000..b46f8bbd --- /dev/null +++ b/docs/How To/Invoke Models/call_large_language_models.md @@ -0,0 +1,51 @@ +# Call Large Language Models + +Send text input to an LLM and receive a response using the `LLMInference` step with a system message and configurable model parameters like temperature and max_tokens. 
+ +### QType YAML + +```yaml +models: + - type: Model + id: nova_lite + provider: aws-bedrock + model_id: amazon.nova-lite-v1:0 + inference_params: + temperature: 0.7 + max_tokens: 500 + +steps: + - type: LLMInference + id: assistant + model: nova_lite + system_message: "You are a helpful assistant" + inputs: [text] + outputs: [response] +``` + +### Explanation + +- **model**: Reference to a Model resource defining the LLM provider and model ID +- **inference_params**: Configuration for model behavior (temperature, max_tokens, top_p, etc.) +- **temperature**: Controls randomness (0.0 = deterministic, 1.0 = creative) +- **max_tokens**: Maximum number of tokens in the response +- **system_message**: Sets the assistant's persona and instructions for all requests +- **inputs**: Variables containing the user's text input to the LLM +- **outputs**: Variables where the LLM's response will be stored (must be type `text` or `ChatMessage`) + +## Complete Example + +```yaml +--8<-- "../examples/invoke_models/simple_llm_call.qtype.yaml" +``` + +Run with: +```bash +qtype run simple_llm_call.qtype.yaml --input '{"text": "What is the capital of France?"}' +``` + +## See Also + +- [LLMInference Reference](../../components/LLMInference.md) +- [Model Reference](../../components/Model.md) +- [Tutorial: Build a Conversational Interface](../../Tutorials/conversational_interface.md) diff --git a/docs/How To/Invoke Models/create_embeddings.md b/docs/How To/Invoke Models/create_embeddings.md new file mode 100644 index 00000000..13f5850c --- /dev/null +++ b/docs/How To/Invoke Models/create_embeddings.md @@ -0,0 +1,49 @@ +# Create Embeddings + +Generate vector embeddings from text using an embedding model, useful for semantic search, similarity comparisons, and RAG applications. 
+ +### QType YAML + +```yaml +models: + - type: EmbeddingModel + id: titan_embed + provider: aws-bedrock + model_id: amazon.titan-embed-text-v2:0 + dimensions: 1024 + +flows: + - type: Flow + id: main + steps: + - type: InvokeEmbedding + id: embed_text + model: titan_embed + inputs: [text] + outputs: [embedding] +``` + +### Explanation + +- **EmbeddingModel**: Defines an embedding model configuration with provider and dimensions +- **dimensions**: Size of the embedding vector (must match model output, e.g., 1024 for Titan v2) +- **InvokeEmbedding**: Step type that generates embeddings from input text +- **Embedding**: Output type containing the vector array and metadata + +## Complete Example + +```yaml +--8<-- "../examples/invoke_models/create_embeddings.qtype.yaml" +``` + +Run with: +```bash +qtype run examples/invoke_models/create_embeddings.qtype.yaml \ + -i '{"text": "Your text here"}' +``` + +## See Also + +- [InvokeEmbedding Reference](../../components/InvokeEmbedding.md) +- [EmbeddingModel Reference](../../components/EmbeddingModel.md) +- [Tutorial: Build a RAG System](../../Tutorials/building_rag_system.md) diff --git a/docs/How To/Invoke Models/reuse_prompts_with_templates.md b/docs/How To/Invoke Models/reuse_prompts_with_templates.md new file mode 100644 index 00000000..2b796a99 --- /dev/null +++ b/docs/How To/Invoke Models/reuse_prompts_with_templates.md @@ -0,0 +1,39 @@ +# Reuse Prompts with Templates + +Define reusable prompt templates with variable placeholders using the `PromptTemplate` step, enabling consistent prompt formatting and dynamic content substitution. + +### QType YAML + +```yaml +steps: + - id: create_prompt + type: PromptTemplate + template: | + Analyze this product review in 1-2 sentences. 
Include: + - Overall sentiment (positive/negative/mixed) + - Key themes or points + + Product: {product_name} + Rating: {rating}/5 + Review: {review_text} + inputs: + - product_name + - rating + - review_text + outputs: + - analysis_prompt +``` + +### Explanation + +- **type: PromptTemplate**: Step that formats strings with variable placeholders using Python's `str.format()` syntax +- **template**: String with `{variable_name}` placeholders that get replaced with actual values from inputs +- **inputs**: Variables whose values will be substituted into the template placeholders +- **outputs**: Single variable containing the formatted prompt string ready for LLM inference + +## See Also + +- [PromptTemplate Reference](../../components/PromptTemplate.md) +- [How-To: Include Raw Text from Other Files](../../How%20To/Language%20Features/include_raw_text_from_other_files.md) +- [How-To: Call Large Language Models](call_large_language_models.md) +- [Tutorial: Your First QType Application](../../Tutorials/01-first-qtype-application.md) diff --git a/docs/How To/Language Features/include_qtype_yaml.md b/docs/How To/Language Features/include_qtype_yaml.md new file mode 100644 index 00000000..9dd25ef7 --- /dev/null +++ b/docs/How To/Language Features/include_qtype_yaml.md @@ -0,0 +1,45 @@ +# Include QType YAML + +Organize QType applications into reusable modules by including external YAML files using the `!include` directive, allowing you to share models, tools, authentication providers, and other resources across multiple applications. 
+ +### QType YAML + +```yaml +id: my_app + +# Include shared resources from other files +references: + - !include common/auth.qtype.yaml + - !include common/models.qtype.yaml + - !include common/tools.qtype.yaml + +flows: + - id: main_flow + steps: + - type: LLMInference + id: generate + model: shared_gpt4 # References model from included file + prompt: "Generate a summary" +``` + +**common/models.qtype.yaml:** +```yaml +- id: shared_gpt4 + type: Model + provider: openai + model_id: gpt-4 + auth: shared_openai_auth +``` + +### Explanation + +- **!include**: YAML tag that loads and parses external YAML files, merging their content into the current specification +- **Relative paths**: File paths are resolved relative to the including YAML file's location +- **Nested includes**: Included files can include other files, creating a hierarchy of modular components +- **Remote includes**: Supports URLs (e.g., `!include https://example.com/config.yaml`) via fsspec + +## See Also + +- [Reference Entities by ID](reference_entities_by_id.md) +- [Include Raw Text from Other Files](include_raw_text_from_other_files.md) +- [Application Reference](../../components/Application.md) diff --git a/docs/How To/Language Features/include_raw_text_from_other_files.md b/docs/How To/Language Features/include_raw_text_from_other_files.md new file mode 100644 index 00000000..b33c24f9 --- /dev/null +++ b/docs/How To/Language Features/include_raw_text_from_other_files.md @@ -0,0 +1,47 @@ +# Include Raw Text from Other Files + +Load external text files into your YAML configuration using the `!include_raw` directive, useful for keeping prompts, templates, and long text content in separate files. 
+ +### QType YAML + +```yaml +steps: + - id: generate_story + type: PromptTemplate + template: !include_raw story_prompt.txt + inputs: + - theme + - tone + outputs: + - story +``` + +**story_prompt.txt:** +```txt +--8<-- "../examples/language_features/story_prompt.txt" +``` + +### Explanation + +- **!include_raw**: YAML tag that loads the contents of an external file as a raw string +- **Relative paths**: File paths are resolved relative to the YAML file's location +- **Template substitution**: The loaded text can contain variable placeholders (e.g., `{theme}`, `{tone}`) that are substituted at runtime +- **Use cases**: Prompt templates, system messages, documentation, or any text content you want to manage separately + +## Complete Example + +```yaml +--8<-- "../examples/language_features/include_raw.qtype.yaml" +``` + + + +**Run it:** +```bash +qtype run include_raw.qtype.yaml -i '{"theme":"a robot learning to paint","tone":"inspirational"}' +``` + +## See Also + +- [PromptTemplate Reference](../../components/PromptTemplate.md) +- [Reference Entities by ID](../../How%20To/Language%20Features/reference_entities_by_id.md) diff --git a/docs/How To/Language Features/reference_entities_by_id.md b/docs/How To/Language Features/reference_entities_by_id.md new file mode 100644 index 00000000..f967c55d --- /dev/null +++ b/docs/How To/Language Features/reference_entities_by_id.md @@ -0,0 +1,51 @@ +# Reference Entities by ID + +Use QType's "define once, reference by ID" pattern to eliminate duplication and improve maintainability by assigning unique IDs to components and referencing them throughout your application. 
+ +### QType YAML + +```yaml +# Define components with unique IDs +auths: + - type: api_key + id: openai_auth + api_key: ${OPENAI_KEY} + +models: + - type: Model + id: gpt4 + provider: openai + model_id: gpt-4o + auth: openai_auth # Reference auth by ID + +memories: + - id: conversation_memory + token_limit: 10000 + +flows: + - type: Flow + id: chat_flow + steps: + - type: LLMInference + model: gpt4 # Reference model by ID + memory: conversation_memory # Reference memory by ID +``` + +### Explanation + +- **`id` field**: Assigns a unique identifier to any component (models, auths, tools, variables, etc.) +- **Reference by string**: Use the ID string wherever the component is needed +- **Automatic resolution**: QType's linker automatically resolves ID references to actual objects during validation +- **Reusability**: The same component can be referenced multiple times throughout the application + +## Complete Example + +```yaml +!include ../../examples/conversational_ai/simple_chatbot.qtype.yaml +``` + +## See Also + +- [Tutorial: Your First QType Application](../../Tutorials/01-first-qtype-application.md) +- [Model Reference](../../components/Model.md) +- [APIKeyAuthProvider Reference](../../components/APIKeyAuthProvider.md) diff --git a/docs/How To/Language Features/use_environment_variables.md b/docs/How To/Language Features/use_environment_variables.md new file mode 100644 index 00000000..ae9140cf --- /dev/null +++ b/docs/How To/Language Features/use_environment_variables.md @@ -0,0 +1,47 @@ +# Use Environment Variables + +Keep sensitive credentials and environment-specific configuration out of your YAML files by using environment variable substitution with `${VAR_NAME}` syntax. 
+ +### QType YAML + +```yaml +auths: + - type: api_key + id: openai_auth + api_key: ${OPENAI_KEY} # Required variable + host: https://api.openai.com + +models: + - type: Model + id: gpt4 + provider: openai + model_id: ${MODEL_NAME:-gpt-4} # Optional with default + auth: openai_auth +``` + +### Explanation + +- **`${VAR_NAME}`**: Substitutes the value of environment variable `VAR_NAME`; raises error if not set +- **`${VAR_NAME:-default}`**: Substitutes the value of `VAR_NAME` or uses `default` if not set +- **Environment variable resolution**: Happens during YAML loading, before validation and execution +- **Works everywhere**: Can be used in any string value throughout the YAML specification + +## Setting Environment Variables + +```bash +# Export before running +export OPENAI_KEY="sk-..." +qtype run app.qtype.yaml + +# Or set inline +OPENAI_KEY="sk-..." uv run qtype run app.qtype.yaml + +# Or in a .env file (automatically loaded via the loader) +echo 'OPENAI_KEY="sk-..."' >> .env +qtype run app.qtype.yaml +``` + +## See Also + +- [Tutorial: Your First QType Application](../../Tutorials/01-first-qtype-application.md) +- [APIKeyAuthProvider Reference](../../components/APIKeyAuthProvider.md) diff --git a/docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md b/docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md new file mode 100644 index 00000000..6568d14d --- /dev/null +++ b/docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md @@ -0,0 +1,49 @@ +# Trace Calls with OpenTelemetry + +Enable distributed tracing for your QType applications using OpenTelemetry to monitor LLM calls, execution times, and data flow through Phoenix or other observability platforms. 
+ +### QType YAML + +```yaml +telemetry: + id: phoenix_trace + provider: Phoenix + endpoint: http://localhost:6006/v1/traces +``` + +### Explanation + +- **telemetry**: Top-level application configuration for observability +- **id**: Unique identifier for the telemetry sink +- **provider**: Telemetry backend (`Phoenix` or `Langfuse`) +- **endpoint**: URL where OpenTelemetry traces are sent + +### Starting Phoenix + +Before running your application, start the Phoenix server: + +```bash +python3 -m phoenix.server.main serve +``` + +Phoenix will start on `http://localhost:6006` where you can view traces and spans in real-time. + +## Complete Example + +```yaml +--8<-- "../examples/observability_debugging/trace_with_opentelemetry.qtype.yaml" +``` + +Run the example: + +```bash +qtype run examples/observability_debugging/trace_with_opentelemetry.qtype.yaml --text "I love this product!" +``` + +Then open `http://localhost:6006` in your browser to see the traced execution. + +## See Also + +- [Application Reference](../../components/Application.md) +- [Validate QType YAML](validate_qtype_yaml.md) +- [Visualize Application Architecture](visualize_application_architecture.md) diff --git a/docs/How To/Observability & Debugging/validate_qtype_yaml.md b/docs/How To/Observability & Debugging/validate_qtype_yaml.md new file mode 100644 index 00000000..191c70ad --- /dev/null +++ b/docs/How To/Observability & Debugging/validate_qtype_yaml.md @@ -0,0 +1,35 @@ +# Validate QType YAML + +Check your QType YAML files for syntax errors, schema violations, reference issues, and semantic problems before running them. 
+ +### Command Line + +```bash +# Basic validation +qtype validate path/to/app.qtype.yaml + +# Validate and print the parsed document +qtype validate path/to/app.qtype.yaml --print +``` + +### Validation Checks + +- **YAML Syntax**: Verifies valid YAML structure and syntax +- **Schema Validation**: Ensures all fields match the QType schema (Pydantic validation) +- **Reference Resolution**: Checks that all ID references (models, steps, variables) exist +- **Duplicate Detection**: Identifies duplicate component IDs +- **Semantic Validation**: Validates flow logic, type compatibility, and business rules + +### Options + +- **`--print` / `-p`**: Print the validated document with resolved references and defaults applied + +### Exit Codes + +- **0**: Validation successful +- **1**: Validation failed (error details printed to stderr) + +## See Also + +- [Application Reference](../../components/Application.md) +- [Semantic Validation Rules](../../Reference/semantic-validation-rules.md) diff --git a/docs/How To/Observability & Debugging/visualize_application_architecture.md b/docs/How To/Observability & Debugging/visualize_application_architecture.md new file mode 100644 index 00000000..19564aa6 --- /dev/null +++ b/docs/How To/Observability & Debugging/visualize_application_architecture.md @@ -0,0 +1,61 @@ +# Visualize Application Architecture + +Generate interactive diagrams showing your application's flows, steps, and data dependencies to understand structure and debug issues. 
+ +## Example Visualization + +Here's what a visualization looks like for a conversational chatbot application: + +```mermaid +--8<-- "How To/Observability & Debugging/visualize_example.mermaid" +``` + +This diagram shows: + +- **Flow structure**: The conversational flow with its interface and steps +- **Data flow**: How variables (user_message, context, response) flow between steps +- **Shared resources**: The LLM model and memory used by the application +- **Step types**: Different icons for templates (📄), LLM inference (✨), and other components + +## Command Line + +```bash +# Generate and open diagram in browser +qtype visualize path/to/app.qtype.yaml + +# Save Mermaid diagram to file +qtype visualize path/to/app.qtype.yaml --output diagram.mmd + +# Save without opening browser +qtype visualize path/to/app.qtype.yaml --output diagram.mmd --no-display +``` + +## Prerequisites + +Visualization requires [mermaid-cli](https://github.com/mermaid-js/mermaid-cli) to be installed: + +```bash +npm install -g @mermaid-js/mermaid-cli +``` + +## How It Works + +- **Generates Mermaid diagram**: Creates a flowchart showing flows, steps, and variable connections +- **Converts to SVG**: Uses `mmdc` to render the diagram as a scalable vector graphic +- **Opens in browser**: Displays the interactive diagram automatically (unless `--no-display` is set) + +## Options + +- **`--output` / `-o`**: Save the Mermaid diagram source to a file (`.mmd` format) +- **`--no-display` / `-nd`**: Skip opening the diagram in browser (useful for CI/CD) + +## Exit Codes + +- **0**: Visualization successful +- **1**: Visualization failed (invalid YAML or missing mmdc) + +## See Also + +- [Validate QType YAML](validate_qtype_yaml.md) +- [Application Reference](../../components/Application.md) +- [Flow Reference](../../components/Flow.md) diff --git a/docs/Reference/Examples/chat_with_telemetry.mmd b/docs/How To/Observability & Debugging/visualize_example.mermaid similarity index 53% rename from 
docs/Reference/Examples/chat_with_telemetry.mmd rename to docs/How To/Observability & Debugging/visualize_example.mermaid index a24440ac..6938e629 100644 --- a/docs/Reference/Examples/chat_with_telemetry.mmd +++ b/docs/How To/Observability & Debugging/visualize_example.mermaid @@ -1,31 +1,24 @@ flowchart TD - subgraph APP ["📱 Application: hello_world"] + subgraph APP ["📱 simple_chatbot"] direction TB - subgraph FLOW_0 ["💬 Flow: chat_example -A simple chat flow with OpenAI"] + subgraph FLOW_0 ["🔄 chat_flow"] direction LR FLOW_0_START@{shape: circle, label: "▶️ Start"} - FLOW_0_S0@{shape: rounded, label: "✨ llm_inference_step"} - FLOW_0_START -->|user_message: ChatMessage'>| FLOW_0_S0 + FLOW_0_S0@{shape: rounded, label: "✨ generate_response"} + FLOW_0_START -->|user_message| FLOW_0_S0 end subgraph RESOURCES ["🔧 Shared Resources"] direction LR - AUTH_OPENAI_AUTH@{shape: hex, label: "🔐 openai_auth\nAPI_KEY"} - MODEL_GPT_4@{shape: rounded, label: "✨ gpt-4 (openai)" } - MODEL_GPT_4 -.->|uses| AUTH_OPENAI_AUTH - end - - subgraph TELEMETRY ["📊 Observability"] - direction TB - TEL_SINK@{shape: curv-trap, label: "📡 hello_world_telemetry\nhttp://localhost:6006/v1/traces"} + MODEL_NOVA_LITE@{shape: rounded, label: "✨ nova_lite (aws-bedrock)" } + MEM_CONVERSATION_MEMORY@{shape: win-pane, label: "🧠 conversation_memory (10KT)"} end end - FLOW_0_S0 -.->|uses| MODEL_GPT_4 - FLOW_0_S0 -.->|traces| TEL_SINK + FLOW_0_S0 -.->|uses| MODEL_NOVA_LITE + FLOW_0_S0 -.->|stores| MEM_CONVERSATION_MEMORY %% Styling classDef appBox fill:none,stroke:#495057,stroke-width:3px diff --git a/docs/How To/Qtype Server/flow_as_ui.png b/docs/How To/Qtype Server/flow_as_ui.png new file mode 100644 index 0000000000000000000000000000000000000000..bd217590cd8754fb64f13fed0200af5863cea49e GIT binary patch literal 84343 zcmeFYhg%a}yElp;Dj=X#l_pA2s-X0yBGN(W9i_L>dyA-ufE4K+L0YKNf}tb53jqRz z-XVkzp@p2`f%o0-+27gUA8@YCb%jh@Gn0GnwbuPB6Z}R+mX!D&F&-Wssl41PH9Wj) 
z^mup#UvFFkws7a_GvVRgl(LqVej_g}egBQKgN3!7IUb%|aIE%q9raF{H2tW^$iFY+ zDZDAKW?hY>cu7+CaP)@!hc_lSuTuNyYB-`Z@daHqUzHM=ytv)^Cv)_~zjivAMo(+6PP|< z+#zl$937Rz18tpp_;@gQOV`Xjf*)JrzyiwkKR#T?qabm**6<;oeES977Dt+n+6}z7 zZTd<)RNYVPp6laNe)i0`Y5CYO_NL{dEhCt!i8$Sy7M@0)K`{#nUZrBBCDnG>k1u*m zEqxE_-(3A5N?_7(dK&Ee70wI|lr;Hje%$ko$+he(Cy_6!bJ2qIeXGGj4h0L>t!1a& zv-HKo>9}-uhHvjl=>+sCXn2#K79x8mf^YLH1^(8Kxy?XOO4Gvh*%v0&@`zRcUR>6e zY?vpZozBxAB&AQ&>8P%h-fm&etX+{~P5=J*=IryE+BZaHbUNfe>oCm)e*8FdHM^g! zv&GY#t3p!!kHzDoa)V&;Vy+QCYkLYs2u@VL@$H(aP3u_j<6G+YpGwW)0-*u<=?`loBRBkV~a$ucY zUKr(l6ABtF`tj(62#qSwXA-rH2+I%>Q%Y*DxmJ;hEgc!jL;hPtqxWw-`9KA|`Q-Kj z-g{XGY2I43z0Y_&ksrA3jF-nq0U{zlA**Pl?P-4Z1tCqmpX{H#y;*@acg z{4Qbj5C5-aGjq)g0gtgF^s^ZjWSw(9Pw zqfm^$nI*r%$iwTAATT4uqGU-j>p{VO#QxibBt8v?gE#&C+hbB1>E!*lrF=|CN(pr6 z+$qF@7+UY)5!klfD|KH_xJh1A+`cm`aEIF{O_gWp_SwU!nbcU#vPRelto%nsniuYRX%%2DwZs1>&ZE?BR=ji6XkN5n*?`>`UR7+Xo?`T~Ure)UsIo&`0yK$Q+x-w@8MzfaRp%bIZ<3&5MESlg0+ipSQ7Kl^Rd{0U@BA1K~jiwhY3fp2$Jzme=0-THOXnMcAzeIH0> zD1KktXdH%<8hw!5BJ+KH>+|Qow?s`oBs|diN-!7lV&&XU-|e1X+CAYPfWO)92{%1s3V{i zB+zu?ONjd8T^b>LJ9qx(#O>mlC*nlQv(a0LhbFc7(*cP2qC@J_D^o$>hktLpzIy$m zP=NFu<&FnRY%#aGh>$meZsg9E4=H=fvQW4M3%*i4#IJ1*H00nqKfJ$X7is%z;z!d=8neBX!yQZEF~?7{*3DDz z$D4h>Y=4O*LrSefZ`+tTP&qv7wF-S(^R}Th*eS1c)H8Ma@w^eGVKiPeeY7T9yRx^^ zYL0)-OSooEZ%+SR(VH|6BTC~2x>lY(odgm@0SRtznr0@6@QpZg>;4KV=G8UyAY5KXk&cP4hv!* z(?S|mJ{;#mJEKRChGnwoNpv}y6U{67gN%{vWzarZYBN!D-IvfW+T;zha&BfKG$JFS zl8%FJA+AZNp_x7B&x>CxZg{0C#bAped!IUtjF;YZ`Mlh->_Ph3T^wTMUEVx|wb_;2n zIRDzlA>NimOfgOcPMy@@cAWM)_s3Ru?P7W#HRTokWIRebarP5BaX;ca9mEPAxg8}R zxnK*hOxJX-ZV^BU#m&b`opow-cnMMoSObFar_1Zco{qAA{PpASkHH_|?}}!!WlDd!tc=1Y2U@LOtJcV`|9IMynp$p@3Tj6 zK(Hd^Cd(L`e6(KFaCAExf~7%~NJ01epn!MF%H5y5S@&{?)#>1J)}7YK!Q=L;5xNpN zK7}WvQKg9Xv2V%z8xx+*+g|e#%5TyeQyuT8My5s8KgE&=ebMhl_s^JVnD1?m)(X_p zp5mQx_(wEq|9ZLg3@+p%vsBJCT^Oq?F?Ke#I9B%(`s?K(2RoOgY2V32!UP&d;Pu$c zXGe1fzDpij@d}nX63j-KtF%rf_EoXPFuIn#C~b&6IMR;uIdL2HGa8#_rXzPq(H`yU zrw0w0U76(3(dSj8Kw9kf()~ 
znAV-^9?WVo(;@qGaM!%ge9_#PW|t-%H@t|bY#`(6G#D%$GF|;*-eR6JCvH3w*en-6l32?@p-p4IekDoWk)9o_zZREk#b%FRt@2Q&NWo(yl;!mGYI zcwFDPeyzoTNc)WQL(>-v3;Zo_F}w;5ycI|A>$^Wkah;@Z*L4?foU#{=KI|ergAFy| zFu#osJzNMr)alP2kgOF%5Z;fw_B@mJxokMS%_kp$qk1)qSHH8+P0YnjEt|M=utc=G2EZ@MpdgUG-A+U4>_()!%`}gwe zE06F9{@IR?hZkgxclGiZ72x~)FB15ix4HNxi28ze9ay6PKAu1D|8?{=`X2=US|<1k z?8AGZE-fz)e5;!}o15FaSUI>V{C?2`Y#?%!({;haqhUS&T#;8}*#gdwu-4FV)lpUw zHFW^-yf@)^7&@o2lxLu#MM^f zfsXQ<`_c~1=Jy48p71<*AW3}x{(W(0GYe6*SFbOd18WiwtXy3kMR|EWJUny_kD{~i9XKP1SYX|%L=jVNI;^5{g z@!-Mvh5r3_ai8X%*8g=SdzZ_f1^htX^F6$LJWqK4eQuzs`1w}RH`bo!cDk>uK|sua zYebD-80T}!W>Aq38>9n}^RPdQs(wW$JS&!vqG4ebcA3sQ8ab5PJJ{dBc z2?+Y~YWgg`)rGf~N(y(X*f|fXUp+2`lNtV;ROpVk6Jw)eA@4?dO#GT*_SvZMq4(11 z|LA5~)L%69L2wn zzYM`8HrJy~l8c0R$1MU6P^~_j;5QYL5YMyK>+MIsc--&=^65Q~eh44_omV&zocZ`P zb9X5a#~nWbllwGzApFF>dcXKVWl?DQ9cRm4=}Z`{LCylM!M=93+G$m0xk57W8qR&b z*Q0I2hao2amu9?j<|ZsR2Z)LJW^`YOs5XB4 zBKu91-i^BOwr0qt-Qul1RFG*;0oQwFwihF1F{#iZE1ceD#gcdGd~3WB_h?aL#(hPj zj7H`NmaAk(Y_V}H4xhLX_LEUH9@9?A!svycicrqnrjMb8cc%T)&jb}+z;zL5YM+gI z^<6|hu6}2YZomoMt8m~lV%^?Oh6L%B89o%aCH!n~SWxTYf&HMr5+E(2H%@VJcW$)8 zXDv_XQ+9dtJDs#P|JJV}vY+;gn<^g{WJQ&d?48aYq$4vuQ>ZkAn))8=9#e@_*0-mDXBTyRu|4EJnPh3DU|EZcXl}6x>faM*p49x-X|`l zA~AnDWC|@z@tg~u|K#zdl?B#NISQTj+Wd`5+kNK(qF&IU_2)`T>u)b~4m+C|xahZD%a(mxwNE;91DVt@i; zb>)Oq_wwW1ga?dXTXAy!Mf#hd7NP0axnb39Cor#BMG$_F+DjaSR(L^-=$}a4<(o^k zDt~nnWk^5d(l5Lo=?5g=@VkW$CX;}PV^T17UmKEBv{thjqc1y`wTAfai9&n)Gz=Of zc^9$z3p)I+eU=wL$mwuSSl}Y{-W3wxpl{=7_(XY;d+e#+Dk)>R({}=NT)zj-4K!6D z_I0FAw@eFbW=Fm&<;Iv(3Y)#<&sJk*4UlGzUF*^^G@bXvhE=JH>AJUPgsbDheaDW>EYizdqYV(6_7)oV;KGt1mp-Wqj%Ae&5_h-N?)FftEAe^x zZq2rD@0ItrBaFfDiL8ZbJ*+6~Z-C^{-tMUTiNxG6Vwy|C@K@n-eNFrKpjh1B(sz~u zJoNE2VFzEN&swR47m_vforg+uUrk?$&vuU%_r)F7(W)WMAQPJ(9?s{#5VEjNLM*{@ z8Lf_SO|>EX#NO*}juVc%3=p5f+PN~PrzuX(nMT1$cG2E76ig#BV| zjqPtXiihv;d%MDc)M{3A%iUahI>V~*!gt7pCe@uETLmSuRD}+@37=_rNqvMZ68x*< zS~bD-%k@pA%aTDY}Ps)+ei#M_f9?I#gCK7X>W>51yh7GyPFQ(OO 
z-l>a_s4%PHA_e+yzc3WDIOv@%%?NFcf8f>A&b&jdv(VJ$KP_+wAGe5>pj$B?im{-y@pic6id)9VvYin(~C8|L=iLBDu+Q_WqxF1mTcusA+JdrS)vlG2jaazwrRNs}z|v-~kJ{SFZ6 zBeu#%UQ1)NneaA4Mfd%=@=vD4B9^>k_IuZKb3jR^2fFj{{`xz92(4+%TB$lcSgAzI zT(^O%YH*zIv^hdOIo)N6XaJPl2c$C$Naqgi>Cwvseg@2}N#UZG^DGa(t=k&K`IK)n z-OlLdxL$6F84_&eh=#8ruJ2&c8pIYp1JX5)DkMf!+7r#@ZwECGo8dlk{U=+C5332P!Nbn`x3N z-6xFTg}x!dK#m5WIMs25Xp(4Fa`+s*?-Fo$aYBNsgKE z4Qr?`ZG=Khk5_YwDv;Id@wEY%<_M`){M~*!brlhr0n=~2jm$?524!S&I}VWEmn1Y2 z?OAi5qx-|l`<0D1=Z7dGz8Sg~`zl8bQNEGCC^(;`0R3^62X^J3NhP3^bE}zLs_m4d zd?_C9Dmc{~TkuR9~EyHY)p+{m1~l0Zg( zcDma?5hJA93aXl>^O)Ry&f-u-g=}L2*IHHW5SO;*(6zL|tgWIdW%DP`>;sy`HtU(l z#h;cfn-Fv@d+A$(K+w(&5dJy&+cOm+H%wVja(5d>sB=_ znLpQ}!SnHkPKLlQsYqdSsJ6}xCZtW^t=_BvxsGM-KbSVZs$Ql;V%=NUN3XSAu5}Hnu1VulN z<=<{kAg1^4!Im9Oknn^D#2ZGR$Q+Q5sn}O&bvExU6vZyk4#wJpgBqE=P^N#~sureN zeoGpG=g}cwL@@>~EsQnjJ2mJsLzxqVZSECX!`lF8QM18>au2e7A*<)ERLp|nQduF6wMm-dDm(cx=VZ9lc8~D#X=unsea|3hKA@(= z!CUbM%Dtw&J>6HU0^}m&To)l+Xjxd}-ky`mg$ogNzitJ+nV8a`HciCD zo%RDsiAsk6X^#H9F238sEH9d2u%MXE%$dqj_X+y~;X;HzwKD8k@pUrGgH>AKW4tZB zIN-OO=+h@h<2>!%-TS)a9_RG5FKW_in$NE~YXK+qr9ptpZ9H{ARTAh0Fn3hA(Zt$G z#`R6g>rB72#m^SXOShL+U-Vo{9SPyQmNspJQ#01mbHmC_$;l&&>uZe*tNPZgmGy>6 zaWg+h-69ASRbmk~xfI6GDQCDaKjs4%fY^@*_#3JngXY)aKJ9G!cE$${7|rpL zJG4tVaPPhCTvJRPS(lV&Xp{m@KTc^j{uKO{UqClopF%KY=;UjT(1WXFu^i(Ommz%h zeCADXlHX-=hY&(XZI(S*galrVyPxs}wHQgbtJp?Cc)IUh#Y{*(($eu?jl!A}(OaZh zw+Z7?qD^~;wLxo}we|5pA=YJK`=-B# z7G7W`85HV5(uWa!;MW^{vRTa&XK2xxQ^~tzTW+vc&DTkP*14xhxsb!!RmS>$9KeYQ z4RXeob=9QEu_=0CpFNV+L8u++U=wyyP9T3r%fz5t?iVCe>=DK>zdd&RS?`Dv5@fB zJkKg>q(a{s_Ip0AAjv}>n{2)Y86xA?Vsu$}QMzeVV`j(jmcy}Q;%&eeI?yi_1NUv1 zGW_-M4r!1Xaek@?DzRqj6A67~y$pWP@$p*fKMD6N37EGAihpna)1d=OA8oZWf!nxT zxV3g}GIkaf{CZ{Y7id!jjL3!7;YStzQIRuS6BR<)P?G>@em}J`aGBNeN^;|S*Y8~o zh=-1HIg{g#aL_~03uITnFu1>5RZ0@N83(GzI&nM7jo7YKI3Yq1HXR5#-bAOi)%m^& z=}oikxzb8jlsXHJDGFLt-KF4!B8|@$xT_g|iZg}?(L@DS@NR%d^89#UduzfATT?&l zX^(&5Bx_pV2IZE7!fU7B%ck9-HF7rJMSKKDtW<*Oe}N-Dx^a_~A6D2Iqn8b9JVMVa zZ*`&loAgmaaZzscHn1kzpqa61B 
zsjuICBlg`{T`pa4&R!v?5pvhqVN1z~yatzf@J; zW2_TU6Gt$^f|}tABkpp@wrpT9`^cUck~;xfgXDY@ahXqb9h9H{G?DqIQ}6DgeJ>ml zQ)65-Uh-QZ*Ar7W3z;OX-^|e|1FZ^GMXe4;-QoxNaAKbLv`t8($x2{#??3}gILJM38UI7a0shn}zJSzx>=dcV950zA>2h|XuaEX zq;F4xC}W(b)XE0wO-OcvuCt~0YY9+pFV>vd+Tko|UqhV=nJF;ngmx_yRRfd&t0Zo& zJAZnRLb&3?{`boPUk@1Y1v1Dk2mC^Ok7b&qN&DqG^<>_?2*SC}43BJRr=C}{vd1%S z$K(-7FUMH%Mvh8}{T`QdLLkm&B)|SPevlRcW4CeC{dX%fNQE?+IInem_p+4-B#SR| zxRd?&>n6$+p@ks}v@6m~KBC7dLieK{CzaXJ6*ssW_;-3il-pw)XRGP#!z3^g&-M-U zwb*cJ?qk+SojTK#TI;sgzPq5CcaO~sJh!R&xM(FUCcT-EoCgyU*xnC?ez8a%#V2mg zn-5K@456H3`y2&;=dJcgWx&1D?*O=;-8J$xH#-CG)=IBMZW5jjZmov>xa3N%t`pyB zf`}GDWUgv##cEsXT+|iIWi+zv9V~W)hm&j6CnD7}wNmu;i92V-dd{i~#Y=WujTAQ( zTy3=pq3+8XAm*wP-k3b=dK+qCyV}-+?L*3bDmwduo%7PldmSnbf4Qi>h+ntFO%%;| z_t^7ht=*fAP4HBsRdM%@_DrOVJl(}yalhb0BXdEF`z28|j8DMJM0K6X+rUPulQ%Hh z_$I=nd*_{38f^L7a=QO7nxJmf{CR~quOj@gq}Kd}$Zkp1goBc=k?$&*azh#eWqW=r zAC0l=?46xAs}OS<>ly{$SbaR$kRV*?X$$l94F<8zzm*#B9OH;hpT^rxC^RO|By#|sVP3}{v;C}!HMJ@GGv+i9k`T{$qzjaHFh?^}nL44+vWM^&XoRi(G>#j&#V z?nFuw>5p?w@$BzKU*BzK@RKu2=34%Bl!I!Q!S;0_tIV59>r??MR~f0Gtn0d4W{qWWE2uE$J5KemG1}6wYFI#*rg|V77LY8xCp6_Y#k@j}2>>(yv%OfXrH!tb zjs$CZcDUW1{vZ*5>%7QTaN?jgkyeYb6@gYvCq?8uD{xhhJz()0BxczwaUnF*eS`QkH6ou zTLhMjDHt{mkS<~=-;ZW>{C$>#G>xd+Wm*gO{|tr6bZ=Iplgbd}9nQVcrmW`TH;3qCzs zUeWwn&konF!;O^lO?l3TanNC#Md|GB_DkS)R$7bd8%`&hmDL$5IqbHx{n|%krGdemjfk?{H!z4n$Zsy6T9uZ4`6-z0s|*4~dA0L5o*`f@INS@hmyI@UEKG ziDcD}-7%QZlci*bv{9!ToC=XnXd`pCfPCL-z9R2e6;zS#${umtq|2uC04TSPc!Zkc zoEUlg`T`PZaNKvy)9CZR%OSqU?8bK-W8CY9Jv-Ut!{B#k zeW5w0TI6zWazH0tre{{5^J%U5va#%B@=~4S42$;izoH9wqYEkD+9>+S8L7-TB0$>l z5{5?3_QOD}=DV%Q1>LjB{&Kg*ZJI|#l3AjVW2p14B_=FdRjlfbrjc%vxsfU1Ma=L5 zC;*O-;(7ih#WGHm;;E1x<>F?UI4y~~E3vyyGGINwx9S*W+~L1-7OGprRgl`4!Bw13 z2UN6CUW3s`gNh(o#_^}#8&y6K3mH_yvCS*4%0Efp#TS&3Dq=?~EA{h&gvsB~jk9Qv zsnA$$HS$Lni- z;~&nJ4kmvtoph?=I*Xv+2l-cIP|>ToQPz2bP-(IWPH%<^)h4!xn_lA3(Q2Qj>ZLzw zFA$rIs4lf z1*i={5NAm}^MTY#-jKAa5c-+2`k8X7&(EJx+$e84K!|fiOJC$373D2qs_3^W7i&WX 
z_q*xm456dK$LU}_JLK2zj^Gw&lXof{IENr`arLQ4hJeTerufXVc0&jdZH|tvSw8ty~@;ej(=Z)^wQZ{b_+~dV+mr7HvN`PlUKPCY&4M(rFFMS`Rl) zzH^7}NX8SZ?6^Nti(KT+$N?8@N=!e?MyyPlk<#buM%IouW+20TTF+rGQ~kfv5&d_ggc??7y;BdP{~Y!ud)riB=00L&05s{b3zaFGX8+gIwQOfN62k?rAp z*j>Wd;eTSr>yK*psKm05A8GJ9KFU&SC{wc)E`%8^dv0fItE=AET{(c#@v5E+6k$Ts z!J1mrk1DOpZaA0San1nQT_=-pMj1>*>Tf;(*_Qvs-)@Pl8B8(*rbpD#zxatIN!u~U z1Fv``KT}q8{7!C~lTnKM5+NjacYPky3Q$$i<7{4v4|G-UgHEwWrEv0Nl@UxFu=*^z>7N2`m<2JytujT)?|Yiz+JjZmM=Bj;^AQv#ql4G_43@qa1$jj~}BVCy=zE%Ts^yTu%TA z`5{rr*?v0{#PG)dOilo~mojMGaLj}0;BAQVX?~x&Cq&PdW|et(`cu0LEvoJlAkCJO zB%nY-f3{yDWm0?;&v7(Uq!mhuOVpq7fWqRx0`=+U(6+?xVO?qC>F!eHU1UJBT6f89 zCh(5#lvgF!xjF?9d)do-4lvcy{;COgkM+U%4<^IPi`ETRs*|PHVpCnWem+xYWpI1%QE~$ib)E zmOXq9$RQ!Q@hW|0`-asJt&_@A{*`Kd6975d>c(KaGKtu~MYzD>%FhNqd%49@6;`Yr zZ}-(~=5FY%7sYc0NIOgU9XEATmyoC_&qSJs6qvnVH)GzisR|QB(Wr{Y)~f(uNYDJ_ zQX%6eyh_F`;Crd@ta1DWl#aO$Qx$oMwqsNgwYHJB|39^V4u7Ie*W+Xw+um_Y5%fJ~ z#{{pOyYoyWecEJ-GN9-x&HAH*njt?peh?St<)Bx5uTaR*QnJ{-urB9`v8UJR*1RgD z_atRN42l#4=ckn36=s?3DRkjY^PZAl41*cRml(|;7Fj*G{?7c2kP6FoK^eu2Gy2ag zS=Ah7W+x5To&mhaD}GjkaraB2qQZusAM^STKFm0Q9VJYKbRYLe3C|CihH6G|G%SrL zo2I7)MI0NgNN2d+N%QsAvHNYky_yu|AKs;`SqZl9$~V!EyMEZsTca`Ivs~F){iw#b z1WOiXTcT=g{Jzd@QkK{t%v7kbF`dqw=+-cLG_jGg*S+*SXgg5zYo(n;!KBN30ZpO~}=4Oj%9 zu71*k8eV9pMY(^v%4YU1rMZ!^0!h z8#3&|=L=k0bXejZE$Gt6r(gSQeRjdL7kEbsEmuGaj2ooq%FKA`n=o&V1JnVKRAyek z)1YUsvY(|2uHEau>BH=X@rMyER`qamNeM#D`mo=nPIdPF78IAMO?Wq(7p-9?syPq}Wj(Sx}#{K%4d3u1df}_%q3_bhvN3pg&j|o-Om1Ub*0IoMdOF zPME?$9-|NUKZjsV1l)2T#ClBmf!F`m?em7qI_Ry?HvO!E?A0`8sKHLL`K0tJ;@PU_ zfa)~7-C2IQSMb{vzy1!fP#{JcyFY{+?weEDRif>;0m&8Olj1m$QQB@V1{hDY%^`tB z|H(=)xe=3aPw~_rEg|1d%i{jpXD`l$>lfdv`@Mqs86hUXvyv{^+h-U)A1rguhB)eZ$GrmX1@bN?`u$SR(q@Zx1i0&Y|Z3q$YS}cxf3PyUOTu zx;mQWa?C4;E1fyr9{TBFiTd@bfJ;e+ei~y0 zRAE~3r+VY5+d!9zTzp}}k5%C(RpqYY3VpEI&~NWFG`kwAh}cQdJBVGxsp_dH?{yV8 zylSr$vK^C|T7F25zAJ%I1d|jjs&xJt?!+GfgqKt(AiNy+54q_wj;8}txm*sd0y55_ 
z%Dz_Ww7G)K>71CItOSDmIG4Zs(@s`HzmzxY`6Q)fXh}pLF0nQwYST#Gcz3##rK4OY0p*@gz3LUlkG!jVd+s#iM-SbpOOS3{d}+#}T3*FJ<7|QT(hBY|LfF+l{hDTA?0j<6nvrN-ulvykDYdOK8^AKG% z9qL}=m`pgoExTct-(M!YZOeNzOxH3MW(AgS{R2Xt`yQglmkyFkz8!G#rjO^ObQRhL z`|fS*^0iV$SFMRE&NxmC1hq{0tWVf9IM-_cCXU2=s2#<=Fj54rKbd1OMLPrfv# zG56LSKjkO@f{9CxQe)boSK`z1RI5r$j2b z&K-6089ufs*slFaJIG@Kx%0#;y&&}(dr31tNu0hl`p(&Gy9{RB7rsBA>2E18%flC=m7hziGiM29v(#bU zk!@4Mr{Z-cRN-*xJ44n$hdQi%1?t!PPQ!+?y?rgX4y)KVR6z6VWVcZEw0m))(E8;x z3VpY+XO+rvU1oYkiA!?il~>7hSt^+?x>gw=_mnAE|I1|e>RpqJ);B>eBKOp~>LS4EXnX&HE}9(h?l1;WA=TEq*O z4!8GAG5=&sr^buUm>1T_0a^Mx=X{!8boB^>X9u6tSS*5CqRqgikC9hWOxPd0Uo|mK zq(}|0p>|_l(JLNP5D17^(?S;yI~dv*942HSw4`qZJDhD#Cw7Bt#0fYmPbn!*y-`H+ zbQKYldf7{S)dTgD`S6V(tx!W7QEpEqPiC@f1?RnI-q=V)nmzxQhorVNR{x3f0y!V< z4`et6`t4>u?Z2xl{U^5Zs5h^)Jn20ilRq0wa z)k=p0I&}A^DP4=S$421Qf!f{ToOhF?(2)22%TnU@W0u{IL-pb!ApMEf z9p{`;=f2HxDp=1B*4k!mNL|>y(~UEGj|M@f#I+0`*5QD<;?anK-|)wFYuyCao1&?)DEVvnSNbrN5^%wTG58-+|5>d^7>4l1qh1c;am3B525f}7O}NoX{EUOF$Ct( zwSB;E(hAhSZwsp-1~ZXeKcg-<%}46M0L+%KblHR4@MRf%6!sV#b#2(&>Al{p*JvMq z${8Yaj%~o5`SjE^0=jEDI7hH(cuzTy?B9d2betPenF_D^*o`ivt=%=U&gPw1EkmjO z&wxTvdkC17Oa|rIl|kAni!Vcq=%;;84A*!Fc-JYD?JA6HlD)Mojw(I*F=r&a_4oYx zYM$(CB;`sdfHbnodMVH*Engi%hh7%~YD4M!+{#SEZFVZq)9n_@_UoHBL*<>pilCZ3 z;rsS5^u~}yrO{t8-31BVb%}prq|-{>w*5wCn2>k+UOJ>IB+MbmUcXH@m&v$L)a58z zl{}PG2-{!%pw*q?KhUoRVDQR&)hl@!mR5X(5m<4(a{4{wgxx`i#X&@)ob>MTE4%vP zK8RtDIQ-joTX#e@6q2Jh^Crv)da2KtW-}|6Rm&Qy*W$F8M`(RcYW0w8eHAC33XmUG%n~nCKbiQoq zl%85n1H-J6p)fx26gtIheTaa3l<}EI2e+I*3f-Dpw|2?uTZjf;5|O->Xb!aYH>7>7 zc%`yPtPk45$^c;dAvLag9=*TU%1Cy`&qtRZDEr_#j4Ez(CJICw+Ju>m5rR6C@W9rS zgcpA8&Agm>Q=ETr84^H@Vpf)AxU7d=0^*JRB(#iuc8C0PQ%pi8K22Nh(%>sk0w!TD z@(#fcEdF6gz|rn<#E53AF(DZ>d=ae!n%TPaTcsj4R|vI@r+7#7E%ygPIgkBc-~wRA zfzy<`{bC+HPx}q{+Ud(>_}g(4F1@$UxuhYEN^MQR~+th|RO>Ou@y-{YT^4YJ7kX?pfVFKLGx6q1}3kwdT>aplB#7#+W~Rpvpk$WST9ssu_U_0$KE!8J2^=|A<~P1_fTBz1+_YZECj16;1ZZ(24?_y6J^= z(S=RwHZ{L-x}{@mM{sZ$3XmU*v$b`eMhW=2<## z&gpf^LQPPy=R{pw^5(ZmjA-7%k<@8p?*L#eQG_bmZtWu;nha!Xg@JKm*jcZCTWt#X 
z)G#MfxVtX>4X&O0??Y0R_{|Z3@3-|crOKN^`JA0f#58{zIODKF**cXaG`3;Ci_%(o zXYF6bVeL*FVJGrMr}U0ZRUu9*w5Vy<;|6^=+Rr-$fr-+{uSBfhLKq3$uT<7sG>wol zbl%=E0|QR5%zrt-3;+QSW-@+>=`~5ju|2z`XKcus z7VzH~pA*5ANlZQ(L4=Yw=!t)5=xCht&b6zZt3`l+>-HT9}-Hk3Ba zE6+M4djJ^*MCTK^RJggep3fe<>!kkNxoTLI9Temx7N!+xUVLU2zt9`KKpNxp9$C>H zzP)0qs_DvcQ5#tAtHV;g+q|5{6OT}|W+D(CeLH$=zZAD3{4MBUG*{aIJP!gi;FHJ+ z@=m|Es|RN%OoGZnd%$}LoRy)0NUl2(QqGo5f`CnDcI-WGn>QWEaO+f`W(tgOD_!ST zgmQK5iFnn#PbGzRQ7I77^MAK_bwN*>Q30yK=fQ86%3M)nRCf_$y6>@X7;yQ9-f3D{il&sLAwKyEoumSPpA)m-4c;JtsR|BG~KN5rHUjjr`#>~P*bk+Cb z+P#nGj`YAUY8C7ILQ~<&&{4^H?8ZqZD2bCy@?DAW-i%*TL->9JAjPP`OWHX_zH6s} zSuD~)BMR=I%6{M#le@c>x;#;Y1KpET-izA_PHlI)Cb>s+JVHHJ7ZC~@RZD9Hf_1HY zoh;EYcF3}(E7zGA;*H(>x-&nfN8`xB`$x=t`$Ou~g}9olWU@>iHU@ujgC9+;baghy z`4I*_F6;28U{FGwAM}*V(F!vA^m0xs0_+xgFaAZWQ4{%Yw`!NJciG0DR(kC;awH!A z9Ma&3kUFwkuSNQ^g1M7Y>`Fczgahn-P3A6-CHwhH6RRLwYT<8pwv{Uk?ZXbSm_Y{- z7E6u3k{`T`o|=22bD#GDPQmD&r@eRjrLgam1a)VP*CPNq#yP*Bs8qi8piMuqO`|WF z3^O4ld@x?6{q1zNJ<5+=OZ8?Q=eg-Q3e;})>Bu?s5iU|+36@ecXk0)m))ro~PUkV< z1}b0kf0fWBScC+gzz7CvoOzv)kFMAOOJU2a$2Bd)&jVw1oPT*E99&oz$(r%yge zyk}-GL1SDk-Ff}}WVL&^OJCPPWL*;92#2Fw-|VSB!F9{KfEPjEKlN7vG(!l@18!%qa2QUiNACV43+3SnrnC zbcJw(p|#DwDVjT*4-${LQ10FP@q%NPAgP>ja~Qj9ab*0E8mv1Rt>_-8#P`6Wl9jrATn*`Xnnv$$5WPXw14<&Yz=c`26W zl+f(}A&ugTrJU<|(!A&NZetp8oH}B&=yfixPh!OHcub$X%MtpL#GQ--C^gpqfl>=0 zj57A92Pl=ElM+Do;hX(T*-`@YI+W_+wo4ZO#bH?k=t-pqNeo?M*>h0*ppexa_nsZc zsuW{;?dlZ={@CsmmmZqVWFg*$9q%bGi^n>bruKi#1j zWxW0P+u;&Y$8A4s9(a2oX87D6AJ*v%cMxy@pB-&_t|c!2KkU6{SW{cqHo6fN6#*3l zK@d;@=}K>*igZvAkS<8?JyJqI6jYk@9*Pj8_ZmV#M5Kk@OXwX!?;&Tgx9@j7-z(4m zbDgvQa9s(>T5HZZ<{0C?$312e0OfUJ-rVNU1Mm;qQgp<~DV9R$A>pulAZTXdm?$gM zYNvHJB*nwUcrnodNe*aQM;H6eAXGw8O_@)A?^9_2!Ca8%|NY9yulXKSL|~~_212%l zM`A~idP&aT!{5xlYLU7~TK=`bZSsqKQz?=xAfWn z?j`$@z4n7#jh4QamxH+lmv4TTXXn$9cWx_{e!eEbWYUym~gdP&TolIjacB`D;u3t!g8hxpy@Dy7+ms4vMf6Yy_;1X$y&z^_t z$Tu7BqhYEKs+IF*Yubk5T!`6Ppr^yiW&+G9YFqjgp}g|*VYB}4rkUKisl=mYx2M>u zx37;VWIBk}s&o-mOErESM!82`S}CVa)7g-m<*!M}MEZ`;TqnF6l@TZ77XnS61C+F$ 
z1Hx-QJCjGs)b^p{uPd3<|E$y(10Jz8M7c!ktydNWY7=J?LI1CSmENezxiEXDnUZU?#IM`C&(#iO8!YhI=! znMtx;rLJ$sNh*H=TunlzRg+!BQe;7Ox)Hz}n~tA`0;z08<%Cb=QV6OLb;`W*+@?o$ z@;Ujljl94}R4 z*PUjKK^mjM`K?Y?-HZRHNv`StG|2%%WHoV|7?3c;jy2ZpU(?h!7y$Y=r*o8!f1XwJ zstcW-I9iGy0`B9HeQl<=1F?Z4x}|tGrirT8UOdog4hs|$sS#MW=kQ+YghWM~V%bqs zYdzJk7@uR#`8bDcWklZ$?cWs@1n!<&Jj^!pc1#F6HzIXa(BQNHh($Le-Gpmg$C)Yx z8WHgYR@u*p+a(-YSSv4*W>WP3&Qa%p6r;7fC+2rPhL9|QH{O&>p)IUCHScd;J}Z3y zr1Y7fUu#^OiO%%u^Ph9FPR*J1X@(9$Wwiw@eTEP;vdG*)RGD}8rPGhJDE1sZEMjiR1ol>q}V&xjyPRuq*5ApV-*m9$6_@xy*8d%a)dY^+QXWqJM zw#OzX!6f@Qbo2)d+AE@|~?I^T_c8!9CrFVKCo?EibD)%ClAeC)(< ztJ`M#NI)HBQn^0b^qHMQW3R4!OltDEXvdjO+9Hu-#M1|*Gz~D%!KTJbiWR2U;?Ba0 z`$U?JaxyU;+c3aXqr)mYH?>?|%%jd)$ZW^WddK|d?9zc22lELBeVZzP zr;566_&;Ulnq+hX8i*Nt|nBj871#?-^# zOW9F6433!r2L%C2+iSn&71sc!`u6wXkbh?54(|ne$cfe1+g`fOjW5j!^IzzNx0=WL z=p=ps+eKjcA{>ou=qi>uUaZnR29&-DPiBI7gLCdC(gPq?;$U*7Pb1C+J>1;ts+P^N zi;q5|{Z0=%0m3QSEGC|(Q0dFai%+elp5YG8WayrXSps)XC?g&yrThR|hCa)=;Fa!W z0BBup6KE2d_ko;foSZ38wv~B*y7sE{x}rKHuwuns(%a47?M}OQin!OrO8c0Mo57e6 zWjhfs`#2zWG}Cma~DT@3AL9(HN-7 zz_$=sxt}YAS%Ko)Xwktt2F=Vm7lo_oVV*~aa44b$m5d*wl4v^XTq>Pk|1@hD=P^&6 zxE>NB@G8q3?<4VR+&^U=uBbGcvn&5o-TLOPRaSD1`5-PCjB(JNnF9KPg9cp)Fu8@u zVb>E=NSswxJo8z@-WZf>eR~7C4A$0!o(*?G$b=o(wf9(7p znDyI}oqeQh5-|MBD1z%L|E%>t9htwseCE>o&H^#9EaX4F^SjFP_d9-jOZBF)L=$KL;CiJlV!H{hh${oP;lpOgFd9clC>C%YyE zeW!jq?f)G7*`WV5fqzZlUoY^l7x*_2{J%aBoOBYArg#gNG;kOH7jIsVQFx8kfgg${ zuYYp^NT~D;zyA`rgCPLS970`2|Cg|I<;ESQ3;1mZpQ@ z@1XKKjHQYL>BCXga`bX&9|C!bO$7KC$0sn4+ ze|^BePVnC-@NXFWHxKwX5&Ab1{5KW;119`ECI7#`0tfgu93XsdIy*rL12koO@8f>O zIe%s~0ebj@s4e0cnG-C(2h^}$|6McrgLRBz@9OfCxy2*4Ia}%koMG48Q8|7aI6TQl z3N;sNz*gfrAtW$frP2Nv=Xz;0*sw||jBq-)u+CmnY~#E58L?i*X7ir=OgUhWTwB+Y zF+la|76a#Jm9yQAtpg;UR%3~xSWjE8wQkUW0pbm)X22rk(%uNR>tMzRTDI&aiQ(0S z?0+~1uleDx@Yzl}0!3?}85j*5Am+`ojeOkwp8TiJ@hTA1_B};+*3rKCVh;)DL zK-l)~-5ZjB5nTM^kL1Jo3#2TnNnb{>t86X{kfg<9qi>TXOSG&$oNv10D)@zIa6_Bgv~FngRH=u`B=UPlmT&9I?>O>h{N4 
z5vvhuOgu+CZ%aEh`RwPCOv2;)la2`=s7bRpT|fEy4?bWjyy+<40uHp1Ip-v;T2ll2~@3-6?IhHCls|UiiM(-g>EQh8pZdbR(%fo~NaxQ2ct{ie&v1 zA2mT>FJ%p%_u?Dpd2S{3%AjBYEH5s+i(?&?6FB8D6PlESnDOE0~3x9;O^@{jMev zyW&vAyZVK?Ff548@}|2CwPR!g!`_m5`5cf7FhzRJ#1)gzO{U4Lt2nC#x<@c6 z&39FRF)7@+K4Qt!tc}9dv#0iSYrq84MpatWu}hqmJ*%+8urBpx#nyT_D=n!vWoia7 z`|+~vtq!w0B)Di58nTRnSEFd*_nORPgXA|&WHarPX(FbQT%@sUMjJ*!>)8IUK_J`V zt>}`2?Lt%BPn8mv1^bp1?l20(s(QA5c2X3#_D_uF&{6zF+Pwr3niOswHg>Io2u{7q zmQx&&tw{O>s)b6dQMsb*AqI~o_Uhyv-}t@pVD=H70gZxfTECi!sXQiRw7JkJYW6j0 znnUjU0YDkr;8`fm!h~sTrv%=+dLJe_n^%gyHk!4-fp^dJMobe8*VrLeCy?5t_rx9! zms&)QRXZ!d#H!no9qhMTW6Sr2@(2e5UT`DX0u509_dgJ~?B*woJQ&EwJSv@tXI~La zI;zuml+tQ(d10UJX?Dr#6{;XZDHh$|o88oJGw^xfl@7|dG(SQ+s_WdHOvfT5^I zOSWI6y$s@)ekO*GSeWPgrmY88rI{I~#3&*VHLXee^IoJYqbqe?leMJ}5@<-1Yjz^2 z9Xg##6qhR~sibOYt!-l|=sO;W} z2y*5}xB9N>C#fCnE-~lp@e|Vq?->Q{ZJw@X_aGM<)0e*8=O?io8(x2=aq;l7@Ium# zUmz3nWUBvwXXk_m*zk1<`sa0O9{GjPwWl`H-;C7Dp${fg_w%16o0nK^4-~cefCo&w zUn^z|l*>Wmw~q9T5&rLtM({-za&2WxybpXN#^|hy(#^J?@k}o@6B6oq+AcYln>M2j z(t4MTG?W4)aB;46g@qitwGYe2QD1m1+`Db^pel9J39t6<9W6txMITKTO(%YUu{8T%KI9hUy;Enj>+)kC zjI{*Y=>~cNYL+-5Yjzw3A3zf!Zgz zQ9fK(2^0N@|2#6q^SpnkQoxykJe7q3e(818H=cr@Q=>GaBw`PuD6e08N2!vG*j9VPi z$@@UcpJd-1yCBZd&_`WQYu(3qyJ`lT7)#QHJLfxu zj5CMu61DFi*1}$>8uIo5kGU9T*1hn4c|i^{f4fb~tyozxfjT;c+m4S+w~!X?aorgf zhfFzH6}<6ph^e?9HFvx&n9m=q-8|jvo55u^8dQ1;G*dZtl?m*w^oJ`E8{G#E;|-|= zt$>Ehy~47{43><|^>?tNTF)3ML%H;*{La?P4jYYx0BmlDxm#&T6*&;^JK5R65Dn925-&5fstl#6S|K>M3X3QZg=mk_&y?gH)KlCS;la(Fcj}3>~C?0Q8E>y@Gk>XK(aR zSEuuGCsC@@OBHH%jJXl)cqf-TIxL9VF*F*`9s$9WhyPx{sFWdeB&)z+oktk&CE}3s zZ7|hWCS7DJd1s(ZJHtJHxh~(tU#8!hKl)8`EkQa@Y;4q}?THe#U7KzfPM>uLseL(% zPm8uU!MzOi>~G}cX95P_nqOUfA~HJJ^kQc26JuwMT5%gM=B=i+p=N(6NWDJoym6XMvU~M-kLpBso zzJ7n=Xt>68#loNdG!m{x%`P(YIpBQnLh&)%*MA}vNLboPDqXMKeH1o7zeGMn(ynnT zk_xd3mv(}k1@Df=!ovfcN{TKwM7E>8?0GLU>fvrd!JLHbKq2N~bdFchaty11G^LMF zuZwMluS~4!+FY?8M66kaJ)saa9liU#=lFo=)-G@qIjP59 ze6yYLoBf}FQ@qSqrmC<$COjTEGAHlGQr5M<#Vrw)WbQbDVOH^xSr_ELIkpBPT~mIY 
z?_jBs;p>n7c(}e2#-v@a;j=r-*clv^)@XQr_bai;*QY@hh?Or-c^jox1eEy(m+eK~ zOb5wg73*p}#u7noQXfz@1K-$qEiQIO0Z8N8ABg8~4Dp-+KvbS&s!2k|DZ%stCa$k_ zVF&Axqr?HC|&ZyLi=ge4^Mz&BJ9Lfo9y>t=&eK6YdXlx6co+8(yWULgrcC)G1j+b{ThH zq_wX@mU=&iN-feFSdP|I2YdQ_w9xBPOg_s0-gDFHrXgL1nypfSZ)5OavjPAt?i&t- zRJunU(nFSq3rofNb^_^iep1HyC>1V>lPc`Giszsec7CZCKbb7S8Ec60AF)q zKXu|yUUKyKAR>T^Kd$PR@V*%{(t=DeJ3p4>NGe~ZO-kGhY%!AP$as_>vc{=X;hkA3 zysS;P2z|5M2J`b;n6D&>E|nJ5_J^d>-b{5^8Op@(@@_js>H&2xA0mU~`l+S7QN z60c4}AT73j{-9H~T4wZJ0aKln9!MvMDwf|Le(E>s5YtYTe>0Wy%nOegI_t=G`#FPy z9DuIsy-y9Dts4tJg>v`G8_Jt!HFICn&&qx&KW9*ZNEa7 zhpD!y%o0^095}7GD>j;Rn{xn`RX>NeUUWmZ4`w7N=u!5)Q@_0gRz&Hk5?>Q~O8%&K z+rLiGuE4>*M>Lt#8JOxIr-~RG92l zxpUySBtz5HAFlNc5WzjXr=<9krneGtmG$uS;R;BW7x>5A-O-9DDj0;#7RpBun9BEz z@JNDTd>tv8Ct)GeHn3*aJ)65Yt^o1&U3_uVIcDg5ONB~w?I~%t($3* zVJ|bPS7y(W!g`nq%W^C~erte^E;N0`xES3=`Ak_fV^F8UgicLRR=Int zPy){YSn_1*u9kU6+zk=tp6?Nbb>VQh>U)G$+Ce8}E%^wAQ9a|M%-Dg~n^b0GZi{+v zq3g}YyxQpskhzrr&GQZTJpkOVR2uKEBbj4y&N@Yo#VY)>(4z@R0@}H%2CccS@<}y2 zL)NDJtz8Bsh2uwu8)pgl@~EtHw`{ev12BEBjt<3G^@sEv1zq3SOoft;H(t{qqd$rp z^I6Gu6#}x0cj^mOHEMk(OA$?TyUreQu-bLC-~#|h6Kd|L80bCa%N!147bmt0;-0+m zGD+0c`977%gj=H6;er1rvBcifX1O0jk7f|Dq@go?ROVQ6ME~O?Xs;minZNV9w&lCjE|?sPZc?J2*NJZtZ$FE zfsV!Q7pf-APdr@b69Z;v-e;qDtVi5g@sV2di-K7m*^f}&YN6Jzjbz7#grvGIUk~Q` z8`U+JqJPH5wBW4Z$R`^2yiBTAN07}(VW~DG{o;Ly=TC{o27K}s>TFW5dnspVe~aFF zu+k30Xr^4l1r9O~@JAJu9eeMWzzf;zdqS!c;W8toYgt|JgA1fMA~B{nhOj##;;fXq z<(3@WVWCM{oU9Z6;^)qZJgHyY6(LKQ8=WHtpb;!#M=H!8f=N?j9vSjU!F4V7*1bI6 z-`?$49l)KGTe98Gk|lS%lA!q|4nQJd-F!UZx?R_apd^8*JF26C`N6L!MsbEx{@Y{{ zLozjq-$zEKiT{rCOI?Qq;8=^=yi3u7`35nIKJUbPQNoSJV+SogS@_iYl`v+5J|IEl z1bLEt6jHihG;`&yzq!!Gen)wL*6OUFe?TcumM}G0I8|ia$ejHu-*2IH*DDKyI!Hj9 zu)P?#>)3T+TvPx`FTxLaR2sFUlV^(u8Luo~p+W%&D#BWmPIm5=In_KHzW(U4)}eQc zI@#*C1tO)<0Zy^ebe(RKIy1Ju7=WIb7e2uaYh)_UZKc^|b57o%IB}%%A%ztAeh?5G z=cMNrOAdHO)(s^67->Ckg1|6Jj))4ulxwO(%U{q;qvHJa1Tnma1oT9du4T?gBuH=lyI z)Zr-43OK|l#r#5q*3a6>e?)wvgW``-) z6miUUrU3N?Ym`eQKns|s_F7pnPNrI=^BKJK&i7n{S1cACR{@+~WKFpm2z1Y=-grq6 
zqtIwBSfwVYhD>1^6(}h4BkBx=TFs%Z3DM%W)v^bR++HynxHd zV_$TioHlW>-){;sGKW3p<`8x^B@+?LvBPWDPlY$9#Dsq}{IpD?6jP;4iB17n%M{W( zi=W616YYZx5uysc+1!KD(APGZ_i4Fx$ik2<1-BjMPF+)|} zm)&p#HeZ2)5??FJqydRyV8)WiJPIsQ2=OK$S}V5qUomZJ)r6rPYSN;EX*V;@^31O5 zF(-#xzdk!qUZ%U(UxB|rxdF6Qwv`W#Q@TTz9f73rm!?}mG&zf!=N=fbZ%^HY(Zf%t z`T3je3|)oI6lCjZz!QCC^i?#3ZgXmChTg+8%iwIE0Tqrq;m#r@2?CvTt}vG zjdj64)EVPI+;p12tdFH1H}GYE}azu z4xBg4v?W3(fYeJWKy3S+1q#i4_!`ZZ{VL7Y(hKVYkS65tfC0BO&7hxKTZ>aLyVK|i zvV?8JU07E0#Y_C^iuH`&EXxdC%Gucyu^aPPZb4D>jA_OVsztx+KG158frDFne%q15RS%@)0V-4@fGhEOS- z5&o4|dpdGQ1DsYdo<{)7;lJBD-Gd!^nKoSAejP~`9r#n#* zi4Ir}xzKyNwkTYT0ME%%GZ2)n3UM6Sek!x=op6B_SXyTle}#o_&9%N?>pi*&c3-_G zz&pLA-ZU2(X8wV$^YXb7Pu>5FG21cUS;AKZ-<4pRh6wMExnxRLw}Gk$q4=4&Jict7 zQ=TQYcL@U|T@4d1Xa($WvepGsbfEP}6|67MDO2C}!qr4UhuaWo@1+uK(s-J7ep~7; z^%!S@um3&zC=JX~mihz~`0Dm}0vi50)7jC>w&$0gjXq0KJ08`pmZ!m5B`XfBW*d0eCX#XTrs$Dpm1 z{V0%>KBhl+aHMku53*5-1#)mnu8m8kz??1~U+i%1TGKtLl|t62@3vlpi_5$?Ls8k` z#gddhN7d`3Hr*K}*F-;&oL3RMl5xW??wU{%-ywDsGp;fL!8bmf>U5XVIvk=97?4td zD%k)`=mN1WH&8s$DSKV9&;kK*0jV!swx*&#-~EQzH@s!V5`V3z%@%cseb>Iix^E)@ zXqc;A7NYD%!ScJh9kUzZ4`5+gnEXumx9Ykhy^Q){C2sKtgO%3S{x>@cQ3hqqPu2M# z()h-G%!X^U?gdg`+nG4{~PyJY%0c)OS6#gOgt22yTaadW9LD`SAH{9|wj;7wLM% zwS<0C00P2ZN-VYib zt4qc`m#x+U{N*>D6i561^iMAGLpA_FUGpvSYF-s&Q4Cqu97_bC zLqQ%@3{SJi_X*+0i@L5AzLayjSt(BF?m^%TR>Vf*P|K1yc^y{oEBAwZW+;4&fJh~qZ#+ZYb9OVUu^|0-<+iZ`A?OVp zP~qIjnMLd(dYq%3%W(whGU>r^=rJ02i9O04%yV&<^X{VpvkJ!ZB4g;&M;d9NUXhm9 zcvT>?WkBT8JrP!0*DqlV<$K}j)3jw~*Abx36eK+U0m=MuGL^;Vu{^4hJF(BMIh2lg zr4uD(6Xm+zho;cMQV8(=f^w{!zn@(F;z)!RLx2h*YPPf!6;(~|ZA-kjK8m;bah%D2 zkKGJB5*SNWUJ-A1J}MGsOFa@TuVA* zZ{vZ#*@tY_DSLlqDGX1hQjMTT41duyeO1b97~EZ=_#TC0*QA^ougg?Z_l()ribJad z3U_b0(lPB_I_ql@ur+<$9*b+L&T~wBDF*Bi@iU~Lc)?5HNUQAeZm=LG94ImtFZuD! zMfCD*d?iR>FU)%cs&S9_{()Eo5izT|qTv17d5uNe%c}43(rLX6Tz=0=15#Mpu|Q*^ z{y>&jNro5%t(*=lQd+ftC*fu@STw2@H}XKYa&~vgp>5e{pv~#cG9O7_RCR5XzS%p? 
z{RtPjMMQSPWs+OfdIG!%DHKZexq?RIVUU{|EnYea^MNvdv_-*)GiumDJQZYy`4MMLo)`7=n>X;^+l;Sv9InAJh`@Z{Z-cp z4a@0KTd7>ox8^~2>;!wEO0uy{a^7Ln-HmRBai&Y$F+WKPiwCC)@7^@R{1>(-^V%8P z1J&4Bnk6J6Yi;lkwDTJVetm%ZtX7>Q?EL=rhrN`SZeobgW#@f*=tP^2gxk-7@C)dwe?p1~FuY z%O+03XyRqccNX>fsoW+PnabrsLL$4gZ4PrvIiP&OOn z-wI_30Lj8;0rNDk+v=FcMABiwLVNK%>&c4lGga)YaNO;Sr-XjHD}oA{%OqmhRUr8u zZaJUSz49;-U}j9esu<9bJIWjE2jzm8BDL@JrD*91qkEJ+_}- zyWuGRkT#3)9GMKmm+OEoB!YmS-(8`ek$-}}Al{b{b6yz+D4&M{j}hg;IJuFs zwLSldf?@KDypZa7vKdJRYJe|!NN8=nTA~qa$TPDh3{!%7>^!#Wf0y2Uk3vg5sy=`u zF~fx4p?mR6s8UGTsq$j5rRjeOdH#8MgBW;BlPNB_P3o&cPyK!{vZ?~Z{ET4Izj4ki zNM0wt;rMYXu~Y6`2Jvst&5(Wec+n4W=icw{sk+g4cGH9T?e25o%-=Xzo())M1ttZF zbFJX-YyT($=Iu`1p||U~Ve)S@5TOVRM_r+Z?3~g3F1S45kgR76a1f`bJAZkA>-c5d z0v6sNxT^L*8FG)T~YA?}s#r#7}2+Jn3+3^$VNG0>pZ@k%52)iF+L9@8(YRE7*x=51_C zJ()oT{=Q#TVJCX-*kN`+;TB6fT&e(+jAmC$J4uBiRL(PYeDqlX1o%}~mM}7p7bT|X zhicbX87-3CJ{u8|%&;I?B~EKqPfu0~yFg1_bwEwrt8R4^DybOF!Ir#U`=pFJpZjTv zV*3JcxNX4kA@vW` z#8Q}j@Z+8&C4K|)pDnUP^?I_DxZ`;(qLi-9Mgnp;#m%;BiKuY=NqDep`fvfkSeS&) zeRk+8PC&I4^QJk>D^y&tQCPiNnmSwOYs75zqgV^0jOa6E5k1#|zK>xkdZ)U18!QYt za*bdCtFl@al%DqU+e$WKI=@W4FxZ);0l&jI#?85Np9=@@1;R&_l!E9;a zY=_QvhGG+s^GXP(G{!I6p*@_t?nt0)o#Xl{xUlxIdCBVWu5T2V@dZ+FlWILe~N7`RNrW_Vb%d0?2C%!B!Xvx$l zyfg@jo0F<+v^sOF+hk?RV-SZRa)+&n+Z&X)ZdN=#D=SF1HL z-lNDC!)}NAE1}@4m&5i>A$4$@RgYU)Z3T=1S@wliP;JS)B}r$M9BuWw{z)2Uk0JN{ z-V`lrIm>8v?yAF3zgIg)xXFCWQeTg203ktOpL1ozsYtJG{nWd>__-YG+l{F^*i`iT zh=aKc1TifqY;4YH$^P6tbwP(H;HkmkaWGYh3biM!5VZS znSHP?n^L41`GvLjP#xELZC!=q!8yVA2jdMF0wOx)mF=$Qhbx8TTeDxupWxSi?kZOTT)dy&IXHF16#1_qVrN_jQ_FbQ8u@fb&5o2EIwW)IG$B^3q;2Ow#sr zL$RYfQGiG){_&yR@SDqf<(E2Eg&ilqsHlNg4^rUbjl(^Y%efg1{QUO^tcR*?3WKQy zHTM#H11d)N+1hqF?~a)5LuszZ;U=2hBQF!`@*#gBM@wrhH`P7f%Y<=SdeM-lFsaAAVM_JL-rgzBt&>7!aXVTOs*Aj|9n6MoYptaDD$k=mem1K zT>Y}()j(r{)@51{JsT`qUTZTch8YZGIFXh7%Vg17KKQFwg&v_lW1*xSifno{abx3k z`OV5U6C8rPjAi3}HQDjQ&nHSHEax`Dzw*2tdL=qkcr|(*u^`ZycM-I3*d1xwF%pvQ zMjt@B{KlVJ3L-rEEYw3q3t!SuAU>8=%K2z*HE0K*s3NY9{u$r>ew-mWsUQjNS38+} 
zIocWte0iLKq;%m%WcL7qZ0}dbpoLxwGl{hlpb;7|Q<-u)#iMyI`G0Mjs9X1Tp6=NcgKiYxUbCYWFC%$;SaMcb7J#!ct$%A`>T;g19nkw znopzBWv?lodQ*M4UD$8)s?^3R>kV*3G{qez`cQ%6vxat$QG;UCg&&4p z=4-p^D);F6T;01Fd9OewsuxDQzBOCY&?5Ieq1>!-M%NO(W=;)9-?R&(;-st~*5bz> zsN}=HUwRm*5IFCUz--x8LFewm^(Kwg)kl!kOTd1_8iiUg|Ml7@t9rI1U6*zkx|h9c z3B@1no3897@114=iq+G-?S)SAgXHTzS+nB_UBEbHmbW*0w_*=f&7OS7Nb#Qa>$`P6xNHnPknSvtQ?xe##T8LAi)uXUU6mU@F*`N(R$lp0Y&v}? zc7;JVt=3tq(vIF4x$j4uuj`B|F=IWXrwFjMJ%nWl-n_Hwp2a%vRdIad?oN+aM)fv7 zi4KNHWMuM-OP3RQ(c1-F*qjFM#@(Idu9-6VW4DgG?=N9oQ(qAU2gQ#aC$HBi7(&)M ziZI~=q?V&O#V_;KRaFyeE)*igm14N?4IW;{2@}^h1oObrhuXcR2L2346&ilKWNe*6 zXq;qI)RK$z?$soJZXSa8yB!)wG*ugwKs8O5cv2;DV4*FO{UmDy83%=~IWig^MfZgI zzF~MH%jLZdicFuGwd#3?LG|tfwUm2ww)Gs}$Se}!zm9t^a&(GfYrA%ZkMARIIevWD z=zflcZJsqXI4<)ol5hCaub5rd?2O<>ovMl-n`A>i8xo<)<{qmlS+u{kekQ%mJQ&B4 zLfiys4_%cTpM_SsG46On*eZQn!V5mAxozkWS(O{u!gvOE7}4YhB9gd84elX`l-mP7 z%Pw#|Z`yXQR*kfM-IT6Z?J=sM(XfONJJ%KyC2EhDSodtZC_%|})XKH8OTqD_lhoTf zA;L&y!p%;k3k+1%m4@4thTQg{Qd%`eDYmv*=-%6cbD?U=X z45rFxZyz68PEnQgTz9n)*yj$5k59#rASG0LQ>9y`v&^o)Rv_L%3JWGL-!I=<jb@@v7W;AQ&-Yd9 zB~coUFwe)!em+@*xH_~TM%zxX!hV_+4g83xjcGfO@{1!ocU6kC{8bi`6C3Q6KH~w1 z{^3649EfgsMw)wX&N;`6%ovEik+R^b8a3yA4cbYeA(i62dY?pSIkH^lS#Y_3jmXA8 zjpl=F-D>@BHju63kV*_0=+IlU|60%95px<2&2fP)cy-`#ugl$F$o6thptvYknTke_ z*xvKXt{Sez*I1b_37g7tUcwd;QOVx!9WT?kaji6cRx9wf8>~T+PJb}fk97JnvJiHs z#rhDOYkFeiFkClkIQXg{N2{A_1$s1FjUI_JGuN`T(MQGb**|Xs-ULi_M~JRyQ0H27 z2l-|~%rl}|i;lzR;-{{k)+_uXTsrE%qMvq6`U#MrmA$q>hht-J>{Qy11$eMnK5)HM zr4_^1MmoWv*ZwZ;%_$tkrP6(K*Ln-#=1#hR(vz;sXQw4x;Bm$!O9xbTroQ!hR`zz!9;Advs-kEIs9~3P~qPA6r@TJ)SAU zC_h*39kNVdr5nOpm_N2;P)Hw z`9bVJhzaA~hr-Q51Vk3dfWr_zLD_f7D=tt6K6yAGdi~yqs(EI~{Ikc5-E6u~r-B#{ z9-(vWB6O`#XcisuERgCYM4G6dEEao`(XaP5Mb>fjV2zINTD{|>34T$zqQ_T8xPa#P zOJyO0SE5f&uk@Ll-3v3stvM)guCpO?Ts-eOycE99YxQvq!@GYf-v;o6A@kQEn$>|&C_*TC&-`bTmjp9{H$MpTA5X_(Ev`-lF z^^Zi$kK@#qYOqaU!9>5t`)LqWMW*qt z`s)t`ZS)1p)TsDky-|LhaXZhU!)y6^0(^w2(V*r<@)O9tdu$04&ny(n5Sf~mD48h_UanK&t1aA&k0a{x$AFCfF)DD}E4!)wuIa6hs@Cb5*)ZRxZ 
z-}q&-pf4kB#6QGidhklLO;$r{X|j+v=;wGW`r=#fUqSA0nehZ|#oYPbam3p7Xewn@ zA9BpizXGX~vW^3}SqWK)B z&bEmEC6O=d^z!#Mzvq_NH&S|imbf#!^Od#2hmSS0>|P*z(J4V-cT)Q*D(rzKu@{xWu)ZQ_^SGJs$4}6OwJ&xn3Zx z>u@TtPGD7;gA*`i;SRQSsm+%2EOT{`5oJ$!5w}LhSmzWICf`_)zrSv+OK}oZ7&D5Z zR~_NPI~_p=M5whj3-7ow0Ijk{;ckDrrOEb7fR#*MZ+)hS4+jKz;yM#wu^Ix}2&ga5 z;((ZC4=o+fQmA75$<}_H*!DBE#;E9-Y5X!I?qVsk0>v+=h_KlmjrU+*d8=4x<(--#7pqZm z(4+mn@tnlhd6cK2@SBbwpM84sr}oJ2{aN#Gg97AbWsCyTo6B7%oKFadz`8dfH5JCl z+>>$7fn)*~PS+E8d;t(HYHo81h}O49bXBxo_Y4*APf2h%ZIQe8#e<$R0xWUxqmNdo z+&=6H@7-sK)T9Y<3r-lxe9By(777E9_YYBqELMZh@eS^rH2M|FpX%@DZR%J-0+$?+ z4neESZbF<&^U4Bo>g%5)%zw&;7iA&8#!pRhE3fS27lx+5_ngQsBf_ZdKZ6SeOP>J= z>QkB`_ampzQ{vyf~G?s8$`YM0^1pZZYtVJlXr zsoMbMF?Nh|BO5!u&nZd}Xsc5ypYN+fscS2is4xX@n&6Mt>r#HOHj1m{F3@=pULaE` z>Sp39`~<)%Dt03!Dii$sI1ApkA65y99iY37!|PC{$w?~XbILzLE`D)Wg)F5kN!Si- zh=ksMx+)~ZOHf7zMT4`9_E#(;-iB$W44{1%@BcsSy?0cT z+txR_ttblUMnI{SEr<#crB?+7lq$Vv1JZl%L_~;mm0ly#Yv`ST(xi7nPv|8;fDj;o zko$1XyU#e^+3@~##~tIl)UA7RYF4}(}L0%On2=OS44DmNaTlx@+$-XJcg zaa1I1y{R9<1WqE~#j!lPeTCk~LMiAG`pI@Y^qlJKAL@-iU=>}2t38;tJ@FVC25(zU z*iCmx_K2i%;}5arjU=}2ydzh<0JF{DEk@d`IvcYf!6h}G3&VESQ;(k%#5u|L40=ts16u%(O48rG5#ND~ZIC03Yr1Q}zoAyb1~*5YYZ4 zj6|!LY~h)_Q1)R(UM_!vn4r>TZG_ud;|-h2vU-ieeD46SNc|DNC5ARdYC49ID6I^I z;DST;dJS>aAuTK6W$QDQ;5lM>x`V$?Dq@l|`l|@q-bwdaQ>x?qE$R()w^O0;wHH90 z_r~6`mt~-T=63&(R6lP)Jv!Yhc+q|7X0p)t7u0-sJKR%G{tKjvHSL2L+Aisr!9?Pd8zn3;swUf51 z(l^RFfm7-$z}c`b9o^qeI3!z;LN@AQMQ3g)bbepaE~}>Kn&M-gRI0Y3f@Wsk-fozR z@wPG5mY3k`ysPqJ!5rSMKU(u+m{0!2HeX41y4Jw=ejm2w2SV~iT~m|gJRR{s2LROM z!_Az7f(h3S^o;R(j&By^85&LFS|t&}Epae=p7!~K^~LIE?X>5-KDtr>k}r#PXHbWI z4}}zW_I{%;+W{20ncq5C&574epe)F`te0Z)0Vv?u+sg-XEvAcOT&aV+=*amkQRu`IQ+eiQP04yS1M5Ug}fQg}+-bx7*JH*kQD2xwlWqMU{ixhkovWlrxth zMio;3G8H957A=knuREo{iCNPhQqs;Xqjwj!_kb2)OaAyMryjdSg@&DWk(!dIDhlDLd?b3bw#`5<%djyPzq zsJ5%3+}q}tQTKp|_9v|e8WcHgam4H6K$b+hFh{kUJ1gqFf9=h(`X@-paYl9y?A2*o zJ$2(}!8^)XOF-f&({ep6c#(Cu7<42qCj64wnKU9wb@T%oV%?FS7xwy#skErK*8(-`>;TLG}*h9ZW6O*2|@M zxF%6GE{8^J=#P6@I#u~!Ajvt(|ND6*A43gX7rF4lCXknxVN?s7ke 
z%(&$G(cR52XQ@GPchSe;ABt*+V8FRFtKubTg#?>r?bM zl#hZ_Mf;5l$dQr5shZsJoS%3HAAZtAnu|(Ol>VM;UCWR+C$q3g^5y-|Cw^=;42kiY zVqI&OlauK+2g_7R7sc-ZdAG8qW$Y7#bmJZIJG&=j#x66+;(&71fKBwhChM(v=z(*^ zxrG<JrXgN;h(BXb>sto15NMsp@NSt@NsGhzX>vA_!Otnj7jV z}XI#jQwbVo6xJkh= zBh$P4gVy!N!P`CLteZ}%&q^~5>s3n)cNnu82r1qb@8atxYbVMpT7Hp`g|D*lUUOHU zfK_XA%gi-E&ZEc0;{xmMGP>1uYQ=pAB3Bf*T=lV@y*7^VoQxY~vVhU|hAVj{mGA9` zgp1c)c0X5mZPdMe6JMrEekBKRwLt?(F&-6~IWo|28R&48_hZO;W#EEu9baY@t~sZ4 z9-Z3c3MpWWmJhCyHf$5d>o@~1+HY7Q*{xl}oW35ddv_0ABRJrNYdOaok24>iPtJS{ zcsCk`;?BKsVxs7Z2Y1K)el4GJ=YTr?b+z}eu1KLK zE2>{FMK$fFkuC15Uyejol;7W}hk2*VEC-RJez)(#42|7eSLlDom6E5Ruv5cAt|;gl z93NJaC}7b9A`+JYXhi=sL$?gvkAsxuL3Fw8+C5I?+GwDPH_y|>ys@A&K|s7DZ;)5& z2fxLPEcV?+5Lq7CQA%2c1)}8tT%~x{WEy^%+1!Sb<>m&O$=Qv;WH*atp!bRUs9c3O z^dhfk+=sgGiCDRY$o9eMV$EzNpb?2vr?Mv~3jw9R$oAC#ry{PGb-xk_&)U}%E2!q! zzp@a~ouhFgn2H$I@3=vEl$8Kz!xy~I0PTdYK{7<#Si@(Xm)^%MJ1ogn$3ayHaYcEs#67X*P z-}2k^Rc^zbF$3K88nGL00%xghsZOXZ@k_rB2n;CTg2V_pG02AH(#;2A5?`tTA&?g$ zLuZa0Xs3%xe|OfWa4yfru(gGIT+}HSZ(7e}f1YmiBZ2ZYEXIxKzS^Jim2+?#aQPy!X6_HUHV&68+CD)M%C`ImXU^3fFs3R(HFErRzkiuIPU+ zPq02q4NhqE=O^WBD>#@_aWAWO_B1*I$xn+ZqNOtfM-6C3KjZ>DIXf`|&$O;D> z|1I0IMxOG8Nn;uC_^Iz_=n6bi&owE2la3hvz8v4}=7fu3ape!74=V&$23C{v)~`~M z6nA)Y5J|gs01O(pa+N-LEUhH~FHk(uCG6l`MlaPSV)q%>jcqwj6D;o;U}EwoitanV zsmRxshjDb<&B~f5)=WX{L=JQ>+Z@I+dgke~=*i(S zcgT`2kOEiq?Ysz@TkRdoLggu%?tUWMWtO7IZyJAqehU8`Ph-D$)f+(O@@IfG080or+cpB|Ex*h! 
z|MUfg%~6!xkul#?$jDWDQ?THH%WYxswBA6+Rnb~AnU_`>D{e8~<$B_Jn*nCu3=WW4 zm~#FfR0WA~KWpQYE6d==hFJMv!D9RJ)UIzeHk$g9KwV~_8o!d+0Z_+BA$CNgdd+(= ziDsP^rNcZq@0yooM?oVUmn|8e4<+0rCOcDH_78WlyN5=P{CVr| zh(%dOKVCKOa)Pd8f%9i|Gr2Vh`#$>YkurIrhpLh87O8i;BtGD~!a3^L_!n+YgvAxI zScx6#OuKDL2lB<%$E#Ycc%ky&iZcWHoq2>p2^SV$6Vq2k3in)gSGvwoiLXtUqD0?9 z5F~HmEBA#GFKX|`jh!W`HTW(KKU?;SNkU#5s?1ziA@wQO%AKQDU>vFF6h3&Dn4|q= zY-syeq=;EvK#)I?dOVXepZ8ma{{qpg^9$+2CqAkVJxW(5fO@j2bk2j}E=57}v)oJV z0gG74iYq1K-!zOQP^nEaYpCKh8Q*86?nh;%k&g4f=;=r0t;tEyzk8Cm|9SiE7hmpt zraupq8(GXaZQYt>_yp8sljXxtD$fL2-|{#pTQbN7ouMcQNDT<+P0tV!(N79+>rKa&IdY$YmN{||nExJ*2A;Ch2hgl< z8l?2VUoWLMzi%4d8hp2q%V)Mju(cdi%T?!uNc*JYBQFc4<3+lbPC^gMastp>m5yYz z@K^Oe@gODuHpe!)S5>`U?)cizyPfFU?<@{rlC(=*jd`KcqM13I@AY=Kf4{z0Dm71| z+T}U=Yb$0pHnWEfw?>!$7fCUH{TB?VDs)cj0&qmKrhnG_FVz%x0D1lU_m<+*CEV@L zndb)%uR?iUD)*n9{#m**AS^j|TJ5#}mv7jQb=ZonL&5*@&40)FFMUptRq6k|KmX;g z)9!|{ANkyGzmdsO%f3v1bq`*fa!Jx}nW8|0`P*9q-22c!G6XZ8r``Zjx9H|qF}Pi7 z;f%U#Q(Rg0{EJbBa#>YvZQvi*zFv#%_F7DLtF9fRN{n8cUUS-ihwT?~N~u0{Lb#cZ z%lF0<{jH1Ozut6byxk{_?)yzF_LP=QXU=0!k9r5_(D|Mv3vUsIl^ zMw~n44gL4e{g1mjva6l&Jo!I+65Y1nWI1(Y#>YUXnkT7e|FxI@{K3y3d+Nx2e;)U7 zyiE7}{VAVZV)mx^&jJ5xTjh80xQU~D_HTdv;@>(b{bR4s(P)uVN9NT6Iyn9*dEqY# z{cV=B6sZ3k@L#L^_T#vNC+)+3{o+4=xF)zyed@?m^g#R0IOQ{^9SCsLZvQ#pzv%ye z?@{|{&ae_|20vwj6_F8J~M2B|-oGDKF^Gnyv>3}oS=Uk*e+w0Q$iKKUc5fPjiO14t-8hZ{#4uFOYg*Qy6ynqT;J)0*Rk3m@U{pu4A`16 zS=c3%w|%>`xBh0Y7}>FIdzFa?+jyqJddGZ^YuO&t9Hr~T*{B!#?J~biX81N z1+5{-K5IGqK*M0;{ZDpAGcT?*f0cV2L(=6P(IUz87aOXo=5!|e0s5Ra`?li)%bI0Tr?fiw-GE?m3P<$TCiIZf9fw7l$1hP$FTdnA^I z;(IvPWcTFu6griB>r5w)BCNLYZm5IXBZuv#1WyHs3JVyr(%&J)W^$%U{D3VU+2Cka z9(_b2wvL&lTx%FLnzNL#K3P;hx4HrBrrgs3ZQt*a)wab!6LguDT{@B9#^}d|#gHIc zj+UaCd%=aDM&003+YDHBS5u~d##AMkLGj{hUbFt*BrLXc2Yuftnyug(%$w*TdT=Pm z<9qn=aKP8+pjpDMnjiZ@Sx>|GJT=g2`?Yn=aF0jvy^<74i`TtTt1&TWwB0v>GM2y--spQEb zr$80xMLe|8;wR;K2UTb@F$!rAo7J}blK<|%sA{EXnr7`_}9d&DH> z=?r?fa?qGs^o+{Td6*6KNx}DM@Mqyo;&9QZNyNGB)P#W^wt_nENw+-(kDJPZIXq{f zlG-ac=%T$Kq0DKvKrPfmFlRyTP 
zV$75W8+CZ1mRG?rWnwfehoU9diX=+IHqdk9jdd-4uZgXDrP0totg_SVY5M#5rb<}I z?)rrKNO{rM#b_<3WCdeKewtT|I;-UjY+*;F1A6L%KTebiUTeO)jyTtSN(jO^%P?u{4Q0QZFPK?^(;;aRcy$P?qn~tqp>WU9GX6!LxkRUwJ)1+f-Hf*TV z(C?LIFwh!7hVlD~eTY}@(TKC7%8@A|ZPjKlJ?|>Vf#otBK++%&3hHdf zI$|q&0U7@DV^6P`1Nd>tFUfz*?Y{h_%{WVcH_U>LE?RxB7l zqbSu0_If$#)J9d@YAMm71RL~|s#$5kLIiim$e(UX_)3PU8`#GGD0Ue+Hy4|v zD*)s1R!v}=`9g7|?eP}h z!juJ)G%G*} z%#O{M356+QHEqu#P)t6=okG$3REA|c!ITIetpgrFbKBNe}zHN?i2?;C|QDp@ka+kobBR_LyoXGj1tjU!gP~$$XccxN(P+&2!GkL& z?&VUM0U$)*P6()@KYLz2TyLGYi@%+1!THE468~P1eHZ1{n?St;BDjru!1mEV*_CLD zD|f~6Plt(a&9+|tu{(iOY;Ud5%A&ne4u;rzg7Z#7mKg$5bWbUmd@!;1LUzle1;dHn z$CN`t7h-j5+$&v9A}L^62LPt^cWJv^;BkV;o#_L%mdk+Mqa;qMTMoaDIdl&bByOHF zS})hgD`_A~C#229(e`{2*&w=wvPql(f)XNN?S7kW8Rn= zl<*4O_p<1eas22xfnmOzUKixOo^N!D)O!UuBMI5sfYWk5PwSKe{sJ5@yJgI?Uc`fA z2~oFGswl#J@#-@2rJKrWZJy12f!m3z^kY255%&Aaa7{4Eo7VF*2fptRn03+a9N)j3 z-~agGJ_yhl1ooZZP8LwlzYzdKup4#J@VPwtPwYE)+8!5yJ@P`Bvd^5%!Pg#m07PmRNTv9Xt^I39-M}8rkNB0(oqRI4 zmA3&ReYKOMIc<*#U?9+OX!GsjzlQujfA}^J5a~YZBkO5<j3fa-;3~@-+wQ{e=ou@kN^MbA~3%Gb37K@I$wzjt{@R*2qm5TDmlTY*!MM; zE-|lu=Z#HzX85yt-*LG}%lS({u4q54k{Fu|%-$7&{qCd`Jgli*+_9xH7Pr~w&tv&Z zyWa!H^}Wpy8vCYv_+-8A6!5;$u+Y4f&G!?O)#s@%xy{OG0&M+zEw)Y}`U-3BIp`e% zBBWQWI{hqkg_c7)F`NYlG~hU@zUi^X({Hmkm4?~yT097jSQ0rrW$EnK0V#gjs`H`q zd}W$ZsSPAq(~?~8mdt7Iz8%jnR~6aY9s!e>|EPtR>8#ABh=W*v! 
z^M>ZHTivM?G7(bLVHLP;hB<7_qs+Q3&z;(>#ZMJBW^aR~{!f|wmniO22DF3J1vxa) zPC7ieyE0GA{(+xNc6IPzR@){^MTiB;H<1n0=_#!CPK>of*d;jlGVQjH&G^fmBT`1c z?}~{fcay=%io^T{Fm&3JiZ#i_@y0vGFmV@4c^~Xl++iyd&&G(Z(fFt9{h9g`%O!J) zZxNH*{l1$jZ%4|lI&E*4pBO7`)VtmH@Eo*q+E-%Q9W!;4k63%NZvK1mVTpel5Pssh z@8v-{I$&Gs7B=rY+n-|$?Ev2KYEm-lnCI*-w~E#J^T;pyAVf15jzz+ib~c7f_w5&m zJ8Q87m=6!{G#Nd&znRwi{Wwn6Y~*iL{OaN>508TmoTV;7w05_9?bd)9lzO3fQ}Rml zw>4Db8J{!-;%lIj%!mkE^K!rO?51(=uRYt!VfNw`Y0MHvj=NYkDmuDlw0Lf|v=_Q~ zK(A*%y*yCu^!73e9?m9lXWDnqe}6f$_a4}k)p;n$_2S9K0k76K^$XN17f3+B{AsX(SCQss#%i|eNhkfb&blT4ym!`-;kKjkT^aG)s+;_oX@BSAK z1I5sn#C(L_4z!`UyK;*5Q9+Tsp5t^5&wO3y>@-KNqU5z zqV;G4&zzRI&eW#aY6gQahq<;kb_rehwu!sydFlx9Bl)D504mxNkBch5iiglGMql!e zW3s4sV)WCUn9t+(m0(*IinUVR=*6TYe4*fjJjm*;&oL%3I(M-=pFLSb$+K3+UY<9k z(t7_JrchV{v)%_nO&>HG8*k6IZyAgv-sSQ+*a;V}Uus8!YBzV5hjQjd$Ox%!Ik zEJqUfl1n^(kI^&gzxf%DmaP~6YSpq6Y>uqZXLcDC-wNsWje%+It6pM`(QzzZ9!v8Q z#jl0*3ndyG_UFq;tzO^W@M2p)X5kKY5;$y06Lc=C3vVAt_=Jm->u!>4OCE;I1X_^W&Gc^eA~xS3 zFdcG%^HO4T;S&45XdPx(80HgY*edL-OBY%Lq&#Q>8_08oRcpBzoR8N!!g#HIDLlFj zVonX!#@kn6{m7byajVjb8wd^d zToM`Bl0<(V87v``W9YtQv2(qxJRD?h&<50#mD#PMdh3_5($W(BVxTNji(k_P zGuTb50)(TlB3^>Y7GWixK1A8c9ev@cnYx7aX*t2F<3mv->(l=+NtP9l>X)#q$sJ+m zWEIcmIR^znsUvLM;GQiWx)2f@qj~zHM9*g4XE){%#>o{15e&WOuDHqY$~?wapyBg%(b_%stCzQ zJD@i=>md>0EW$UqbGGBzD}Cn*-0mrG`?&B&MV-{6{Mb*H`=)-LI?=k`ER<1<-HWrq zQE$KYmXpxXkh}#el(*PPhw+(x6$VVx`WXkxTZt*1tenzFZ2kgWQ(Z{X1VcmyuD!MR zzSJJcp(zx@AER#>BzUlcl7>y!Ka3a_uov9VR-FU-cXXc%tFmy^a<8q16zN2F;;hpVPsqw z-ms;(=CtWR*cv|zE!QBw)wAEd%onp|!9V7d;4aKXfvuETJ8VdeO!3{#5Ih`xwoT$- zb2I1X*|sDQX-F2a_{R@GcHlXSNOpzrp?h;V`D(<$QECgJ`(CYKkP2G_xBahMCmWiLdZ50C zXQ8T4J!T8vz(W0+X2=GyL5B(N2Zeo#?5VmfVIXEdY8y+M3rTG<8jjZdB-bO6(faf_ z06Dscp2HafX}TJ2eOL@Hw8__NJqa$b2&0iWdaNkAet2xnY#V_Z_Pewk2pKp$AXZwO z1ScJv7I;vNN97uXR^5$Y6nQJ*aKB|Kg*gviVgxKTJG$#+k5Yp_A|VIq|e*8*D-cA zPU7cPYK{^f^}=1uQ;%);LPCSC5&Au1Y<@tug|d^2ZiRSfiNqs*xIcKa41MUZ?EqD4 za6yn>@^=lq<+$38q#3u{J0zBEt!MWaukbf?>}kRn?yk!>J!2s?V1k=I$gn}!!^J#W 
z&-WaVJeNkDkn7t|Er~b`3BqJokMoVHv}4l+w8t{HpsTeS_#sS&`BzTd!p^<0eu)AlT%n5o@*$#(Il8;E$cA4+3UBiBW6)L} zikSuWPU=~7Y-}5gfIgj=mvgU@$mV9`T#Keb1pgQWsqf@pys8zERL-e0Ts=;i52&Dp&d9r_ z`9!M(yZ$r3c6W8F_E~t2s0>>9wc*KPz^VLLSgBk&!WACOC506JVscubiwQf{{zUwG zvR%XG0LGz1o1sPEUPE`~AZ*pI3Bv067|__?5BDXnbTL5L>NNA~W=nW@u3S#>+A80l zQ%APNN=EyrMPT0n>OXM4b`O9!U@KS8?p`(AA55Lk&0O9fQFn-A(yeV8r ztU-y4@Y$ZGJGNiTEC$&z#XR?I8*qsIrfB4niQC2yXCZm;0uQS0+(XQL`|R|h6LaW( z)jHpX&r`2tcqJI47biAytowL5epGGIZW~xJkg&eKh~GFXQ6z^ zX&o35*X7R*&hR*cFMDxxzu%@(=on*zs5tf_(DY@OprXUY*ENf{W_y6BEDiY)`x;9EsO^wO|z`*kiOU zx4dgNm(Xd7VL*HD@}hW%f!o4`;^x^ay+DS91E_@3o$W8S;_Y9)@470C2k6)JE1(VO@i3jyloFg6?auFmF1Ejo>pxJ zUTdGbMf%}IwcD!yJ8N8FrYs~3ch?MrBwi{k4Of7u$UFUQuq7#hj6aS0KHy$uDY96J zO!=N{$eQ0jTflFho#TUH6dZCcI9cA^e*TT`3Xp}=0i%^a8b+TI*zF1g0>hW&{$Ke7 zQ_KMid35A0g;)xWj0k;okxSWm2&hXsQ5@e^P6TG=zjbhOMgs`42?MKNm0x&y%HRJJ zeqi%-_hg?E*slu!N*apq2s(NG=kK4}0Acs>cWWH~ZiD>$a#sOdWcIUY+EbS}4rEWu z+eej7GG>kk8qx-G6J`%NPR1DlODqSn)s~UPuv1RwT6QTwq#iZJlc6ZU6qdBW9@&j0 z{(ag19^+Rw142Z}IZ3joE|DGBqeAN`Hx0N%Jphq}RfWEsxmN`fgOrl*wviD0<5z*vY9%3kvia;VaJdKkL}_7 zO#z3FHD7SjX?JGzabJe<0T9L#1#;Di;AfQ=XbPU%Kln(M)}|O*tUao%B^=W$8{aWe z+5I)E;;jb%o|^-wXsy!8Tw*Tj7aY7YCM!Hx^42ADAq^v+BwAjwS*X3-muA)G?PW{J zqN9#hzQm{k543BO|HP^KX#3*&c#dd|>&CV9@o07aD!P+LzTKyFR<(4mAE*!xE~Yg- zyoB)C757ZpNM6jHrY>MVUU6i}4U75Z%Y?(LIUG{Gwk)`&A43 z6ss_hi0rAIzR%z(ke(W)f(%b31U`}q|1;{DJ#v>|LYbOt_p(wJiw@2 zWm#NhrRKDkB}@AV@UUIUv&z~As=|pjCkMXIoaK7;=TO{b9@n4qvV&)K)<%loc?oru z`$^rg1_Byr9dW~Z-cRd)&(F{QLke0g zZZBKvoyAJc(mC-8mM;=~vdww^QW>uwywu_hyJAYhp1rIX%fRP18T|PJdbb2`EZWM} zGf77zN6}2GkfV8YD{`ByH>)UB21)G!f;OwXNo~?&61)+pM)Vxo zkNfH(vx0o2P|@FmJSF3PsZ7xC=Y0P0QP8$z1R9un?FjW!5~@Akri5918o9qOd~CN!E}?OBQaCk z7hb0k^vd;p@6qKtc2a3$s?9`2bUm|(a8n?f(WET&O+G^sRd7|7SMVB9B{xi zG&IIKQ!l6$D5?WWAgKNIY4^|qkiZHUk%^2fz>WP5ZL%J$K1CfQy!{4Prs`R0?BOXG zPeI_mZjmomp28rW#|l~j$;&{=oY1NcwC6cVIQEqR`XdvcygqP&x>RBD?8(e;eg^o8 zzVA{Cu#+2BS|@Y0p9+w|DJzQk9r)+sTb7eY%8mpyrlP#l#vgu7Nu{nFCx2xw3aG_7 
zZT>f}yIBPF9Hx@D1~AxRjCRHTm`*W4+G+$duC34Ozd4Zf(DyJHgvZ)gVbu8FTrfz5a0Pp#MtE9*< zBy1!XSzfNcv%=6cXICk9Em_$0lhoc=MaAHnovEL8xhf?ywa^U2ujyi5%SdAWU{|?| z*GXGRkOYtcH*%>Sl-|EU%|vz{ULH-Y>rpD*zl&=F902eV4&yl6zGOfPreeF*`B+F6 zt;|qlDr}~)&TBCcB{&<^xt&1rCaD{D!r)W#<<7b%PND)#5>KtXwW~BIQFd zZ$T$a=cM=?{bVU0n;9x@V#&S_7d54^5g#I9B8i8+BVPt=Nxfi z3Cn<^TBZe38#%MjtebzuJ#lc<^C0PObypg1UruuyU(F&2k%(Qi$s#VM&>D~o`b47e>SN7Xomb!vN>c?AKlBN&!(J?ivQzp<(> z*uV|TLD<#ry$MPx-hLMRI zg-X4!jzXK9h*zok4s#sMwQSz3_Yk+}BD3Vf-aRRTU~l$SYL?mPY6xQ*ZoMeTK*PXxYt$>Ydo~IO54sMH2w$(z65!Wy zi|$Y@tY1NG%{$q>gl_LTtbCSwxTgsgI$D!xL!v^%7h-k(pdPovtU}Z7(67|GVZ_K` zGAZ6d>`E!Zs$)K0d!N8$$3ZRUzPjkP)d5A9cwXZAodr)CvIn8A73dE9L~~N%1TvV3MOban}Mm}-aIg0g82M?eVR}0t%i5x2^$*I z1{F3&uy-{6dE8U3D_1(wAgEgn%P89ryipK2;I|yNpBa#9v1wmBX~F(_t0gV>R6mcZ z&Gsd5OP}FAob)Q6Ql;q5gQ&OW)spBUVuxG{4qg{opI!KXxx?#i5~Sm(zfgao!>7Ce zFlz7j)ULIz#U3`{R8`>be}r(w?WF)o-~7s^Y^+gE3qxq?7<^AAj1~C@kiO=!Mc`)K z(G8$QJa!^W=5bwPW1UQs2o@59lwf)A$cpx0z2a4V)P)BYIdUQ-W%TE>;cALHDuf)f zjyi34ELGh`Bvz-IPC=&euH70?+?bR)aHF5gh}-dL!_rO>S~9LRx+m6dR?*wOQKC#?y&O*+=+*WGHO$#^mgf*fcbtAXfP zfmbxpK&|vtc)0NTcAaltVcEO2dfcK1O@w3s>R=OBg$_={y?=n$dMzdq9Q(moVVyS* zfM!CSi2=wZv4<)TqJD-3FfeOrJ2R%N=8|h4iFbVPFI!8*t7w_D+z0T(KtyH<{dj21 zr#9@n(g@Q%E{px#1~Tr4``QX}8^Ce4ONJ!p2GSGGKu*%z|5KtCru$rulG${rJ%**u zp1EX?U!5CBPsQjt`w2RdS#p_`m{6h2v~5%bRRko%gdv2qclavW$rOM0U?Lyc_S^&A8u;|w?{*dMdWgF!}*1Cd#mKe=T%C=Qvyyq)s$JzTHN};@F z&99+Y@xcs>PZ^OtBsuXU72(m*<2z{Jj-T>IgsN3WhL3X5$5n!L3QE%N6c2ciJomaL zN(BFWDA;3*w%;&;MC?frpVD|K3RyI4;ao>2zSh=So^8Xc32 zg3L()*E>y8Un+E_au_5iU8~>!iEHVn*ic)D(ysr7Sn|u73x`O2fkA#6T4X2PioB@F zd3!=OQwHYx>gcCu#zKj*aRgQ>Op>sYfKwtUL0E7^SlEzSebrh5r1g=V_h+`+*~xpB zKH-d@ij#0Ue!@VwVzrrfNzwAPUsHI479`W~t7T+dmKoAI{6j!4GdECfLbQX>Ss3^y z#0-L}xW#+fuNb)hIO8yRr&dkKL<3X;uilz=UnK~n`5aVi)za>TRFIi|X1_T@o~YU; zip&*Qr$7l8cY}D<)C@VJP{mRQRrC4d99G;(;K8NH-g;K#v@_V({Fj55CiRjkY4UAa zTdQ4f)l<~eKdy)h=>0t%r>>pMy#y`N#q( z3$XFqhT0vwtr-rfC30BHoJ*v$l%YG8TByv?_ruSS?eRd#+>E$~wO2r; z6YnIB8rIgDER){}m#?2`Gl)`<7pw30666hRIkrV 
zpr0z1^4Oa3u15=s_(?fLM4(K?bm==itA;A%+u6g#m-kU-_3C#=3~a_mjiIomEP4v+ z($!b0qHV4Dh>-Fr)QOQHU!d}Kvq{=%C$yT+zG}PfUPB{F0DKb z!ACw;8)sCj`d_q>#M#Y$NF@x3bhmtonQff;gtBJyXV5LO_l#EZlcJ+$I>Am)cnPrb zANhEY8v4nB81uzlEZ?BAe5s`$08e;^w>OtX+aFsJ2duu_4GVxsh*OZ)a~YVG)E|K3 zXoNRrIP8lr=4t|Q)x574uhon)Q+*m zHf(;cyW`h1*IP%AB+nhy6)OoFRX*^&4nCy1iJRINTrKOw=?{Ae>bc>3z69QU)9XkO zlDPras5W5MQlnvJZ2G<}&6n>j;Oc!y+NGEyXij%W*b;E%MmSi3wuk8t2UkzQbi*Uf&$y(8AxDX_ed&@kOBd14luGQm7nLNxNjE#q;5(QfGx0GpjJNz zRqd%|52R)S+?Hx(=9*qt^|cM3&G;&y+a1p509$M#-tat{_NelUJg91Ran0=D`jsvu zXyDywK*pvs0haOCTL8yfmLlTGW3gTC4v2Q8(%6~A;}mC=%uK1f;|h@BLfLvfVfENv zI2=O3Je0#CT^N9z4ePOpMbStk;@|B*V3&`((Z*WhG<0J+y>F(f^eBqPs%%o_O4|=F z8&&tRICT+)BwiuFV;eY}oGrSI*ZGHM<#EgR7dfVuE$o2JLmV_$j2*AaN`|Pu2{V}*!<|o;iqA(#*a5*7ZrtjV?eHLI?Y_q={z^%xAmP_k znr80?VQVL=_$(CEKx9{J*sSqR@dtd&5FfWGQF?9j*5(dy1TzZ+F_Y3!bbG4O6TP{f zjh(H!1L=Ukzd{Mw4d9nXNUYV5$ns0oZQFC`n}QFowC7cnFQR&;df5U|2{7ToW6wm z%eflg07qWkYPU((#KdMc*r@>q`34m5da=3-(_9V4BAQ|_ErAk*w}`y&-|yO1t@|At zjSrPp2CAi;*9SNQ!)~fqgA`i5_Yo6q-~CNL{uXd9j7~6pJH>M*tZF8PVjDc0wZ3C@ zcw_X2Cttc3WOFNar4Ojn8P^oeR~c8t36*hK>1t0ajp&JIn$ZeK#B|?!tBUSZ4gr+! zy<;bDWrQ#Q*kyoaQ3(`fs(6bKYelbGAPXP6PJ1?4&n_-dW-Y)!KXfwrrNfx1zC^Yj@@aaAvpPBR)IogyvI(C=4{VF z$|r1Vk2@h{w|B7A>hN9I0?tg9p6o*$W;_R#yoE29MK)~p$T;jlQVOq1Q~p2geP>jY zYnQenh{_?Lhu*8VS=5|IxQeCGsqE+jAp ztAA`RWFzhs$j}egf73Fb{2@k;VAD4o*DTYIEqyfm#nJ2$cC@PkM}jG_hT?t|@}3bo zblZ*M9oOgj_<^g{m2s@iUNXYDf6jTmzNfLc+qd#&fcZWbAw0uSvi==4JQ*ED>83HY zKL`F|?0(tz{$owjNyjWfYGL}Z)|0@aaGK`szySwUDqJi^2TAQWn^&WTh%A8Gc1smI z$cKwgF-N|&xiX+(VqYsns6}@!AZEC%5`*_+LBmFJR2RY2&Mc1ErbhE!mIKJ0_!vq#R1}=rcj*Idab02l4;9ueWDd|=jI}g79MgaT^k4HCBLfd$YodCw4Vz{M%j+Jl zHV~Mpgjc{{UG_NsnoCiL!o0|OAlOD^qZv32R0(O^khw7m^go%Bc?CKAbN2qI=yq#Q zYO0QwJQXiTv6)h84Q?znIOtuva+v6^u6$U9HGn-7BHnUCB5$@8-aE$G{Y6#C_3RZ} zGvhaht150){5yi>G&*yj=#Yso!N~YywY=Q4DJ-^**PoFl(9$9G#<=F}ugmVW5kB`* zb3ZmGTanb;9?F$c2iXtPj4`}s7vpVIkNLoq-dn7_BV5Hn3mKD>XfZg!-hD4d8cAU? 
zpR}g|ev{d*()fDmNxT6uVSM1)ZVEX z{$}mfESyL=>^fKipSbZrK?TO%r5N)0-LrrjfL+%}uo3cUU#c=_-U2N4?Qscz2+(#} zdEjGABo_UaVl6x+2!Wiy&lQ%SGP7&F3WZBkdF*u2u%s)t4BXwTlji;eUuxa~?67E1 zI`HCHLqbCmOgPU8;ojm9G*jAK!z*!<_r72@TM0vg|K42BZ>_D8D194TRvU0b21Fo> z_s9sq>?TdjpSpZHLA$K|H^t)28KRnoFEuN%o>kk#)XI_hBY~5RESG*yy)~YSF@yz} z@t|S(0l(=W2aDbq_&Q{M`OANKi~qTv1t~H?zoYlSrSk%y7=arBS`CJrO$S+7y*q-URBp!`@1*^DP+m_2K?p;{}=&S?jIxkA2NcL_uc<0dxi6MrorRV1ohriONhh^m9hW@#as9D z`@5+sl459w0{LnHBge^P<*=ZjcjqNs2xao>a3?}fmY~bZzr=-y27_U z(lWvS$n~VMI$e|!bwL&Fmx&3L_}5k@o?ej46?BJbB#viqB(s|D5&N5&j&s?bDhNBc zaFync6@3O(pW-RTjHKb6M?MlhH|L5XVo$}W(H&jKbHv#fphqDtHao~m2Of;>=kKJn zKm1tPVuMZf50NeW0{RJDq!fX_!l%7{xZ~mG`&&n+i~EL>XxZVcRJKMP7D`7OFXFa5 zheO$_yD6*~iiXpOT>r}-Qn64}91jsmy3$Lf%1=q2_CIS?&&A>Qmw5uTJ~u_z3(!tS zD6t#cgpHKsRep?zM4r~8V(ubOv>uCe%wL!W6q|0ilISBo8xz|9{b@DiQjakcJslft z4Nt5fpG?q8XJ*zy^Qe<7o?1;>tub6GR<8ZSZn(XROGfx={70`Dx3`B~+7Gkds?UD; zGNw-O6Aj-Tn{X#Pw)hR#o<|-mpDVVtMP=NCKN#&0)-`6lH%39)4oBakheUQU5@t{= zICy2Uz1}x%7VAKoXk$&viz`e@(FZLzn4(m1jFUTV{ch_AqI%q=@WAt( z+;kgUGoZT z7F`mCw`J{b1iJ5~ClU@W1N)na>*%`d(y?F_?u23G=7}rEnKO>P=J0;_de}Q`H>W1) z(^XuS)34{0pJE93y|ZT1R}pe&Br% z)8r^q9twm$Sxq_&J8f&^j~%9gvz!(Gf_f-E0g%O>vWK@0;+2U`=x%xldCLTM%f2M^ zc?kG$fld)a>_ix(?M+(f&v}bP+k%@ zvtSO09Ewu=t{nJw^r;gOqE5Gy*}E-Yz>bp*+%iXw>(E0o32C~v9w-{wy9YkIGXQvK z`;7bCkxJeGhAb+5g@^vn6q6E)7=kpj=-0fLJBL*p*4U)mpW@lG)L#qrIsv zGqZhJR;?y0ZF?=e{jb8g5R-3H1kJp8v~=ZWVsw%S^{9W z5UJ1f=plKPSNZjF9}fJCn(?_JHSM!%@h%UF_aAa!=GHgz(i*LrNvnG3Ill6}DWQSQ z1#5eXL~4m{YAv;#!GS{Kn^7hJcg93sJoqWcPK!{!g*5}hnY%Yse}7}d!!YyLns^Ur z8@7t~QZPoDTNa|}An*$;(&8h{kq2Lr_7#{ph*S_~N^%*Df0+xgUt|G8nqwA!Ff2}# z2SZwNk0_---U;YGij2s9uUeF7$THSs)S3xt!NB5J*e1DEZqZV0=XQ*3A#7L{=J zaw2Mdso2P;&`GN`js(B7yD@aX!reYOVQ6I>0z2ANbNI+7m9V7;9AizN9N8MI5>O|z zaM?DVHWWz`S@(sr7xl7VI#TU)GCL8fOTa_Yv!cITBYZa(8Hq}g1W`aUXh+p^g3C@jCyD({+g?ukV zLbU<2)R><}w#~|~eRuhikYLekau_!}d+`{q5$uldeGeO+LH%WI7RX=ll)J%@YkZCf!n!> zlhr$HIC<1anedMs!x@AX{X zOYpI~8mIWs>yGPUzktI2Y)=^p-PB(JrDKbJ=%eoE-3G5xuybu;NQRmS%MK_D?o> 
zZ&K?Rv{~8SCo;qqNUE446g@Q&JLWZ4t=fmn@bQ7>#Q8pad}$EbA*6J}Vn-qK31JGi(?{cp#@9CLdu*h}8bjnoH-_yzvkl6H_jhrq9I57?iAOd< zx{|}PA9AC}Gi_cYc*tY9iscfu4;%RWtt7l-89xWH;LG;9-WZ!S!tDto6=c>rAMC^p zu5A_hvQA~rlONr*>Gp=xQ!<}^MpY%`F8AjcKJ=M*Fd(ix)EKL4HyMDSg;|K#^3wA# zCo6MBp%&KX#HQwiI&gMoO16E{NMM^py=I-J|4hF*Bl?j5ttRg*OZAUn zh0rkDL?Jur4-)&m*ARC^0dWxgUd*a#Z~@2*8hZIZid+p^A+gVqYL8B=~lFHUybQdK5S4%C&bU|t3%(MoOF6 z+?xw@XO|T3X4|oes2zq@zZzO9Q{?#A32pCCrb#(@!p0!Q&;cv86S<>-9bXRjo$%}0 zvYJk&?Vf##cdV@HBL2}0or6|w4)3}cqkJu|5f(I(8opoP;^E)ey*27TR4AEM7oduT zD6S_;uYH59mG+s~Is%#S&83RQ%=FOjLmy+|a}jB9TLg#qanze9C`@rLd#NCV(jl=IcebEz+~;u}k)`L@b7Fe&{9CxU>z2ig z3TbKRXY#duw>ta$t8w>);R0?%-RPfg-dn%sR)+~tT=Dz<#v&kn{d#j8gPR{b^?K87DR@+tizYl~AD74GW6zy(X0l6+?Uti_w zu^)<&HgS5rxvl9lcW^0gG`a2l$7kQ>u&iuZxlDm}ZtI2q*$*Pp*XC#I%(Y4`yKYDN z8f44Z8v;4;iwUZ>*4Yp9ukNqm@a#snW#v_3W#=b@+tn*p*Ts6wwmB3PRe7<}J638( zAtM;~kSwy3&1mle;`)Nhg*|bjd#QWPc=XOPOB1)Z&i=+eJ$D$B;`X^QMZ&taS^CGo zX>?D$;qvE)+;WgM<&qCX{B5F-1OrQ`?0oobex2OCoqCMjQASQ?w!3E&u-L_ooyp*G zlfI7^Z9#ENM5Q~GDJV`6+4LzqGor;MeT~pc$miR+S1B~qZP7A0me6Li-W@s+dz9^i zqFLLTr-DOiOZ)|t@J==rPgKono3>AS1MYdFV}(GgaT~)lpoBHeQvelqUbl=jhHLu zde?~SE*u!ws;NFS%;1BUx+Lc3Oi580VNyVCUUaDQSRn5ZQz)T*5AHXT9A+WmW3|Vw zr*IB#?=~04prr9JHe}k>j<&5q1KV@7mm~U2XyKsKO}S+Ysd`_A1$`S!Efo>6u#642U^SzQt3qQyh_2{ zhGTv8-Nv03$IeUIu_uH`MBRAG(9V&Nml>Wr1nx*^_{8-6Ot8bWJKml&JyG9Y=l*(i zN~3+*Z}Pj9`LtnS9Py6fRj1kBeC^%2Fl<|TjBPv2cYAKenN-CjwG}mA!%L@LyFa&-9X^ zT$SCWQnsHvVb|7lDhZ?XTl+5v?cZ>|G1p`aac2DsW#>8Wyiu6SOiyck{oZi~mx4{c z{+#|X-nf!QH!zt>pBew5yhwp)5y1n%lflPE5a&1Xiu47UN z-QkJtoh=Ln8RSAaZbiAneqkKOB!6<*cSTDl@6F1i{>GgLhHV2SkH8;|iOZU&(^>NFA#XF7rrS}ZcA|}+I1Gja z;P0k5{rY^d5ZX|qTFrUte%;VRJ|@EcHG7`MLuBpO!@~O zvnmM=h|K2Gvl8=PGWMz^gJz-5HXjkf?w2+n#rd*Wr7!NY@S1GSOn8V=#vHTDj2~<1m@_In(_pi7yJc5uG=}wE!oZ5z_hc2WKjr@5 z;#4~W4Y`mIZ&14F6dRjvNOH)3G%Dph)#lLvB9{O|gNQ9Aed7-rg!R!1o<>-h!h4e^ zbN(|o$M${sqhkV<_ zxs^6{Ta2aF;q7f0{a3LvGEt&NQ5vj$ABqLRatjYLC==!=?&OtZZ%%=5iOsJ_Z;rbq z>n>IAOl+f7@`H}|2w$~TA!sPc|E9G~pb}^6#{9Yji2I_Ges+{iM&|R5o 
z>7GBk5F>#exSSo$fD$zn<~Gmf&p_Aku@%N&qyFIRJ`y7D?E0Ehh7VOzvHbnn z3lxs&RCmznn1{$!8LuBj*WykI!Q&#*DnE~gDsIgFK#||%KiaSL=~a^@m0~qO9m5CN zrTVkgO%7TWm6A+>-DBM*g>#X%`q>%#88=9D5dSs14+qexr%HtYd1;%p-D&`?GB(uu zS94k(N4WAyojm=^Anhs8oOrrxYV5OG?782dkLS(?YC+CjFA6_7Vn@9)z@Y)827Qx` zEICE%@u!H>^_xD2Cs>OwPGx@ACYWrcL$iCJ3mz@~&!uG2i-w}jT|zmKhD6$=o5IX{y8>_J8OswoR83XtYLd=#$gF}Uwm0;= zYG*`(zH6sh;Gx;Cpant^dU+2@r)zA|<0IgVMJ}Vt26wkI5w6PYbK$j7U!T+0`|T_l zk^KEmXm@`LZ&bGFbu0VY9tGj-77T5H%0Mx$i>I>mb3)U_k{=9YyeLY$!7fukP?u}G zV;&l^H01D3aRBxpkuVyDRwpK>;Tsd2pNu5=zbCm#fz(i8aiOBa*m?3e_Osl^K>C1= zK1oucPj}>&p_j_|m&mUY2hldAM=W53;-2C@7-k&_oF0xgsj#%)9V3@Xj&Wh?Hk{kv zr~g7QMf-cj$0gJ;*JPA{WA9DtpT<+snI7MRsL^C9o4QHJ!eu^{<5D7UThd( z+w?88@#uEF8@c#i2wIQt8OyHB@#d0!yyJ6n(jLfdg{AIbjf$j^8?z^kFmB(&QcXtF ztqcnu+l=^f*~tQLr5<*8VEagoA!}%~hErc%7?k$ylfdRNJkrx+R z>?H<}%vFUeymZVh(a^A4!O`M)eezt(o~t9E$IKo`x@pB?W&#nPQxk@|*KgZQ;1`hy z4ii6lMO!;N)KV03rU$y|OpG(Ui!8;!C+ZQgZ|fY6?crczN0 z6rTWSeD7G;^`|x4;GaY|Cl!*r(*S&KPVv?F;6rF|dLciRvVl%_3Pzo-7ayTiK>^S! z-r_GG1kgDs0Q$tFmn)401wgN=-9EUaHP3@k@%`saD6J|$qXZvZcRMKBk=P5~^yOhYRvc-i_9xol;3)eZgpi|KP6 zb!tP&N_qHsc^LuAc4VpUBkyB0w6UUJmcO06GW`NH@4zp(2poJa`UagUhxEBV3U{;M zq3{y$Qx63T8jmwuSJ#x~H#Hc0@AKK(+OEZgI9XRG$x$I>5Py?#HnypQgM*tL2rhc? 
zb@poGVFTNYBN(?V!%`rGQZhe7jWtS8nmZDBS{9rT7vYJw?-4G>DmuzLWYO#_faEuR zv&U66lZPg-!~os_Kfq~p8q*E}*+ta8lrzvZERK$yzkrs6I<3ACGY8PM8(t~}a-4I} z=YiXS-&9KcNs|g1D%!fg1vJ|YCaf(4ug1(AmdgS2D}EEX|4f;e^PEzmC;B=#Z;);#L5odNM_EG0I2d(9gIc9~O#m%0 zc?v{|^H+G99%e6>o-$h)y`9*_+&ex|kX(x(6!!{kWOZDc} zC6v?KPE&32lVOcVVkc6*OcZH1)pv50NMTIHFx%8x7Ywx%d(p6G2NTz6~pGfAYsurC8;;lq<=}C~|de0OMJigG8B9qR6qLxEfwoet*V3vWD-H1;d?E zf9$=E`v&MDrS*(trW`hLojjaom5Bi@7Zo*l%~KS9F;hKK3lh1fIvYNk?#7kY5yR?O zwda7e<5zlg@z_Ek>7}$2`OOxRF|+*iC7t~|aVR3=wkMVIa5^O^vmjix>#}PGXBMZ) z8yuO+4YQT%O@oRGN>1=RRr$q@j zNor&%P^*nb=LmG>(gdEcHJ`Xa%y%`aBSm^wfeHm5bO7&-U%tO2e9ee{1VbmPf)BSC zidt%TlcfGQ&0g)e5c?@O)tAOzD>47Cq>2216EsZtl% zC4U21P}Yhj4cH%#zsg{svT!K7N1`?Oc^7M^!ofHH`+o!}R3+`LUOFo2^56F_0zapo zO#bbsBb0z_+%vxU-&FiF3mSl*yV4c?PA5$j_vr?;Q3SuE zbR4)k0l`nAR6NAr_x}W3`7M2=o!<=)Eg0TYr?uFq?DUjd{xkgl4F5l?|DV-Qef#+T zYV(&r?fUA_p~FLWZeG`+;;jC01pmk5fEDs#Yjv4pIj_!09s+;v+)}@pd*jiw{{YYE BODF&U literal 0 HcmV?d00001 diff --git a/docs/How To/Qtype Server/serve_flows_as_apis.md b/docs/How To/Qtype Server/serve_flows_as_apis.md new file mode 100644 index 00000000..1e8f7317 --- /dev/null +++ b/docs/How To/Qtype Server/serve_flows_as_apis.md @@ -0,0 +1,40 @@ +# Serve Flows as APIs + +Expose your QType flows as HTTP REST endpoints with automatically generated OpenAPI documentation using the `qtype serve` command. 
+ +### Command + +```bash +qtype serve [--host HOST] [--port PORT] [--reload] +``` + +### Explanation + +- **Swagger UI**: Interactive API documentation available at `http://localhost:8000/docs` +- **ReDoc**: Alternative API documentation at `http://localhost:8000/redoc` +- **REST Endpoints**: Each flow is available at `POST /invoke/{flow_id}` +- **Streaming Endpoints**: Flows with UI interfaces get `POST /stream/{flow_id}` for Server-Sent Events +- **Interactive UI**: Web interface at `http://localhost:8000/ui` (redirects from root) +- **--reload**: Auto-reload on file changes during development +- **--host/--port**: Override default host (localhost) and port (8000) + +### Example + +```bash +qtype serve examples/tutorials/01_hello_world.qtype.yaml +``` + +Then visit `http://localhost:8000/docs` to explore and test your API endpoints. + +### Available Endpoints + +- **`GET /flows`**: List all flows with metadata (inputs, outputs, interface type) +- **`POST /flows/{flow_id}`**: Execute a specific flow (e.g., `POST /flows/simple_example`) + +Each flow endpoint accepts JSON input matching the flow's input schema and returns structured results with `outputs` and `errors` arrays. + +## See Also + +- [Application Reference](../../components/Application.md) +- [Flow Reference](../../components/Flow.md) +- [FlowInterface Reference](../../components/FlowInterface.md) diff --git a/docs/How To/Qtype Server/serve_flows_as_ui.md b/docs/How To/Qtype Server/serve_flows_as_ui.md new file mode 100644 index 00000000..3617fa51 --- /dev/null +++ b/docs/How To/Qtype Server/serve_flows_as_ui.md @@ -0,0 +1,42 @@ +# Serve Flows as UI + +Expose your QType flows through an interactive web interface using the `qtype serve` command. 
+ +### Command + +```bash +qtype serve [--host HOST] [--port PORT] [--reload] +``` + +### Explanation + +- **Interactive UI**: Web interface at `http://localhost:8000/ui` (redirects from root `/`) +- **Complete Flows**: Display as forms with input fields and output display +- **Conversational Flows**: Display as chat interfaces with message history +- **Auto-generated**: UI is automatically created based on flow inputs/outputs +- **--reload**: Auto-reload on file changes during development +- **--host/--port**: Override default host (localhost) and port (8000) + +### Example + +```bash +qtype serve examples/tutorials/01_hello_world.qtype.yaml +``` + +Then visit `http://localhost:8000/ui` to interact with your flow through the web interface. + +![Flow UI Screenshot](flow_as_ui.png) + +The UI automatically generates: + +- Input fields based on variable types +- Submit button to execute the flow +- Output display for results +- Error messages if execution fails + +## See Also + +- [Serve Flows as APIs](serve_flows_as_apis.md) +- [Flow Reference](../../components/Flow.md) +- [FlowInterface Reference](../../components/FlowInterface.md) +- [Tutorial: Build a Conversational Chatbot](../../Tutorials/02-conversational-chatbot.md) diff --git a/docs/How To/Qtype Server/use_conversational_interfaces.md b/docs/How To/Qtype Server/use_conversational_interfaces.md new file mode 100644 index 00000000..fd960680 --- /dev/null +++ b/docs/How To/Qtype Server/use_conversational_interfaces.md @@ -0,0 +1,59 @@ +# Use Conversational Interfaces + +The `Conversational` interface tells the QType UI to render a chat instead of just an "execute flow" button. + +Note that, if you set the interface to Conversational, QType will validate that the input and outputs are of type `ChatMessage`. If you set the interface to Conversational and this is not true, and error will be thrown. 
+ +### QType YAML + +```yaml +flows: + - type: Flow + id: simple_chat_example + interface: + type: Conversational + variables: + - id: user_message + type: ChatMessage + - id: response_message + type: ChatMessage + inputs: + - user_message + outputs: + - response_message +``` + +### Web UI + +When you serve a conversational flow with `qtype serve`, the UI renders a chat interface: + +![Chat interface showing conversation with memory](../../Tutorials/example_chat.png) + + +### Explanation + +- **interface.type: Conversational**: Configures the flow to be served as a chat interface in the web UI rather than a simple form +- **ChatMessage type**: Domain type that structures messages with content blocks, role metadata, and conversation context +- **Reset on refresh**: Starting a new browser session creates a new conversation with fresh memory + +## Complete Example + +```yaml +--8<-- "../examples/tutorials/02_conversational_chat.qtype.yaml" +``` + +**Start the chat interface:** +```bash +qtype serve 02_conversational_chat.qtype.yaml +``` + +Visit [http://localhost:8000/ui](http://localhost:8000/ui) to interact with the chatbot. + +## See Also + +- [Serve Flows as UI](serve_flows_as_ui.md) +- [Tutorial: Build a Conversational Chatbot](../../Tutorials/02-conversational-chatbot.md) +- [Flow Reference](../../components/Flow.md) +- [FlowInterface Reference](../../components/FlowInterface.md) +- [ChatMessage Reference](../../components/ChatMessage.md) +- [Memory Concept](../../Concepts/Core/memory.md) diff --git a/docs/How To/Qtype Server/use_variables_with_ui_hints.md b/docs/How To/Qtype Server/use_variables_with_ui_hints.md new file mode 100644 index 00000000..cc5d3a36 --- /dev/null +++ b/docs/How To/Qtype Server/use_variables_with_ui_hints.md @@ -0,0 +1,47 @@ +# Use Variables with UI Hints + +Customize how input variables are displayed in the web UI using the `ui` field on variable definitions. 
+ +### QType YAML + +```yaml +flows: + - type: Flow + id: generate_story + + variables: + # Use textarea widget for multi-line text input + - id: story_prompt + type: text + ui: + widget: textarea + + # Use file upload widget with mime type filtering + - id: document + type: file + ui: + accept: "application/pdf" + + # Variables without ui hints use default widgets + - id: max_length + type: int +``` + +### Explanation + +- **ui.widget**: For `text` variables, controls input style (`text` for single-line, `textarea` for multi-line) +- **ui.accept**: For `file` variables, specifies accepted mime types (e.g., `"application/pdf"`, `"image/*"`, `"*/*"`) +- **Default widgets**: Variables without `ui` hints automatically use appropriate widgets based on their type + +**Note**: UI hints are currently limited to text and file input customization. Other variable types use standard widgets. + +## Complete Example + +```yaml +--8<-- "../examples/language_features/ui_hints.qtype.yaml" +``` + +## See Also + +- [Serve Flows as UI](../../How%20To/Qtype%20Server/serve_flows_as_ui.md) +- [Flow Reference](../../components/Flow.md) diff --git a/docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md b/docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md new file mode 100644 index 00000000..e655e901 --- /dev/null +++ b/docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md @@ -0,0 +1,48 @@ +# Bind Tool Inputs and Outputs + +Map flow variables to tool parameters and capture tool results using `input_bindings` and `output_bindings` in the InvokeTool step. 
+ +### QType YAML + +```yaml +steps: + # Tool with no inputs, only output binding + - type: InvokeTool + id: get_current_time + tool: qtype.application.commons.tools.get_current_timestamp + input_bindings: {} + output_bindings: + result: current_time + outputs: [current_time] + + # Tool with multiple input bindings + - type: InvokeTool + id: add_days + tool: qtype.application.commons.tools.timedelta + input_bindings: + timestamp: current_time # Tool param ← flow variable + days: days_until_due # Tool param ← flow variable + output_bindings: + result: deadline_time # Tool output → flow variable + outputs: [deadline_time] +``` + +### Explanation + +- **input_bindings**: Maps tool parameter names (left) to flow variable names (right) +- **output_bindings**: Maps tool output names (left) to flow variable names (right) +- **outputs**: Lists flow variables this step produces (must match output_bindings values) +- **Chaining**: Output variables from one tool become input variables for the next tool + +## Complete Example + +```yaml +--8<-- "../examples/tutorials/04_tools_and_function_calling.qtype.yaml" +``` + +## See Also + +- [Tutorial: Adding Tools to Your Application](../../Tutorials/04-tools-and-function-calling.md) +- [InvokeTool Reference](../../components/InvokeTool.md) +- [Create Tools from Python Modules](create_tools_from_python_modules.md) +- [Create Tools from OpenAPI Specifications](create_tools_from_openapi_specifications.md) diff --git a/docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md b/docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md new file mode 100644 index 00000000..5465286b --- /dev/null +++ b/docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md @@ -0,0 +1,89 @@ +# Create Tools from OpenAPI Specifications + +Generate QType tool definitions automatically from OpenAPI/Swagger specifications using `qtype convert api`, which parses API endpoints, parameters, and schemas to 
create properly typed API tools. + +### Command + +```bash +# Convert from a URL (or use a local file path) +qtype convert api https://petstore3.swagger.io/api/v3/openapi.json --output petstore_tools.qtype.yaml +``` + +This creates the `petstore_tools.qtype.yaml` qtype file you can import into your application. + +### QType YAML + +**Generated tool YAML** (`petstore_tools.qtype.yaml`): +```yaml +id: swagger-petstore---openapi-30 +description: Tools created from API specification petstore_api.json + +auths: + - id: swagger-petstore---openapi-30_api_key_api_key + type: api_key + api_key: your_api_key_here + +tools: + - id: getPetById + name: Find pet by ID. + description: Returns a single pet. + type: APITool + method: GET + endpoint: /api/v3/pet/{petId} + auth: swagger-petstore---openapi-30_api_key_api_key + parameters: + petId: + type: int + optional: false + outputs: + id: + type: int + optional: true + name: + type: text + optional: false + status: + type: text + optional: true +``` + +### Explanation + +- **`convert api`**: CLI subcommand that converts OpenAPI specifications to tool definitions +- **OpenAPI spec**: JSON or YAML file following OpenAPI 3.0+ or Swagger 2.0 format +- **`--output`**: Target YAML file path; omit to print to stdout +- **operationId**: Becomes the tool ID (e.g., `getPetById`) +- **parameters**: Path, query, and header parameters become tool `parameters` +- **responses**: Response schema properties become tool `outputs` +- **servers**: Base URL is combined with path to create full endpoint +- **method**: HTTP method (GET, POST, PUT, DELETE, etc.) 
+- **securitySchemes**: Generates auth providers (API key, OAuth2, Bearer token) + +### Using Generated Tools + +```yaml +references: + - !include petstore_tools.qtype.yaml + +flows: + - id: fetch_pet + steps: + - type: InvokeTool + id: get_pet + tool: getPetById + input_bindings: + petId: pet_id + output_bindings: + name: pet_name + status: pet_status +``` + +See [Tutorial: Adding Tools to Your Application](../../Tutorials/04-tools-and-function-calling.md) for a detailed usage. + + +## See Also + +- [Tutorial: Adding Tools to Your Application](../../Tutorials/04-tools-and-function-calling.md) +- [How-To: Create Tools from Python Modules](create_tools_from_python_modules.md) +- [InvokeTool Reference](../../components/InvokeTool.md) +- [APITool Reference](../../components/APITool.md) diff --git a/docs/How To/Tools & Integration/create_tools_from_python_modules.md b/docs/How To/Tools & Integration/create_tools_from_python_modules.md new file mode 100644 index 00000000..3291ad1b --- /dev/null +++ b/docs/How To/Tools & Integration/create_tools_from_python_modules.md @@ -0,0 +1,90 @@ +# Create Tools from Python Modules + +Generate QType tool definitions automatically from Python functions using `qtype convert module`, which analyzes type hints and docstrings to create properly typed tools. + +### Command + +```bash +qtype convert module myapp.utils --output tools.qtype.yaml +``` + +### QType YAML + +**Input Python module** (`myapp/utils.py`): +```python +from datetime import datetime + +def calculate_age(birth_date: datetime, reference_date: datetime) -> int: + """Calculate age in years between two dates. 
+ + Args: + birth_date: The birth date + reference_date: The date to calculate age at + + Returns: + Age in complete years + """ + age = reference_date.year - birth_date.year + if (reference_date.month, reference_date.day) < (birth_date.month, birth_date.day): + age -= 1 + return age +``` + +**Generated tool YAML** (`tools.qtype.yaml`): +```yaml +id: myapp.utils +description: Tools created from Python module myapp.utils +tools: + - id: myapp.utils.calculate_age + description: Calculate age in years between two dates. + type: PythonFunctionTool + function_name: calculate_age + module_path: myapp.utils + name: calculate_age + inputs: + birth_date: + type: datetime + optional: false + reference_date: + type: datetime + optional: false + outputs: + result: + type: int + optional: false +``` + +### Explanation + +- **`convert module`**: CLI subcommand that converts Python modules to tool definitions +- **module path**: Dot-separated Python module (e.g., `myapp.utils`, `package.submodule`) - must be importable +- **`--output`**: Target YAML file path; omit to print to stdout +- **Type hints**: Required on all parameters and return values; converted to QType types (int, text, datetime, etc.) 
+- **Optional parameters**: Detected from default values (e.g., `name: str = "default"` becomes `optional: true`) +- **Docstrings**: First line becomes tool description; supports Google, NumPy, and reStructuredText formats +- **Public functions only**: Functions starting with `_` are skipped + +### Using Generated Tools + +```yaml +references: + - !include tools.qtype.yaml + +flows: + - id: check_age + steps: + - type: InvokeTool + id: calc + tool: myapp.utils.calculate_age + input_bindings: + birth_date: dob + reference_date: today + output_bindings: + result: age +``` + +## See Also + +- [Tutorial: Adding Tools to Your Application](../../Tutorials/04-tools-and-function-calling.md) +- [InvokeTool Reference](../../components/InvokeTool.md) +- [PythonFunctionTool Reference](../../components/PythonFunctionTool.md) diff --git a/docs/Reference/.pages b/docs/Reference/.pages deleted file mode 100644 index 6e68af54..00000000 --- a/docs/Reference/.pages +++ /dev/null @@ -1,4 +0,0 @@ -title: Reference -nav: - - Python API - - Examples diff --git a/docs/Reference/Examples/rag.mmd b/docs/Reference/Examples/rag.mmd deleted file mode 100644 index 97fe6ee9..00000000 --- a/docs/Reference/Examples/rag.mmd +++ /dev/null @@ -1,58 +0,0 @@ -flowchart TD - subgraph APP ["📱 Application: rag_example"] - direction TB - - subgraph FLOW_0 ["🔄 Flow: rag_chat -Chat with the document collection using RAG"] - direction LR - FLOW_0_START@{shape: circle, label: "▶️ Start"} - FLOW_0_S0@{shape: rect, label: "⚙️ Step: extract_question"} - FLOW_0_S1@{shape: cyl, label: "🔎 Vector Search: search_index"} - FLOW_0_S2@{shape: doc, label: "📄 Template: build_prompt"} - FLOW_0_S3@{shape: rounded, label: "✨ generate_response"} - FLOW_0_START -->|user_message: ChatMessage'>| FLOW_0_S0 - FLOW_0_S0 -->|user_question: text| FLOW_0_S1 - FLOW_0_S1 -->|"search_results: list[RAGSearchResult]"| FLOW_0_S2 - FLOW_0_S0 -->|user_question: text| FLOW_0_S2 - FLOW_0_S2 -->|context_prompt: text| FLOW_0_S3 - end - - subgraph 
FLOW_1 ["🔄 Flow: document_ingestion -Load LlamaIndex Q&A pairs from HuggingFace, split, embed, and index documents"] - direction TB - FLOW_1_S0@{shape: rect, label: "⚙️ Step: load_documents"} - FLOW_1_S1@{shape: rect, label: "⚙️ Step: split_documents"} - FLOW_1_S2@{shape: rect, label: "⚙️ Step: embed_chunks"} - FLOW_1_S3@{shape: rect, label: "⚙️ Step: index_chunks"} - FLOW_1_S0 -->|raw_document: RAGDocument'>| FLOW_1_S1 - FLOW_1_S1 -->|document_chunk: RAGChunk'>| FLOW_1_S2 - FLOW_1_S2 -->|embedded_chunk: RAGChunk'>| FLOW_1_S3 - end - - subgraph RESOURCES ["🔧 Shared Resources"] - direction LR - AUTH_AWS_AUTH@{shape: hex, label: "🔐 aws_auth\nAWS"} - MODEL_CLAUDE_SONNET@{shape: rounded, label: "✨ claude_sonnet (aws-bedrock)" } - MODEL_CLAUDE_SONNET -.->|uses| AUTH_AWS_AUTH - MODEL_TITAN_EMBED_V2@{shape: rounded, label: "✨ titan_embed_v2 (aws-bedrock)" } - MODEL_TITAN_EMBED_V2 -.->|uses| AUTH_AWS_AUTH - end - - end - - FLOW_0_S1 -.-> INDEX_RAG_INDEX - FLOW_0_S3 -.->|uses| MODEL_CLAUDE_SONNET - - %% Styling - classDef appBox fill:none,stroke:#495057,stroke-width:3px - classDef flowBox fill:#e1f5fe,stroke:#0277bd,stroke-width:2px - classDef llmNode fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px - classDef modelNode fill:#e8f5e8,stroke:#2e7d32,stroke-width:2px - classDef authNode fill:#fff3e0,stroke:#ef6c00,stroke-width:2px - classDef telemetryNode fill:#fce4ec,stroke:#c2185b,stroke-width:2px - classDef resourceBox fill:#f5f5f5,stroke:#616161,stroke-width:1px - - class APP appBox - class FLOW_0 flowBox - class RESOURCES resourceBox - class TELEMETRY telemetryNode \ No newline at end of file diff --git a/docs/Reference/Examples/simple-chat.md b/docs/Reference/Examples/simple-chat.md deleted file mode 100644 index 4470e213..00000000 --- a/docs/Reference/Examples/simple-chat.md +++ /dev/null @@ -1,39 +0,0 @@ -# A Simple Chatbot - -This example creates a simple chat bot that you can converse with in the included UI, and keep track of the execution with open telemetry. 
- -### The QType File - -```yaml ---8<-- "../examples/openai/hello_world_chat_with_telemetry.qtype.yaml" -``` - -You can download it [here](https://github.com/bazaarvoice/qtype/blob/main/examples/openai/hello_world_chat_with_telemetry.qtype.yaml). -There is also a version for [AWS Bedrock](https://github.com/bazaarvoice/qtype/blob/main/examples/bedrock/hello_world_chat_with_telemetry.qtype.yaml). - -### The Architecture - -```mermaid ---8<-- "Reference/Examples/chat_with_telemetry.mmd" -``` - -### Authorization - -You'll need an OpenAI key. -Put it in a `.env` file, and name the variable `OPENAI_KEY` - -### Telemetry -The code pushes telemetry to a sync on your local machine. Start `arize-phoenix` with: -```bash -phoenix serve -``` -Before running. - -### Runing the App - -Just run: -```bash -qtype serve chat_with_telemetry.qtype.yaml -``` - -And you can opne thet chat at [http://localhost:8000/ui](http://localhost:8000/ui) \ No newline at end of file diff --git a/docs/Reference/cli.md b/docs/Reference/cli.md new file mode 100644 index 00000000..6b16467a --- /dev/null +++ b/docs/Reference/cli.md @@ -0,0 +1,293 @@ +# Command Line Interface + +The QType CLI lets you run applications, validate specifications, serve web interfaces, and generate resources. + +## Installation + +The QType CLI is installed with the qtype package. Run commands with: + +```bash +qtype [command] [options] +``` + +## Global Options + +``` +--log-level {DEBUG,INFO,WARNING,ERROR,CRITICAL} + Set the logging level (default: INFO) +``` + +## Commands + +### run + +Execute a QType application locally. + +```bash +qtype run [options] spec +``` + +#### Arguments + +- **`spec`** - Path to the QType YAML spec file + +#### Options + +- **`-f FLOW, --flow FLOW`** - The name of the flow to run. 
If not specified, runs the first flow found +- **`-i INPUT, --input INPUT`** - JSON blob of input values for the flow (default: `{}`) +- **`-I INPUT_FILE, --input-file INPUT_FILE`** - Path to a file (e.g., CSV, JSON, Parquet) with input data for batch processing +- **`-o OUTPUT, --output OUTPUT`** - Path to save output data. If input is a DataFrame, output will be saved as parquet. If single result, saved as JSON +- **`--progress`** - Show progress bars during flow execution + +#### Examples + +Run a simple application: +```bash +qtype run app.qtype.yaml +``` + +Run with inline JSON inputs: +```bash +qtype run app.qtype.yaml -i '{"question": "What is AI?"}' +``` + +Run a specific flow: +```bash +qtype run app.qtype.yaml --flow process_data +``` + +Batch process data from a file: +```bash +qtype run app.qtype.yaml --input-file inputs.csv --output results.parquet +``` + +#### See Also + +- [How To: Pass Inputs On The CLI](../How%20To/Command%20Line%20Usage/pass_inputs_on_the_cli.md) +- [How To: Load Multiple Inputs from Files](../How%20To/Command%20Line%20Usage/load_multiple_inputs_from_files.md) +- [Tutorial: Your First QType Application](../Tutorials/01-first-qtype-application.md) + +--- + +### validate + +Validate a QType YAML spec against the schema and semantic rules. + +```bash +qtype validate [options] spec +``` + +#### Arguments + +- **`spec`** - Path to the QType YAML spec file + +#### Options + +- **`-p, --print`** - Print the spec after validation (default: False) + +#### Examples + +Validate a specification: +```bash +qtype validate app.qtype.yaml +``` + +Validate and print the parsed spec: +```bash +qtype validate app.qtype.yaml --print +``` + +#### See Also + +- [How To: Validate QType YAML](../How%20To/Observability%20&%20Debugging/validate_qtype_yaml.md) +- [Reference: Semantic Validation Rules](semantic-validation-rules.md) + +--- + +### serve + +Serve a web experience for a QType application with an interactive UI. 
+ +```bash +qtype serve [options] spec +``` + +#### Arguments + +- **`spec`** - Path to the QType YAML spec file + +#### Options + +- **`-p PORT, --port PORT`** - Port to run the server on (default: 8080) +- **`-H HOST, --host HOST`** - Host to bind the server to (default: 0.0.0.0) +- **`--reload`** - Enable auto-reload on code changes (default: False) + +#### Examples + +Serve an application: +```bash +qtype serve app.qtype.yaml +``` + +Serve on a specific port: +```bash +qtype serve app.qtype.yaml --port 3000 +``` + +Serve with auto-reload for development: +```bash +qtype serve app.qtype.yaml --reload +``` + +#### See Also + +- [How To: Serve Flows as APIs](../How%20To/Qtype%20Server/serve_flows_as_apis.md) +- [How To: Serve Flows as UI](../How%20To/Qtype%20Server/serve_flows_as_ui.md) +- [How To: Use Conversational Interfaces](../How%20To/Qtype%20Server/use_conversational_interfaces.md) +- [How To: Serve Applications with Auto-Reload](../How%20To/Qtype%20Server/serve_applications_with_auto_reload.md) +- [Tutorial: Building a Stateful Chatbot](../Tutorials/02-conversational-chatbot.md) + +--- + +### visualize + +Generate a visual diagram of your QType application architecture. 
+ +```bash +qtype visualize [options] spec +``` + +#### Arguments + +- **`spec`** - Path to the QType YAML file + +#### Options + +- **`-o OUTPUT, --output OUTPUT`** - If provided, write the mermaid diagram to this file +- **`-nd, --no-display`** - If set, don't display the diagram in a browser (default: False) + +#### Examples + +Visualize and open in browser: +```bash +qtype visualize app.qtype.yaml +``` + +Save to file without displaying: +```bash +qtype visualize app.qtype.yaml --output diagram.mmd --no-display +``` + +Generate and save diagram: +```bash +qtype visualize app.qtype.yaml --output architecture.mmd +``` + +#### See Also + +- [How To: Visualize Application Architecture](../How%20To/Observability%20&%20Debugging/visualize_application_architecture.md) + +--- + +### convert + +Create QType tool definitions from external sources. + +```bash +qtype convert {module,api} [options] +``` + +#### Subcommands + +##### convert module + +Convert a Python module to QType tools format. + +```bash +qtype convert module [options] module_path +``` + +**Arguments:** + +- **`module_path`** - Path to the Python module to convert + +**Options:** + +- **`-o OUTPUT, --output OUTPUT`** - Output file path. If not specified, prints to stdout + +**Examples:** + +Convert a Python module: +```bash +qtype convert module myapp.utils --output tools.qtype.yaml +``` + +Print to stdout: +```bash +qtype convert module myapp.utils +``` + +**See Also:** + +- [How To: Create Tools from Python Modules](../How%20To/Tools%20&%20Integration/create_tools_from_python_modules.md) +- [Tutorial: Adding Tools to Your Application](../Tutorials/04-tools-and-function-calling.md) + +##### convert api + +Convert an OpenAPI/Swagger specification to QType format. + +```bash +qtype convert api [options] api_spec +``` + +**Arguments:** + +- **`api_spec`** - Path to the API specification file (supports local files or URLs) + +**Options:** + +- **`-o OUTPUT, --output OUTPUT`** - Output file path. 
If not specified, prints to stdout + +**Examples:** + +Convert an OpenAPI spec: +```bash +qtype convert api spec.oas.yaml --output api_tools.qtype.yaml +``` + +Convert from a URL: +```bash +qtype convert api https://petstore3.swagger.io/api/v3/openapi.json --output petstore.qtype.yaml +``` + +**See Also:** + +- [How To: Create Tools from OpenAPI Specifications](../How%20To/Tools%20&%20Integration/create_tools_from_openapi_specifications.md) +- [Tutorial: Adding Tools to Your Application](../Tutorials/04-tools-and-function-calling.md) + +--- + +### generate + +Generate QType project resources (primarily for internal development). + +```bash +qtype generate {commons,schema,dsl-docs,semantic-model} [options] +``` + +This command is primarily used for QType development and maintenance. + +#### Subcommands + +- **`commons`** - Generates the commons library tools from `tools.py` +- **`schema`** - Generates the JSON schema for the QType DSL from `model.py` +- **`dsl-docs`** - Generates markdown documentation for the QType DSL classes from `model.py` +- **`semantic-model`** - Generates the semantic model from QType DSL (See [Contributing](../Contributing/)) + +--- + +## Exit Codes + +- **0** - Success +- **1** - Error (validation failure, runtime error, etc.) + diff --git a/docs/Reference/Python API/plugins.md b/docs/Reference/plugins.md similarity index 100% rename from docs/Reference/Python API/plugins.md rename to docs/Reference/plugins.md diff --git a/docs/Reference/semantic-validation-rules.md b/docs/Reference/semantic-validation-rules.md new file mode 100644 index 00000000..8b641c63 --- /dev/null +++ b/docs/Reference/semantic-validation-rules.md @@ -0,0 +1,179 @@ +# Semantic Validation Rules + +Semantic validation happens after loading your yaml. You should expect to see these even if the yaml is validated by the spec. 
+ +You can validate any qtype file with: +``` +qtype validate your_file.qtype.yaml +``` + +This document lists all semantic validation rules enforced by QType. These rules are checked after YAML parsing and reference resolution. + +--- + +## Agent + +- Must have exactly 1 input +- Must have exactly 1 output +- Input must be type `text` or `ChatMessage` +- Output type must match input type + +--- + +## Application + +- If using `SecretReference`, must configure `secret_manager` +- For `AWSSecretManager`, auth must be `AWSAuthProvider` + +--- + +## AWSAuthProvider + +- Must specify at least one authentication method: + - Access keys (`access_key_id` + `secret_access_key`) + - Profile name (`profile_name`) + - Role ARN (`role_arn`) +- If assuming a role, must provide base credentials (access keys or profile) + +--- + +## BedrockReranker + +- Must have exactly 2 inputs +- One input must be type `text` (query) +- One input must be type `list[SearchResult]` (results to rerank) +- Must have exactly 1 output of type `list[SearchResult]` + +--- + +## Collect + +- Must have exactly 1 input -- any type `T` +- Must have exactly 1 output of type `list[T]` +- Output list element type must match input type + +--- + +## Construct + +- Must have at least 1 input +- Must have exactly 1 output +- Output type must be a Pydantic BaseModel (Custom type or Domain type) + +--- + +## Decoder + +- Must have exactly 1 input of type `text` +- Must have at least 1 output + +--- + +## DocToTextConverter + +- Must have exactly 1 input of type `RAGDocument` +- Must have exactly 1 output of type `RAGDocument` + +--- + +## DocumentEmbedder + +- Must have exactly 1 input of type `RAGChunk` +- Must have exactly 1 output of type `RAGChunk` + +--- + +## DocumentSearch + +- Must have exactly 1 input of type `text` +- Must have exactly 1 output of type `list[SearchResult]` + +--- + +## DocumentSource + +- Must have exactly 1 output of type `RAGDocument` + +--- + +## DocumentSplitter + +- Must have exactly 
1 input of type `RAGDocument` +- Must have exactly 1 output of type `RAGChunk` + +--- + +## Echo + +- Input and output variable IDs must match (order can differ) + +--- + +## Explode + +- Must have exactly 1 input of type `list[T]` +- Must have exactly 1 output of type `T` +- Output type must match input list element type + +--- + +## FieldExtractor + +- Must have exactly 1 input +- Must have exactly 1 output +- `json_path` must be non-empty + +--- + +## Flow + +**General:** +- Must have at least 1 step +- All step inputs must be fulfilled by flow inputs or previous step outputs + +**Conversational Interface:** +- Must have exactly 1 `ChatMessage` input +- All non-ChatMessage inputs must be listed in `session_inputs` +- Must have exactly 1 `ChatMessage` output + +**Complete Interface:** +- Must have exactly 1 input of type `text` +- Must have exactly 1 output of type `text` + +--- + +## IndexUpsert + +**For Vector Index:** +- Must have exactly 1 input +- Input must be type `RAGChunk` or `RAGDocument` + +**For Document Index:** +- Must have at least 1 input + +--- + +## LLMInference + +- Must have exactly 1 output +- Output must be type `text` or `ChatMessage` + +--- + +## PromptTemplate + +- Must have exactly 1 output +- Output must be type `text` + +--- + +## SQLSource + +- Must have at least 1 output + +--- + +## VectorSearch + +- Must have exactly 1 input of type `text` +- Must have exactly 1 output of type `list[RAGSearchResult]` \ No newline at end of file diff --git a/docs/Tutorials/01-first-qtype-application.md b/docs/Tutorials/01-first-qtype-application.md index aa00dc57..6d2c746d 100644 --- a/docs/Tutorials/01-first-qtype-application.md +++ b/docs/Tutorials/01-first-qtype-application.md @@ -1,8 +1,8 @@ -# Build Your First QType Application +# Your First QType Application **Time:** 15 minutes **Prerequisites:** None -**Example:** [`hello_world.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/hello_world.qtype.yaml) +**Example:** 
[`01_hello_world.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/01_hello_world.qtype.yaml) **What you'll learn:** Build a working AI-powered question-answering application and understand the core concepts of QType. @@ -14,10 +14,10 @@ ### Create the File -Create a new file called `my_first_app.qtype.yaml` and add: +Create a new file called `01_hello_world.qtype.yaml` and add: ```yaml -id: hello_world +id: 01_hello_world description: My first QType application ``` @@ -26,8 +26,6 @@ description: My first QType application - Every QType application starts with an `id` - a unique name for your app - The `description` helps you remember what the app does (optional but helpful) -**Check your work:** Your file should have exactly 2 lines. Save it. - --- ### Add Your AI Model @@ -35,35 +33,50 @@ description: My first QType application Add these lines to your file: ```yaml -id: hello_world -description: My first QType application +auths: + - type: api_key + id: openai_auth + api_key: ${OPENAI_KEY} + host: https://api.openai.com models: - -- type: Model + - type: Model id: gpt-4 provider: openai model_id: gpt-4-turbo + auth: openai_auth inference_params: temperature: 0.7 + ``` **What this means:** +- `auths:` - different authorization credentials you will use for model invocation (if any) +- `api_key: ${OPENAI_KEY}` - the api key is read from the environment variable `OPENAI_KEY` - `models:` - Where you configure which AI to use -- `type: Model` - Tells QType "this is an AI model" - `id: gpt-4` - A nickname you'll use to refer to this model +- `model_id` - The provider's model id. - `provider: openai` - Which AI service to use - `temperature: 0.7` - Controls creativity (0 = focused, 1 = creative) **Check your work:** -1. Make sure the indentation matches exactly (2 spaces for each level) -2. Save the file -3. Run: `qtype validate my_first_app.qtype.yaml` +1. Save the file +2. Run: `qtype validate 01_hello_world.qtype.yaml` 4. 
You should see: `✅ Validation successful` -**Troubleshooting:** If you get an error about indentation, check that you're using spaces (not tabs) and that each nested item is indented by exactly 2 spaces. + +**Using AWS Bedrock instead?** Replace the models section with: +```yaml +models: + - type: Model + id: nova + provider: aws-bedrock + model_id: amazon.nova-lite-v1:0 +``` + +And ensure your AWS credentials are configured (`aws configure`). --- @@ -75,23 +88,19 @@ A "flow" is where you define what your app actually does. Add this to your file: ```yaml flows: - -- type: Flow + - type: Flow id: simple_example variables: - -- id: question + - id: question type: text - id: formatted_prompt type: text - id: answer type: text inputs: - -- question + - question outputs: - -- answer + - answer ``` **What this means:** @@ -105,7 +114,7 @@ flows: **Check your work:** -1. Validate again: `qtype validate my_first_app.qtype.yaml` +1. Validate again: `qtype validate 01_hello_world.qtype.yaml` 2. Still should see: `✅ Validation successful` --- @@ -116,26 +125,21 @@ Now tell QType what to do with the question. Add this inside your flow (after `o ```yaml steps: - -- id: format_prompt + - id: format_prompt type: PromptTemplate template: "You are a helpful assistant. Answer the following question:\n{question}\n" inputs: - -- question + - question outputs: + - formatted_prompt -- formatted_prompt - - id: llm_step type: LLMInference model: gpt-4 inputs: - -- formatted_prompt + - formatted_prompt outputs: - -- answer + - answer ``` **What this means:** @@ -152,7 +156,7 @@ Now tell QType what to do with the question. Add this inside your flow (after `o **Check your work:** -1. Validate: `qtype validate my_first_app.qtype.yaml` +1. Validate: `qtype validate 01_hello_world.qtype.yaml` 2. Should still pass ✅ --- @@ -169,18 +173,6 @@ OPENAI_KEY=sk-your-key-here Replace `sk-your-key-here` with your actual OpenAI API key. 
-**Using AWS Bedrock instead?** Replace the models section with: -```yaml -models: - -- type: Model - id: nova - provider: aws-bedrock - model_id: amazon.nova-lite-v1:0 -``` - -And ensure your AWS credentials are configured (`aws configure`). - --- ### Test It! @@ -188,7 +180,7 @@ And ensure your AWS credentials are configured (`aws configure`). Run your application: ```bash -qtype run -i '{"question":"What is 2+2?"}' my_first_app.qtype.yaml +qtype run -i '{"question":"What is 2+2?"}' 01_hello_world.qtype.yaml ``` **What you should see:** @@ -210,10 +202,10 @@ qtype run -i '{"question":"What is 2+2?"}' my_first_app.qtype.yaml ```bash # Simple math -qtype run -i '{"question":"What is the capital of France?"}' my_first_app.qtype.yaml +qtype run -i '{"question":"What is the capital of France?"}' 01_hello_world.qtype.yaml # More complex -qtype run -i '{"question":"Explain photosynthesis in one sentence"}' my_first_app.qtype.yaml +qtype run -i '{"question":"Explain photosynthesis in one sentence"}' 01_hello_world.qtype.yaml ``` --- @@ -235,7 +227,7 @@ Congratulations! You've learned: **Reference the complete example:** -- [`hello_world.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/hello_world.qtype.yaml) - Full working example +- [`01_hello_world.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/01_hello_world.qtype.yaml) - Full working example **Learn more:** @@ -253,4 +245,4 @@ A: It makes data flow explicit and helps QType validate your app before running A: Yes! Define multiple models in the `models:` section and reference them by their `id` in steps. **Q: My validation passed but I get errors when running. Why?** -A: Validation checks structure, but runtime errors often involve authentication or model access. Check your API keys and model permissions. \ No newline at end of file +A: Validation checks structure, but runtime errors often involve authentication or model access. 
Check your API keys and model permissions. diff --git a/docs/Tutorials/02-conversational-chatbot.md b/docs/Tutorials/02-conversational-chatbot.md index e0a6a020..6bb45d9d 100644 --- a/docs/Tutorials/02-conversational-chatbot.md +++ b/docs/Tutorials/02-conversational-chatbot.md @@ -1,120 +1,53 @@ # Build a Conversational Chatbot **Time:** 20 minutes -**Prerequisites:** [Tutorial 1: Build Your First QType Application](01-first-qtype-application.md) -**Example:** [`hello_world_chat.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/hello_world_chat.qtype.yaml) +**Prerequisites:** [Tutorial 1: Your First QType Application](01-first-qtype-application.md) +**Example:** [`02_conversational_chat.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/02_conversational_chat.qtype.yaml) -**What you'll learn:** Add memory to your QType application and create a chatbot that remembers previous messages in the conversation. +**What you'll learn:** -**What you'll build:** A stateful chatbot that maintains conversation history and provides contextual responses. - ---- +* Stateful flows with memory +* Using the web ui +* Domain types -## Part 1: Stateless vs. Stateful Applications (3 minutes) - -In [Build Your First QType Application](01-first-qtype-application.md), you built a **stateless** application - it processed each question independently with no memory of previous interactions: - -``` -You: What is 2+2? -AI: 4. - -You: What about that times 3? -AI: I don't know what "that" refers to. ❌ -``` - -Today you'll build a **stateful** chatbot that remembers the conversation: - -``` -You: What is 2+2? -AI: 4. - -You: What about that times 3? -AI: 12. I multiplied the previous answer (4) by 3. ✅ -``` - -This requires two new concepts: **Memory** and **Conversational Interface**. +**What you'll build:** A stateful chatbot that maintains conversation history and provides contextual responses. 
--- -## Flow Interfaces: Complete vs Conversational - -QType flows have two interface types that control how they process requests: +## Background: A Quick Note on Flows -### Complete Interface (from previous tutorial) +Flows are effectively data pipelines -- they accept input values and produce output values. +The flow will execute for each input it receives. -- **Default behavior** - You don't need to specify it -- Processes one request → one response -- No memory between requests -- Each request is independent -- Like a REST API call or function call +Thus, for a conversational AI, each message from the user is one execution of the flow. -**Example use cases:** +Flows are inherently _stateless_: no data is stored between executions though they can use tools, apis, or memory to share data. -- Simple Q&A -- Data transformation -- Single-step calculations +In this example, we'll use memory to let the flow remember previous chat messages from both the user and the LLM. -**In YAML (optional to specify):** -```yaml -flows: - -- type: Flow - id: simple_flow - interface: - type: Complete # Optional - this is the default -``` - -### Conversational Interface (This Tutorial) - -- **Explicit configuration** - You must specify it -- Maintains conversation history -- Tracks message roles (user/assistant) -- Perfect for back-and-forth interaction - -**Example use cases:** - -- Chatbots -- Virtual assistants -- Multi-turn dialogues - -**In YAML (required):** -```yaml -flows: - - type: Flow - id: chat_flow - interface: - type: Conversational # Required for conversation memory -``` - -Let's compare the interfaces: - -| Feature | Complete Interface | Conversational Interface | - -**Key Rule:** Memory only works with Conversational interface. If your flow uses memory, it must declare `interface.type: Conversational`. - ---- ## Part 1: Add Memory to Your Application (5 minutes) ### Create Your Chatbot File -Create a new file called `my_chatbot.qtype.yaml`. 
Start by copying your application structure from the previous tutorial: +Create a new file called `02_conversational_chat.qtype.yaml`. Let's use bedrock for this example, but you could also use OpenAI as in the previous tutorial: ```yaml -id: my_chatbot +id: 02_conversational_chat description: A conversational chatbot with memory models: -- type: Model - id: gpt-4 - provider: openai - model_id: gpt-4-turbo +models: + - type: Model + id: nova_lite + provider: aws-bedrock + model_id: amazon.nova-lite-v1:0 inference_params: temperature: 0.7 -``` + max_tokens: 512 -**What's different:** We changed the `id` and `description` to reflect that this is a chatbot. +``` --- @@ -124,26 +57,20 @@ Now add a memory configuration *before* the `flows:` section: ```yaml memories: - -- id: chat_memory - token_limit: 50000 - chat_history_token_ratio: 0.7 + - id: chat_memory + token_limit: 10000 ``` **What this means:** - `memories:` - Section for memory configurations (new concept!) - `id: chat_memory` - A nickname you'll use to reference this memory -- `token_limit: 50000` - Maximum total tokens (includes conversation + system messages) -- `chat_history_token_ratio: 0.7` - Reserve 70% of tokens for conversation history - -**Why tokens matter:** -LLMs have a maximum context window (how much text they can "see" at once). GPT-4-turbo has a 128k token limit, but we're using 50k here for cost efficiency. The `chat_history_token_ratio` ensures the AI always has room to see enough conversation history while leaving space for its response. +- `token_limit: 10000` - Maximum total tokens to have in the memory **Check your work:** 1. Save the file -2. Validate: `qtype validate my_chatbot.qtype.yaml` +2. Validate: `qtype validate 02_conversational_chat.qtype.yaml` 3. 
Should pass ✅ (even though we haven't added flows yet) --- @@ -156,34 +83,23 @@ Add this flow definition: ```yaml flows: - -- type: Flow - id: chat_flow - description: Main chat flow with conversation memory + - type: Flow + id: simple_chat_example interface: type: Conversational variables: - -- id: user_message + - id: user_message type: ChatMessage - id: response_message type: ChatMessage inputs: - -- user_message + - user_message outputs: - -- response_message + - response_message ``` **New concepts explained:** -**`interface.type: Conversational`** - This is the key difference from the previous Complete interface! - -- Tells QType this flow maintains conversation state -- Automatically manages message history -- Required when using memory in LLMInference steps - **`ChatMessage` type** - A special domain type for chat applications - Represents a single message in a conversation @@ -206,15 +122,21 @@ ChatMessage: The `blocks` list allows multimodal messages (text + images + files), while `role` indicates who sent the message. QType automatically handles this structure when managing conversation history. + **Why two variables?** - `user_message` - What the user types - `response_message` - What the AI responds - QType tracks both in memory for context +**`interface.type: Conversational`** + +This tells QType that the flow should be served as a conversation. When you type `qtype serve` (covered below) this ensures that the ui shows a chat interface instead of just listing inputs and outputs. + + **Check your work:** -1. Validate: `qtype validate my_chatbot.qtype.yaml` +1. Validate: `qtype validate 02_conversational_chat.qtype.yaml` 2. Should still pass ✅ --- @@ -225,18 +147,15 @@ Add the LLM inference step that connects to your memory: ```yaml steps: - -- type: LLMInference - id: chat_step - model: gpt-4 + - id: llm_inference_step + type: LLMInference + model: nova_lite + system_message: "You are a helpful assistant." 
memory: chat_memory
-    system_message: "You are a helpful assistant. Be friendly and conversational."
     inputs:
-
-- user_message
+      - user_message
     outputs:
-
-- response_message
+      - response_message
 ```
 
 **What's new:**
@@ -250,7 +169,7 @@ Add the LLM inference step that connects to your memory:
 
 **Check your work:**
 
-1. Validate: `qtype validate my_chatbot.qtype.yaml`
+1. Validate: `qtype validate 02_conversational_chat.qtype.yaml`
 2. Should pass ✅
 
 ---
@@ -262,22 +181,30 @@ Add the LLM inference step that connects to your memory:
 
 ### Set Up Your API Key
 
 Create `.env` in the same folder (or update your existing one):
 
 ```
-OPENAI_API_KEY=sk-your-key-here
+AWS_PROFILE=your-aws-profile
 ```
 
-**Already using AWS Bedrock?** Replace the model configuration with:
+**Using OpenAI?** Replace the model configuration with:
 ```yaml
+auths:
+  - type: api_key
+    id: openai_auth
+    api_key: ${OPENAI_KEY}
+    host: https://api.openai.com
 models:
-
-- type: Model
-  id: claude
-  provider: aws-bedrock
-  model_id: amazon.nova-lite-v1:0
+  - type: Model
+    id: gpt-4
+    provider: openai
+    model_id: gpt-4-turbo
+    auth: openai_auth
     inference_params:
       temperature: 0.7
 ```
 
-And update the step to use `model: claude`.
+And:
+
+- update the step to use `model: gpt-4`.
+- update your `.env` file to have `OPENAI_KEY`
 
 ---
 
@@ -286,7 +213,7 @@ And update the step to use `model: claude`.
 
 ### Test Your Chatbot
 
 Unlike the previous tutorial where you used `qtype run` for one-off questions, conversational applications work better with the web interface:
 
 ```bash
-qtype serve my_chatbot.qtype.yaml
+qtype serve 02_conversational_chat.qtype.yaml
 ```
 
 **What you'll see:**
@@ -297,7 +224,11 @@ INFO: Uvicorn running on http://127.0.0.1:8000
 
 **Visit:** [http://localhost:8000/ui](http://localhost:8000/ui)
 
-You should see a chat interface with your application name at the top.
+You should see a chat interface with your application name at the top. Give it a chat!
+
+![the ui showing a chat interface](example_chat.png)
+
+
 
 ---
 
@@ -316,14 +247,7 @@ You: What food do I like?
AI: You mentioned you love pizza! ✅ ``` -**Experiment:** - -1. Refresh the page - memory resets (new session) -2. Try a multi-step math problem: - -- "Remember the number 42" - - "Now multiply that by 2" - - Does it remember 42? +Refreshing the page creates a new session and the memory is removed. --- @@ -355,22 +279,8 @@ User: Sees response 2. Sending relevant history with each new question 3. Managing token limits automatically ---- - -### Why Token Management Matters - -Your `chat_history_token_ratio: 0.7` setting means: - -- **70% of tokens** → Conversation history (up to 35,000 tokens with our 50k limit) -- **30% of tokens** → System message + AI response (15,000 tokens) - -If the conversation gets too long, QType automatically: -1. Keeps recent messages -2. Drops older messages -3. Ensures the AI always has enough tokens to respond - -**Try it:** Have a very long conversation (50+ exchanges). Notice how the AI forgets early messages but remembers recent context. +**The memory is keyed on the user session** -- it's not accessible by other visitors to the page. --- @@ -381,28 +291,15 @@ Congratulations! 
You've mastered:
 
 ✅ **Memory configuration** - Storing conversation state
 ✅ **Conversational flows** - Multi-turn interactions
 ✅ **ChatMessage type** - Domain-specific data types
-✅ **Token management** - Controlling context window usage
 ✅ **Web interface** - Using `qtype serve` for chat applications
 
 ---
 
-## Compare: Complete vs Conversational Interfaces
-
-| Feature | Complete Interface | Conversational Interface |
-|---------|----------------------|----------------------------|
-| **Interface** | `Complete` (default) | `Conversational` (explicit) |
-| **Memory** | None | `chat_memory` configuration |
-| **Variable Types** | `text` (primitive) | `ChatMessage` (domain type) |
-| **Testing** | `qtype run` (command line) | `qtype serve` (web UI) |
-| **Use Case** | One-off questions | Multi-turn conversations |
-
----
-
 ## Next Steps
 
 **Reference the complete example:**
 
-- [`hello_world_chat.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/hello_world_chat.qtype.yaml) - Full working example
+- [`02_conversational_chat.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/02_conversational_chat.qtype.yaml) - Full working example
 
 **Learn more:**
 
@@ -424,7 +321,7 @@ A: Yes! You can define multiple memories in the `memories:` section and referenc
 A: No - memory only works with `Conversational` interface. Complete flows are stateless by design. If you need to remember information between requests, you must use the Conversational interface.
 
 **Q: When should I use Complete vs Conversational?**
-A: Use Complete for independent requests (data transformation, single questions, API-like behavior). Use Conversational when you need context from previous interactions (chatbots, assistants, multi-step conversations).
+A: Use Complete for streaming single responses from an LLM. Use Conversational when you need context from previous interactions (chatbots, assistants, multi-step conversations).
**Q: How do I clear memory during a conversation?** -A: Currently, you need to start a new session (refresh the page in the UI). Programmatic memory clearing is planned for a future release. +A: Currently, you need to start a new session (refresh the page in the UI). diff --git a/docs/Tutorials/03-structured-data.md b/docs/Tutorials/03-structured-data.md new file mode 100644 index 00000000..13bf2616 --- /dev/null +++ b/docs/Tutorials/03-structured-data.md @@ -0,0 +1,481 @@ +# Working with Types and Structured Data + +**Time:** 25 minutes +**Prerequisites:** [Tutorial 1: Your First QType Application](01-first-qtype-application.md) +**Example:** [`03_structured_data.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/03_structured_data.qtype.yaml) + +**What you'll learn:** + +* Define custom types for your domain +* Parse LLM JSON responses into structured data +* Build typed objects from extracted fields +* Work with list types + +**What you'll build:** An application that analyzes product reviews and extracts structured sentiment data including ratings, confidence scores, and key points. + +--- + +## Background: Why Custom Types? + +So far you've worked with simple `text` types. But real applications need structure: + +- **Domain models** - Product reviews, user profiles, search results +- **Validation** - Ensure data has required fields +- **Type safety** - Catch errors before runtime +- **Composability** - Build complex types from simpler ones + +QType lets you define **CustomTypes** to model your domain, just like you'd define classes in Python. + +--- + +## Part 1: Define Your Custom Type (5 minutes) + +### Create Your Application File + +Create `03_structured_data.qtype.yaml`: + +```yaml +id: review_sentiment_analyzer +description: | + Analyzes a product review to extract structured sentiment insights. + Demonstrates custom types and structured data extraction. 
+``` + +--- + +### Define a CustomType + +Before the `models:` section, add a `types:` section: + +```yaml +types: + - id: ReviewSentiment + description: Structured sentiment analysis of a review + properties: + sentiment: text + confidence: float + key_points: list[text] + rating: int +``` + +**What this means:** + +**`types:` section** - Where you define custom data structures + +**`properties:`** - The fields your type contains: +- `sentiment: text` - A simple text field +- `confidence: float` - A decimal number (0.0-1.0) +- `key_points: list[text]` - A list of strings (new!) +- `rating: int` - An integer (1-5 star rating) + +**List types** use the syntax `list[element_type]`. Examples: +- `list[text]` - List of strings +- `list[int]` - List of integers +- `list[ReviewSentiment]` - List of custom types + +--- + +### Add Model Configuration + +```yaml +models: + - type: Model + id: analyzer_model + provider: aws-bedrock + model_id: amazon.nova-lite-v1:0 + inference_params: + temperature: 0.7 + max_tokens: 512 +``` + +**Check your work:** + +```bash +qtype validate 03_structured_data.qtype.yaml +``` + +Should pass ✅ + +--- + +## Part 2: Build the Analysis Flow (10 minutes) + +### Define Flow Variables + +Add the flow structure: + +```yaml +flows: + - id: analyze_review + description: Analyzes a single review and extracts structured sentiment + inputs: + - review_text + outputs: + - result + + variables: + - id: review_text + type: text + - id: raw_llm_response + type: text + - id: llm_response + type: text + - id: sentiment + type: text + - id: confidence + type: float + - id: key_points + type: list[text] + - id: rating + type: int + - id: result + type: ReviewSentiment +``` + +**What's new:** + +**Multiple variable types** - Notice we have: +- Simple types (`text`, `float`, `int`) +- List type (`list[text]`) +- Custom type (`ReviewSentiment`) + +**Why so many variables?** Each step transforms data from one form to another. 
This explicit data flow makes debugging easier and documents how information moves through your application. + +--- + +### Step 1: Create the Analysis Prompt + +Add the first step under `steps:`: + +```yaml + steps: + # Step 1: Create analysis prompt + - id: analysis_prompt + type: PromptTemplate + template: | + Analyze this product review and extract structured information. + + Review: {{review_text}} + + Respond with ONLY valid JSON, no other text or markdown. Use this exact structure: + {{{{ + "sentiment": "positive|negative|neutral|mixed", + "confidence": 0.95, + "key_points": ["point 1", "point 2"], + "rating": 4 + }}}} + + Where: + - sentiment: overall sentiment (positive/negative/neutral/mixed) + - confidence: your confidence score (0.0-1.0) + - key_points: 2-3 main points from the review + - rating: estimated star rating 1-5 based on the tone + + Return ONLY the JSON object, nothing else. + inputs: + - review_text + outputs: + - raw_llm_response +``` + +**Key technique - Escaping braces:** + +Notice `{{` and `}}` in the template? QType uses Python's `.format()` method where `{variable}` is a placeholder. To include literal curly braces in the output, you must double them: +- `{{` → outputs `{` +- `}}` → outputs `}` + +So to output the JSON structure, we use `{{ ... }}` which renders as `{ ... }` in the actual prompt. + +**Why "ONLY valid JSON"?** LLMs often add explanatory text or wrap JSON in markdown code fences. Being explicit reduces these issues. + +--- + +### Step 2: Run LLM Inference + +```yaml + # Step 2: Run LLM inference + - id: analyze + type: LLMInference + model: analyzer_model + inputs: + - raw_llm_response + outputs: + - llm_response +``` + +**LLMInference step** sends the prompt to your model and returns the response as text. Simple and familiar from Tutorial 1. 
+ +--- + +### Step 3: Parse JSON with Decoder + +Here's the new step type: + +```yaml + # Step 3: Parse the JSON response and build the ReviewSentiment object + # Decoder converts the JSON string into structured data + - id: parse_and_build + type: Decoder + format: json + inputs: + - llm_response + outputs: + - sentiment + - confidence + - key_points + - rating +``` + +**What Decoder does:** + +**`format: json`** - Tells QType to parse as JSON (also supports `xml`) + +**Multiple outputs** - Each output name must match a field in the JSON: +```json +{ + "sentiment": "positive", ← goes to sentiment variable + "confidence": 0.95, ← goes to confidence variable + "key_points": [...], ← goes to key_points variable + "rating": 4 ← goes to rating variable +} +``` + +**Smart parsing:** +- Automatically strips markdown code fences (````json`) +- Validates JSON syntax +- Maps JSON types to QType types (string→text, number→float/int, array→list) +- Raises clear errors if fields are missing or malformed + +**Check your work:** + +```bash +qtype validate 03_structured_data.qtype.yaml +``` + +--- + +### Step 4: Construct the Typed Object + +Final step - convert individual fields into your custom type: + +```yaml + # Step 4: Construct a ReviewSentiment object + # Construct builds typed objects from the decoded fields + - id: build_result + type: Construct + output_type: ReviewSentiment + field_mapping: + sentiment: sentiment + confidence: confidence + key_points: key_points + rating: rating + inputs: + - sentiment + - confidence + - key_points + - rating + outputs: + - result +``` + +**What Construct does:** + +**`output_type: ReviewSentiment`** - Specifies which custom type to build + +**`field_mapping:`** - Maps input variables to type properties: +```yaml +field_mapping: + : +``` + +In this case, names match (`sentiment: sentiment`), but you could use different names: +```yaml +field_mapping: + sentiment: analyzed_sentiment # Maps analyzed_sentiment variable to sentiment 
property +``` + +**Why Construct?** It validates that: +- All required properties are provided +- Types match (float for confidence, int for rating, etc.) +- The result is a valid `ReviewSentiment` instance + +This catches errors early rather than failing later in your application. + +**Final validation:** + +```bash +qtype validate 03_structured_data.qtype.yaml +``` + +Should pass ✅ + +--- + +## Part 3: Test Your Application (5 minutes) + +### Run It! + +```bash +qtype run -i '{"review_text":"These headphones are amazing! Great sound quality and super comfortable. Battery lasts all day."}' 03_structured_data.qtype.yaml +``` + +**Expected output:** + +```json +{ + "result": { + "sentiment": "positive", + "confidence": 0.95, + "key_points": [ + "Great sound quality", + "Super comfortable", + "Long battery life" + ], + "rating": 5 + } +} +``` + +--- + +### Try Different Reviews + +```bash +# Negative review +qtype run -i '{"review_text":"Terrible product. Broke after one week and customer service was unhelpful."}' 03_structured_data.qtype.yaml + +# Mixed review +qtype run -i '{"review_text":"Good sound but uncomfortable after an hour. Battery is okay but not great."}' 03_structured_data.qtype.yaml +``` + +Notice how the LLM adapts its analysis while maintaining the structured format! + +--- + +## Part 4: Understanding the Data Flow (5 minutes) + +### The Complete Pipeline + +Here's what happens when you run the application: + +``` +1. User Input (text) + "These headphones are amazing!" + ↓ + +2. PromptTemplate + Creates prompt with JSON format instructions + ↓ + +3. LLMInference + Sends to model → Returns JSON string + ↓ + +4. Decoder + Parses JSON string → Extracts individual fields + {sentiment: "positive", confidence: 0.95, ...} + ↓ + +5. Construct + Builds ReviewSentiment object from fields + ReviewSentiment(sentiment="positive", confidence=0.95, ...) + ↓ + +6. 
Output (ReviewSentiment) + Validated, typed data ready for downstream use +``` + +**Key insight:** Each step has a single, focused responsibility: +- **PromptTemplate** - Format instructions +- **LLMInference** - Get AI response +- **Decoder** - Parse structured data +- **Construct** - Validate and type + +This separation makes each step testable and reusable. + +**Note:** You could simplify this by having the LLM return `{"result": {...}}` and using Decoder to output directly to a `ReviewSentiment` variable, skipping the Construct step. However, this tutorial demonstrates both steps separately so you understand when to use each: +- **Decoder** - When you need to parse text and extract individual fields +- **Construct** - When you need to build typed objects from already-extracted data + +In practice, use the approach that best fits your use case. + +--- + +### Error Handling + +What happens if the LLM returns invalid JSON? + +**Decoder will fail** with a clear error: +``` +Invalid JSON input: Expecting ',' delimiter: line 2 column 5 (char 45) +``` + +**What if a field is missing?** +``` +Output variable 'confidence' not found in decoded result +``` + +**What if a type is wrong?** +``` +Cannot construct ReviewSentiment: field 'rating' expects int, got str +``` + +These explicit errors help you debug issues quickly. In production, you might add retry logic or fallback values. + +--- + +## What You've Learned + +Congratulations! 
You've mastered: + +✅ **CustomType definition** - Modeling your domain with structured types +✅ **List types** - Working with `list[text]` and other collections +✅ **Decoder step** - Parsing JSON into individual typed fields +✅ **Construct step** - Building validated custom type instances +✅ **Field mapping** - Connecting variables to type properties +✅ **Type safety** - Catching errors early with validation + +--- + +## Next Steps + +**Reference the complete example:** + +- [`03_structured_data.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/03_structured_data.qtype.yaml) - Full working example + +**Learn more:** + +- [CustomType Reference](../components/CustomType.md) - Complete type system +- [Decoder Step](../Concepts/Steps/decoder.md) - Advanced parsing options +- [Construct Step](../components/Construct.md) - Field mapping patterns +- [Type System](../Concepts/Core/types.md) - Primitives, domain types, and custom types + +--- + +## Common Questions + +**Q: Can I nest custom types?** +A: Yes! A CustomType property can be another CustomType: +```yaml +types: + - id: Address + properties: + street: text + city: text + - id: User + properties: + name: text + address: Address # Nested custom type +``` + +**Q: What if the LLM returns extra fields not in my type?** +A: Extra fields are ignored. Decoder only extracts the fields you've specified in `outputs:`. + +**Q: Can Decoder output the entire JSON as one variable?** +A: Not directly. Decoder maps JSON fields to individual outputs. If you need the whole JSON, use `type: any` in your variable and skip Decoder. + +**Q: When should I use Decoder vs FieldExtractor?** +A: Use **Decoder** when you have a JSON/XML string to parse. Use **FieldExtractor** when you already have structured data and need to extract specific fields using JSONPath (covered in advanced tutorials). + +**Q: Can I make properties optional?** +A: Currently all properties are required. 
For optional fields, you can define them in your flow logic but not include them in the Construct step. diff --git a/docs/Tutorials/03-tools-and-function-calling.md b/docs/Tutorials/03-tools-and-function-calling.md deleted file mode 100644 index 43e99949..00000000 --- a/docs/Tutorials/03-tools-and-function-calling.md +++ /dev/null @@ -1,460 +0,0 @@ -# Adding Tools and Function Calling - -**Time:** 20 minutes -**Prerequisites:** [Tutorial 1: Build Your First QType Application](01-first-qtype-application.md), [Tutorial 2: Build a Conversational Chatbot](02-conversational-chatbot.md) -**Example:** [`time_utilities.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/time_utilities.qtype.yaml) - -**What you'll learn:** Extend your QType applications with tools - reusable functions that can perform calculations, call APIs, or execute custom logic. - -**What you'll build:** A time zone calculator that uses a real-world API tool to calculate time differences between cities. - ---- - -## What Are Tools? - -**Tools** in QType are reusable functions that your flows can invoke. They extend your application's capabilities beyond just LLM inference. - -Tools can be: - -- **Python functions** - Call any Python code -- **API endpoints** - Make HTTP requests to external services - -**Why use tools?** - -- Get real-time data (current time, weather, stock prices) -- Perform calculations or data transformations -- Integrate with databases or external systems -- Reuse common functionality across multiple flows - ---- - -## Part 1: Understanding the Commons Library (5 minutes) - -QType includes a built-in library of common tools at `qtype.application.commons.tools`. Let's explore what's available. 
- -### Generate the Commons Library - -**Directory Structure:** - -For this tutorial, we'll generate the commons library in a `common/` directory at the root of your project: - -``` -your-project/ -├── common/ # Generated commons library -│ ├── tools.qtype.yaml -│ └── aws.bedrock.models.qtype.yaml -└── examples/ # Your tutorial files - └── time_utilities.qtype.yaml -``` - -**Generate the files:** - -From your project root, run: - -```bash -qtype generate commons --prefix ./common/ -``` - -This creates two files: - -- `./common/tools.qtype.yaml` - All available Python function tools -- `./common/aws.bedrock.models.qtype.yaml` - AWS Bedrock model configurations - -**Check your work:** - -1. Look at `./common/tools.qtype.yaml` -2. Search for "get_current_timestamp" -3. You should see tool definitions with inputs and outputs - -### Tools We'll Use Today - -From the commons library, we'll use two time-related functions: - -1. **`get_current_timestamp()`** - Returns current UTC time as datetime -2. **`timedelta(timestamp, hours=0, ...)`** - Adds/subtracts time from a timestamp -3. **`calculate_time_difference(start_time, end_time)`** - Calculates duration between two times - ---- - -## Part 2: Create Your Application (5 minutes) - -### Set Up the Basic Structure - -Create a file called `time_utilities.qtype.yaml`: - -```yaml -id: time_utilities -description: A simple application demonstrating tool usage - -# Import the commons tools library -references: - -- !include ../common/tools.qtype.yaml -``` - -**What's happening:** - -- `references: - !include ...` - Imports tool definitions from another YAML file -- All tools from `tools.qtype.yaml` become available in your application -- Tools are referenced by their full IDs like `qtype.application.commons.tools.get_current_timestamp` - -**Check your work:** - -1. Save the file -2. Run: `qtype validate time_utilities.qtype.yaml` -3. 
Should pass ✅ (even with just these lines) - ---- - -### Define Your Flow Variables - -Add a flow with variables for each step's output: - -```yaml -flows: - -- type: Flow - id: time_info_flow - description: Get and format the current timestamp - - variables: - -- id: current_time - type: datetime - - id: time_two_hours_later - type: datetime - - id: time_difference - type: TimeDifferenceResultType - - outputs: - -- current_time - - time_difference -``` - -**What this means:** - -- `variables:` - Declares all data used in the flow -- `datetime` type - QType's built-in type for timestamps -- `TimeDifferenceResultType` - A custom type from the commons library -- `outputs:` - Only these two variables are returned as final results - -**Check your work:** - -1. Validate: `qtype validate time_utilities.qtype.yaml` -2. Should still pass ✅ - ---- - -## Part 3: Add Tool Invocation Steps (10 minutes) - -Now let's add steps that actually call our tools. - -### Step 1: Get Current Timestamp - -Add this step to your flow: - -```yaml - steps: - # Step 1: Get current timestamp - - id: get_time - type: InvokeTool - tool: qtype.application.commons.tools.get_current_timestamp - input_bindings: {} - output_bindings: - result: current_time -``` - -**Breaking down InvokeTool:** - -- `type: InvokeTool` - Step type for calling tools (new concept!) 
-- `tool: ` - Reference to the tool we want to call -- `input_bindings: {}` - Maps flow variables to tool parameters (empty because this tool has no parameters) -- `output_bindings:` - Maps tool outputs back to flow variables - - `result: current_time` - Tool returns `result`, we store it in `current_time` - ---- - -### Step 2: Calculate Future Time - -Add a step to calculate what time it will be in 2 hours: - -```yaml - # Step 2: Calculate time 2 hours from now - - id: add_hours - type: InvokeTool - tool: qtype.application.commons.tools.timedelta - input_bindings: - timestamp: current_time - hours: "2" - output_bindings: - result: time_two_hours_later -``` - -**What this does:** - -- Takes our `current_time` -- Adds 2 hours to it using the `timedelta` tool -- Stores the result in `time_two_hours_later` - -**Key concept:** Input bindings map flow variables to tool parameters. Here we're passing: - -- `timestamp: current_time` - The variable from Step 1 -- `hours: "2"` - The number of hours to add (as a string that will be converted to int) - ---- - -### Step 3: Calculate Time Difference - -Add a final step to calculate the difference: - -```yaml - # Step 3: Calculate the time difference - - id: calc_difference - type: InvokeTool - tool: qtype.application.commons.tools.calculate_time_difference - input_bindings: - start_time: current_time - end_time: time_two_hours_later - output_bindings: - result: time_difference -``` - -**What this does:** - -- Compares `current_time` and `time_two_hours_later` -- Returns a structured object with the difference in seconds, minutes, hours, days -- Stores in `time_difference` (which has type `TimeDifferenceResultType`) - -**Check your work:** - -1. Validate: `qtype validate time_utilities.qtype.yaml` -2. Should pass ✅ - ---- - -## Part 4: Run Your Application (5 minutes) - -### Test It! 
- -Run your application: - -```bash -qtype run time_utilities.qtype.yaml -``` - -**Expected output:** -```json -INFO: Executing workflow from examples/time_utilities.qtype.yaml -INFO: ✅ Flow execution completed successfully -INFO: Processed 1 input(s) -INFO: -Results: -current_time: 2025-11-07 18:47:09.696270+00:00 -time_difference: total_seconds=0.0 total_minutes=0.0 total_hours=0.0 total_days=0.0 days=0 seconds=0 microseconds=0 -``` - -**What happened:** - -1. QType called `get_current_timestamp()` and got the current UTC time -2. Added 2 hours to create a future timestamp -3. Calculated the difference between the two times -4. Returned only the outputs we specified (`current_time` and `time_difference`) - ---- - -## How Tool Invocation Works - -When QType executes an `InvokeTool` step: - -``` -1. Resolve the tool - ↓ -2. Prepare inputs (map variables → tool parameters) - ↓ -3. Validate input types - ↓ -4. Execute the function - ↓ -5. Capture outputs - ↓ -6. Map outputs → flow variables - ↓ -7. Type conversion (to QType types) -``` - -**Key insight:** Tools are just Python functions. QType: - -- Handles importing the module -- Validates all types match -- Converts between Python and QType types automatically -- Manages data flow between steps - ---- - -## What You've Learned - -Congratulations! 
You've mastered: - -✅ **Tool concepts** - What tools are and why they're useful -✅ **Commons library** - Built-in tools available in QType -✅ **InvokeTool step** - How to call tools from flows -✅ **Input/output bindings** - Mapping variables to tool parameters -✅ **Sequential tool chains** - Passing data between multiple tools - ---- - -## Tool Types in QType - -QType supports two types of tools: - -### PythonFunctionTool - -Calls a Python function from a module: - -```yaml -tools: - -- type: PythonFunctionTool - id: my_calculator - name: calculate - description: Performs mathematical calculations - function_name: calculate - module_path: my_tools.math - inputs: - expression: - type: text - optional: false - outputs: - result: - type: float - optional: false -``` - -### APITool - -Calls an HTTP API endpoint: - -```yaml -tools: - -- type: APITool - id: weather_api - name: get_weather - description: Fetches weather data - endpoint: https://api.weather.com/current - method: GET - auth: weather_api_key - inputs: - location: - type: text - outputs: - temperature: - type: float -``` - ---- - -## Common Patterns - -### Pattern 1: Sequential Tool Calls - -Chain tools where each step uses the previous output (like our example): - -```yaml -steps: - -- type: InvokeTool - tool: fetch_data - output_bindings: {result: raw_data} - - - type: InvokeTool - tool: process_data - input_bindings: {data: raw_data} - output_bindings: {result: processed_data} - - - type: InvokeTool - tool: save_data - input_bindings: {data: processed_data} -``` - -### Pattern 2: Using Variables from Flow Inputs - -Pass flow input variables to tools: - -```yaml -flows: - -- type: Flow - id: timezone_converter - variables: - -- id: user_timezone - type: text - - id: current_time - type: datetime - - id: converted_time - type: datetime - inputs: - -- user_timezone - steps: - -- type: InvokeTool - tool: get_current_timestamp - output_bindings: {result: current_time} - - - type: InvokeTool - tool: 
convert_timezone - input_bindings: - timestamp: current_time - timezone: user_timezone - output_bindings: {result: converted_time} -``` - -### Pattern 3: Optional Parameters - -Tools can have optional parameters (use defaults if not provided): - -```yaml -input_bindings: - timestamp: current_time - # hours parameter is optional, defaults to 0 -``` - ---- - -## Next Steps - -**Reference the complete example:** - -- [`time_utilities.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/time_utilities.qtype.yaml) - Full working example - -**Learn more:** - -- [Tool Concept](../Concepts/Core/tool.md) - Full tool architecture -- [InvokeTool Step](../components/InvokeTool.md) - Complete API reference -- [Create Python Tools](../How-To%20Guides/Tools/python-tools.md) - Build your own tools -- [Create API Tools](../How-To%20Guides/Tools/api-tools.md) - Integrate external APIs - ---- - -## Common Questions - -**Q: Do I need to generate the commons library every time?** -A: No, only once. The generated files (`./common/tools.qtype.yaml`) can be committed to your repository and reused. - -**Q: Can I create my own tools?** -A: Yes! Use `qtype convert module` to convert any Python module to QType tools. See the [Python Tools guide](../How-To%20Guides/Tools/python-tools.md). - -**Q: What's the difference between InvokeTool and Agent?** -A: `InvokeTool` explicitly calls one specific tool. `Agent` (covered in a later tutorial) lets the LLM decide which tools to call based on the task. - -**Q: Can tools call other tools?** -A: Not directly. Tools are just functions. But your flow can chain multiple `InvokeTool` steps together. - -**Q: What types can I use in tool parameters?** -A: Any QType primitive type (`text`, `int`, `float`, `bool`, `datetime`) or custom types you define. See [Variable Types](../Concepts/Core/variable.md). - -**Q: How do I handle errors from tools?** -A: Tools that raise exceptions will stop the flow. 
Use `qtype run --log-level DEBUG` to see detailed error information. diff --git a/docs/Tutorials/04-data-processing-pipelines.md b/docs/Tutorials/04-data-processing-pipelines.md deleted file mode 100644 index d5064d73..00000000 --- a/docs/Tutorials/04-data-processing-pipelines.md +++ /dev/null @@ -1,334 +0,0 @@ -# Data Processing Pipelines - -**Time:** 15 minutes -**Prerequisites:** [Tutorial 1: Build Your First QType Application](01-first-qtype-application.md) -**Example:** [`data_processor.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/data_processor.qtype.yaml) - -**What you'll learn:** Build a data processing flow that loads records from a CSV file and aggregates them. You'll learn how QType handles one-to-many operations. - -**What you'll build:** A data processor that reads a CSV file, analyzes multiple rows, and produces summary statistics. - ---- - -## Part 1: Data Processing Patterns (3 minutes) - -All previous tutorials followed a simple pattern: - -- [Build Your First QType Application](01-first-qtype-application.md): One question → one answer -- [Build a Conversational Chatbot](02-conversational-chatbot.md): One message → one response -- [Tools and Function Calling](03-tools-and-function-calling.md): Get time → calculate difference → return result - ---- - -## Understanding Data Processing in QType - -So far, you've built applications that process one request at a time: - -- Tutorial 1: One question → one answer -- Tutorial 2: One message → one response -- Tutorial 3: Get time → calculate difference → return result - -Today you'll work with **data pipelines** that process multiple records: - -``` -Load CSV file → Count records - (stream) (summary) -``` - -### Key Concepts - -**Streaming Processing** - Processing multiple records - -- FileSource reads the file and emits one record at a time -- Each record flows through the pipeline as it's read -- Aggregate collects all records and produces a summary - ---- - -## Part 1: 
Understanding the Pipeline (5 minutes) - -### The Flow We'll Build - -``` -┌──────────────┐ -│ CSV File │ -│ (5 records) │ -└──────┬───────┘ - │ FileSource (emits 5 items) - ↓ -┌──────────────┐ -│ Records │ -│ (streaming) │ -└──────┬───────┘ - │ Aggregate (counts all) - ↓ -┌──────────────┐ -│ Total: 5 │ -└──────────────┘ -``` - -**Key insight:** FileSource emits multiple outputs (one per row) from a single input (file path). - ---- - -### Create Sample Data - -First, let's create test data. Create a folder called `examples/data/`: - -```bash -mkdir -p examples/data -``` - -Create `examples/data/customers.csv`: - -```csv -name,region,purchases -Alice,West,5 -Bob,East,3 -Charlie,West,7 -Diana,North,2 -Eve,East,4 -``` - -**Note:** This is standard CSV format with a header row and data rows. - ---- - -## Part 2: Build the Pipeline (5 minutes) - -### Create Your Application - -Create `examples/data_processor.qtype.yaml`: - -```yaml -id: data_processor -description: Process CSV data to extract and summarize information -``` - ---- - -### Define Your Flow - -Add a flow that declares all the variables we'll use: - -```yaml -flows: - -- type: Flow - id: process_customers - description: Load customer data and count records - - variables: - -- id: file_path - type: text - - id: name - type: text - - id: region - type: text - - id: purchases - type: int - - id: stats - type: AggregateStats - - inputs: - -- file_path - - outputs: - -- stats -``` - -**What's happening:** - -- We declare 5 variables for each stage of processing -- Only `file_path` is required as input (the file path) -- Only `stats` is returned as output (the aggregate summary) -- The intermediate variables (`name`, `region`, `purchases`) flow between steps -- `AggregateStats` is a built-in type with success/failure counts - -**Check your work:** - -1. Validate: `uv run qtype validate examples/data_processor.qtype.yaml` -2. 
Should pass ✅ - ---- - -### Step 1: Load CSV Data - -Add the first step to read the file: - -```yaml - steps: - # Step 1: Read CSV file (emits many records, one per row) - - id: load_file - type: FileSource - path: file_path - inputs: - -- file_path - outputs: - -- name - - region - - purchases -``` - -**New concepts:** - -**`FileSource` step** - Reads data from files - -- `path: file_path` - Reference to variable containing file path -- Automatically detects format from file extension (`.csv`, `.parquet`, `.json`, `.jsonl`) -- Emits one output per row (streaming) -- Output variable names should match CSV column names - -**How it works:** - -``` -Input: file_path = "examples/data/customers.csv" -Process: Read file row by row -Output: 5 separate records with name, region, purchases -``` - -**Important:** The CSV columns (`name`, `region`, `purchases`) must match the output variable names exactly. - ---- - -### Step 2: Aggregate Results - -Add a step to count all the records: - -```yaml - # Step 2: Count all records - - id: count_records - type: Aggregate - inputs: - -- region - outputs: - -- stats -``` - -**`Aggregate` step** - Combines many items into one summary - -- Counts how many items flow through -- Waits for all upstream items before computing -- Emits a single summary with `AggregateStats` containing success/failure counts - -**What this does:** - -``` -Input: 5 records flow through (one at a time) -Output: stats = AggregateStats(num_successful=5, num_failed=0, num_total=5) -``` - -**Check your work:** - -1. Validate: `uv run qtype validate examples/data_processor.qtype.yaml` -2. Should pass ✅ - ---- - -## Part 3: Run Your Pipeline (5 minutes) - -### Test It! 
- -Run the flow with your test data: - -```bash -uv run qtype run -i '{"file_path":"examples/data/customers.csv"}' examples/data_processor.qtype.yaml -``` - -**Expected output:** - -``` -INFO: Executing workflow from examples/data_processor.qtype.yaml -INFO: ✅ Flow execution completed successfully -INFO: Processed 1 input(s) -INFO: -Results: - stats -0 num_successful=5 num_failed=0 num_total=5 -``` - -**What happened:** - -1. FileSource read 5 rows from CSV -2. Each row became a FlowMessage with name, region, purchases -3. All 5 messages streamed through to Aggregate -4. Aggregate counted them and emitted a single final summary with stats - -**Understanding the output:** - -The Aggregate step produces one summary result with statistics about the data that flowed through: - -- `num_successful=5` - 5 records processed successfully -- `num_failed=0` - 0 records had errors -- `num_total=5` - 5 total records processed - ---- - -## What You've Learned - -Congratulations! You've mastered: - -✅ **FileSource step** - Reading data from CSV files (also supports Parquet, JSON, JSONL) -✅ **Aggregate step** - Counting and combining results -✅ **Streaming data** - Processing records one at a time -✅ **Variable naming** - Output names must match column names - ---- - -## Compare: Conversational vs Complete Flows - -### Memory Comparison - -| Feature | Conversational (Build a Conversational Chatbot) | Complete (This Tutorial) | -|---------|----------------------------|----------------------| -| **Interface** | `interface: {type: Conversational}` | Default (no interface specified) | -| **Memory** | Required (stores chat history) | Not used | -| **Input/Output** | One message at a time | Can process multiple records | -| **Use Case** | Chat, assistants | Data processing, ETL | -| **Testing** | `qtype serve` (web UI) | `qtype run` (command line) | - ---- - -## Next Steps - -**Reference the complete example:** - -- 
[`data_processor.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/data_processor.qtype.yaml) - Full working example - -**Learn more:** - -- [FileSource Reference](../components/FileSource.md) - All file formats -- [Aggregate Reference](../components/Aggregate.md) - Statistics details -- [AggregateStats Reference](../components/AggregateStats.md) - Output structure - ---- - -## Common Questions - -**Q: What file formats are supported?** -A: CSV, Parquet, JSON, and JSONL. The format is automatically detected from the file extension. - -**Q: Can I rename columns?** -A: Not currently. Output variable names must match the column names in the file exactly. - -**Q: How do I filter or transform data?** -A: Use the `FieldExtractor` step (from [Tools and Function Calling](03-tools-and-function-calling.md)) or `Decoder` step to parse and transform the data before aggregating. - -**Q: How do I process data from databases?** -A: Use `SQLSource` step instead of `FileSource`. It works similarly but connects to databases and executes SQL queries. - -**Q: How does streaming work with FileSource?** -A: FileSource reads and emits records one at a time rather than loading the entire file into memory. This allows processing large files efficiently. - -**Q: What does Aggregate output?** -A: Aggregate outputs a single summary message with `AggregateStats` containing counts of successful, failed, and total messages that flowed through the step. - -**Q: Can FileSource read from URLs or S3?** -A: Yes! FileSource uses fsspec, so it supports many protocols like `s3://`, `http://`, `gs://`, etc. Just provide the full URI as the file path. 
diff --git a/docs/Tutorials/04-tools-and-function-calling.md b/docs/Tutorials/04-tools-and-function-calling.md new file mode 100644 index 00000000..6e26599c --- /dev/null +++ b/docs/Tutorials/04-tools-and-function-calling.md @@ -0,0 +1,483 @@ +# Adding Tools to Your Application + +**Time:** 20 minutes +**Prerequisites:** [Tutorial 3: Working with Types and Structured Data](03-structured-data.md) +**Example:** [`04_tools_and_function_calling.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/04_tools_and_function_calling.qtype.yaml) + +**What you'll learn:** + +* Import pre-built tools from the commons library +* Use InvokeTool to call Python functions +* Chain multiple tools with input/output bindings +* Understand tool references and automatic generation + +**What you'll build:** A deadline calculator that uses tools to get the current time, add days, and format the result. + +--- + +## Background: What Are Tools? + +Tools extend your QType applications with custom steps. +Tools are how you plug QType into other existing systems that may not be supported in the shipped interpreter. + +**A tool is simply a reference to:** +- **A Python function** - Call any Python function with typed parameters +- **An API endpoint** - Make HTTP requests to external services + +Tools define their **inputs** (parameters) and **outputs** (return values) with explicit types. + +### The Commons Library + +QType provides a [commons library](https://github.com/bazaarvoice/qtype/blob/main/common/tools.qtype.yaml) published on GitHub with pre-built tools for common operations: + +- **Time utilities** - Get current time, add/subtract durations, format timestamps +- **String operations** - Base64 encoding/decoding, text transformations +- **Data processing** - JSON parsing, field extraction, type conversions + +### Automatic Tool Generation + +You don't need to write tool YAML files manually! 
QType can generate them automatically using `qtype convert`: + +**From Python modules:** +```bash +qtype convert python --module myapp.utils --output tools.qtype.yaml +``` + +**From OpenAPI specifications:** +```bash +qtype convert openapi --spec api_spec.yaml --output api_tools.qtype.yaml +``` + +The converter analyzes function signatures or API schemas and creates properly typed tool definitions. + +--- + +## Part 1: Import and Use Your First Tool (7 minutes) + +### Create Your Application File + +Create `04_tools_and_function_calling.qtype.yaml`: + +```yaml +id: deadline_calculator +description: | + Calculates a deadline by adding days to the current timestamp. + Demonstrates tool imports, InvokeTool step, and tool chaining. +``` + +--- + +### Import the Commons Library + +Add a `references:` section before `flows:`: + +```yaml +# Import pre-built tools from the commons library +references: + - !include https://raw.githubusercontent.com/bazaarvoice/qtype/refs/tags/v0.1.11/common/tools.qtype.yaml +``` + +**What this means:** + +**`!include`** - YAML directive to load another file's content. `!include` brings in yaml files inside the header. +- Can use local paths: `!include ../../common/tools.qtype.yaml` +- Or remote URLs: `!include https://...` (shown here) +- Imports all tools, types, and definitions from that file + +**`references:` section** - Where you import external components. References can be other applications or lists of models, tools, authorization providers, variables, or custom types. + +You can now reference any tool by its `id` (like `qtype.application.commons.tools.get_current_timestamp`). 
+ +**Check your work:** + +```bash +qtype validate 04_tools_and_function_calling.qtype.yaml +``` + +Should pass ✅ (even with no flows yet - imports are valid) + +--- + +### Define Your Flow Variables + +Add the flow structure: + +```yaml +flows: + - id: calculate_deadline + description: Calculate a formatted deadline from current time plus days + inputs: + - days_until_due + outputs: + - deadline_formatted + + variables: + # Input + - id: days_until_due + type: int + + # Tool outputs + - id: current_time + type: datetime + - id: deadline_time + type: datetime + - id: format_string + type: text + - id: deadline_formatted + type: text +``` + +**New types:** + +**`datetime` type** - Built-in QType type for timestamps: +- Represents a point in time (date + time) +- Stored internally as ISO 8601 strings +- Automatically converted to/from Python `datetime` objects +- Tools can accept and return `datetime` values + +**Why all these variables?** Each tool transforms data: +- `current_time` ← output from get_current_timestamp +- `deadline_time` ← output from timedelta (current_time + days) +- `deadline_formatted` ← output from format_datetime (pretty string) + +Explicit variables make the data flow visible and debuggable. + +--- + +### Add Your First Tool Call + +Add this under `steps:`: + +```yaml + steps: + # Step 1: Get current timestamp using a tool + # This tool takes no inputs and returns the current UTC time + - id: get_current_time + type: InvokeTool + tool: qtype.application.commons.tools.get_current_timestamp + input_bindings: {} + output_bindings: + result: current_time + outputs: + - current_time +``` + +**New step type: InvokeTool** + +This is your primary way to call tools in QType flows. 
+ +**`tool:`** - Full ID of the tool to invoke: +- Format: `.` +- Must match a tool defined in your imports or application +- Example: `qtype.application.commons.tools.get_current_timestamp` + +**`input_bindings:`** - Maps flow variables to tool parameters: +- Empty `{}` means no inputs needed +- This tool has no parameters - it just returns the current time + +**`output_bindings:`** - Maps tool outputs to flow variables: +- `result: current_time` means "take the tool's `result` output and store it in the `current_time` variable" +- Tool outputs are defined in the tool's YAML definition + +**`outputs:`** - Flow-level outputs this step produces: +- Lists which flow variables this step creates or modifies +- Used by QType to validate data flow through the pipeline + +**Check your work:** + +```bash +qtype validate 04_tools_and_function_calling.qtype.yaml +``` + +Should pass ✅ + +--- + +## Part 2: Chain Tools with Bindings (8 minutes) + +### Add a Constant Variable + +Before we can format our datetime, we need to define the format string: + +```yaml + # Step 2: Create a format string constant + - id: create_format_string + type: PromptTemplate + template: "%B %d, %Y at %I:%M %p UTC" + inputs: [] + outputs: + - format_string +``` + +**Pattern: Constants in flows** + +Since tool `input_bindings` only accept variable names (not literal values), we use PromptTemplate to create constants: +- Template with no placeholders → constant string +- `inputs: []` → no dependencies +- Produces `format_string` variable for later use + +**Format string syntax:** Uses Python's `strftime` format codes: +- `%B` - Full month name (January) +- `%d` - Day of month (14) +- `%Y` - 4-digit year (2026) +- `%I:%M %p` - Time in 12-hour format (03:30 PM) + +--- + +### Add Days with Input Bindings + +Now let's use a tool with multiple inputs: + +```yaml + # Step 3: Calculate deadline by adding days to current time + # input_bindings maps flow variables to tool parameters + - id: add_days + type: 
InvokeTool + tool: qtype.application.commons.tools.timedelta + input_bindings: + timestamp: current_time + days: days_until_due + output_bindings: + result: deadline_time + outputs: + - deadline_time +``` + +**Understanding bindings:** + +**Input bindings structure:** +```yaml +input_bindings: + : +``` + +In this case: +- Tool parameter `timestamp` ← gets value from flow variable `current_time` +- Tool parameter `days` ← gets value from flow variable `days_until_due` + +The `timedelta` tool definition (from commons library) looks like: +```yaml +inputs: + timestamp: + type: datetime + days: + type: int + hours: + type: int + optional: true + # ... more optional parameters +``` + +**Optional parameters:** You only need to bind the required parameters. `timedelta` has many optional parameters (hours, minutes, seconds, weeks), but we only use `days`. + +--- + +### Chain Tools Together + +Finally, format the deadline using the output from the previous step: + +```yaml + # Step 4: Format deadline for human readability + # Shows chaining: output from previous tool becomes input to this one + - id: format_deadline + type: InvokeTool + tool: qtype.application.commons.tools.format_datetime + input_bindings: + timestamp: deadline_time + format_string: format_string + output_bindings: + result: deadline_formatted + outputs: + - deadline_formatted +``` + +**Check your work:** + +```bash +qtype validate 04_tools_and_function_calling.qtype.yaml +``` + +Should pass ✅ + +--- + +## Part 3: Test Your Tools (5 minutes) + +### Run the Application + +```bash +qtype run -i '{"days_until_due": 3}' 04_tools_and_function_calling.qtype.yaml +``` + +**Expected output:** + +```json +{ + "deadline_formatted": "January 17, 2026 at 03:39 PM UTC" +} +``` + +The exact time will match when you run it, but the date should be 3 days from now. 
+ +--- + +### Try Different Durations + +```bash +# One week deadline +qtype run -i '{"days_until_due": 7}' 04_tools_and_function_calling.qtype.yaml + +# Two weeks +qtype run -i '{"days_until_due": 14}' 04_tools_and_function_calling.qtype.yaml + +# Same day (0 days) +qtype run -i '{"days_until_due": 0}' 04_tools_and_function_calling.qtype.yaml +``` + +--- + +### Add the `--progress` Flag + +For more visibility into tool execution: + +```bash +qtype run -i '{"days_until_due": 3}' 04_tools_and_function_calling.qtype.yaml --progress +``` + +You'll see each step execute in real-time: + +``` +Step get_current_time ✔ 1 succeeded +Step create_format_string ✔ 1 succeeded +Step add_days ✔ 1 succeeded +Step format_deadline ✔ 1 succeeded +``` + +--- + +## Part 4: Understanding Tools Deeply (Bonus) + +### Tool Types and Custom Types + +Remember from Tutorial 3 where `calculate_time_difference` returns `TimeDifferenceResultType`? That's a custom type defined in the commons library: + +```yaml +types: + - id: TimeDifferenceResultType + properties: + days: int + seconds: int + microseconds: int + total_hours: float + total_minutes: float + total_seconds: float + total_days: float +``` + +If you use `calculate_time_difference`, you can extract fields from its result: + +```yaml +- id: calc_difference + type: InvokeTool + tool: qtype.application.commons.tools.calculate_time_difference + input_bindings: + start_time: start + end_time: end + output_bindings: + result: time_diff # time_diff is now TimeDifferenceResultType + +# Later, access fields using FieldExtractor or Construct +``` + +--- + +### Generating Your Own Tools + +When you're ready to create custom tools, use `qtype convert`: + +**From a Python module:** + +```bash +# Generate tools from all functions in myapp.utils +qtype convert python --module myapp.utils --output my_tools.qtype.yaml +``` + +QType will: +- Scan all public functions in the module +- Extract type hints from function signatures +- Generate tool 
definitions with proper input/output types +- Include docstrings as descriptions + +**From an OpenAPI spec:** + +```bash +# Generate tools from a REST API +qtype convert openapi --spec weather_api.yaml --output weather_tools.qtype.yaml +``` + +QType will: +- Parse endpoint definitions +- Create a tool for each operation +- Map request parameters to tool inputs +- Map response schemas to tool outputs +- Handle authentication configurations + + + +--- + +## What You've Learned + +Congratulations! You've mastered: + +✅ **Tool concepts** - References to Python functions or API calls +✅ **Importing tools** - Using `!include` with local or remote files +✅ **InvokeTool step** - Calling tools with input/output bindings +✅ **Input bindings** - Mapping flow variables to tool parameters +✅ **Output bindings** - Mapping tool results to flow variables +✅ **Tool chaining** - Connecting outputs to inputs across steps +✅ **Commons library** - Pre-built tools for common operations +✅ **Tool generation** - Using `qtype convert` to create tool definitions + +--- + +## Next Steps + +**Reference the complete example:** + +- [`04_tools_and_function_calling.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/tutorials/04_tools_and_function_calling.qtype.yaml) - Full working example +- [Commons Library](https://github.com/bazaarvoice/qtype/blob/main/common/tools.qtype.yaml) - Browse all available tools +- [Commons Library Source](https://github.com/bazaarvoice/qtype/blob/v0.1.11/qtype/application/commons/tools.py) - Browse the source of tools. + + + + +--- + +## Common Questions + +**Q: Can I call multiple tools in parallel?** +A: Not directly in a single step. However, if tools don't depend on each other's outputs, QType's execution engine may parallelize them automatically based on dependency analysis. + +**Q: What happens if a tool raises an error?** +A: The flow stops and returns an error. 
+ +**Q: Can I pass literal values instead of variables to tools?** +A: No - `input_bindings` only accepts variable names. Use PromptTemplate to create constant variables, or define them in your flow's input data. + +**Q: How do I know what parameters a tool accepts?** +A: Check the tool's YAML definition (in the commons library or your generated file). It lists all `inputs` with their types and whether they're optional. + +**Q: Can tools modify variables or have side effects?** +A: Tools are functional - they take inputs and return outputs without modifying flow state. Side effects (like writing files or calling APIs) happen inside the tool implementation, but they don't affect other flow variables. + +**Q: What's the difference between InvokeTool and Agent?** +A: **InvokeTool** explicitly calls a specific tool with defined bindings. **Agent** gives an LLM access to multiple tools and lets it decide which to use and when. Use InvokeTool for deterministic workflows, Agent for autonomous decision-making. diff --git a/docs/Tutorials/05-multi-flow-applications.md b/docs/Tutorials/05-multi-flow-applications.md deleted file mode 100644 index 2e45bd04..00000000 --- a/docs/Tutorials/05-multi-flow-applications.md +++ /dev/null @@ -1,281 +0,0 @@ -# Multi-Flow Applications - -**Time:** 20 minutes -**Prerequisites:** Tutorials 1-4 -**Example:** [`multi_flow_example.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/multi_flow_example.qtype.yaml) - -## What You'll Learn - -In this tutorial, you'll learn how to: - -- Create applications with multiple independent flows -- Understand variable scoping within flows -- Choose which flow to execute at runtime -- Design modular, reusable flows - -## Why Multiple Flows? - -So far, we've built applications with a single flow. 
But real-world applications often need multiple distinct workflows: - -- Different use cases (e.g., "create customer" vs "update customer") -- Different entry points for different user roles -- Reusable sub-workflows for common operations -- A/B testing different approaches - -**Key concept:** Each flow is independent with its own variables, inputs, and outputs. Variables in one flow cannot access variables in another flow - this isolation prevents naming conflicts and makes flows easier to reason about. - -## The Multi-Flow Example - -Our example application has three independent flows for customer data processing: - -1. **`clean_names`** - Cleans and standardizes customer names -2. **`validate_names`** - Validates that names are legitimate person names -3. **`generate_profile`** - Generates complete customer profiles - -Let's examine the structure: - -```yaml -id: multi_flow_example -description: Multi-flow application demonstrating multiple independent flows - -models: - -- type: Model - id: gpt4o-mini - provider: openai - model_id: gpt-4o-mini - -flows: - -- type: Flow - id: clean_names - # ... flow definition ... - - - type: Flow - id: validate_names - # ... flow definition ... - - - type: Flow - id: generate_profile - # ... flow definition ... -``` - -## Variable Scoping - -Each flow defines its own variables that are completely isolated from other flows: - -```yaml -flows: - -- type: Flow - id: clean_names - variables: - -- id: raw_name # Only exists in clean_names flow - type: text - - id: clean_name # Only exists in clean_names flow - type: text - - - type: Flow - id: validate_names - variables: - -- id: name_to_validate # Only exists in validate_names flow - type: text - - id: validation_result # Only exists in validate_names flow - type: text -``` - -**Important:** Even though both flows work with names, they use different variable names. The `clean_name` variable in `clean_names` is completely separate from `name_to_validate` in `validate_names`. 
This prevents accidental data leakage and makes each flow self-contained. - -## Flow Inputs and Outputs - -Each flow declares what inputs it requires and what outputs it produces: - -```yaml -- type: Flow - id: clean_names - - inputs: - -- raw_name # Required input - must be provided when running this flow - - outputs: - -- clean_name # Output - available in results after flow completes -``` - -This contract makes flows easy to understand and test in isolation. - -## The Clean Names Flow - -Let's examine the `clean_names` flow in detail: - -```yaml -- type: Flow - id: clean_names - description: Clean and standardize customer names - - variables: - -- id: raw_name - type: text - - id: clean_prompt - type: text - - id: clean_name - type: text - - inputs: - -- raw_name - - outputs: - -- clean_name - - steps: - # Step 1: Create prompt to clean the name - - id: create_clean_prompt - type: PromptTemplate - template: "Clean this name by trimming whitespace and converting to title case: {{raw_name}}. Return ONLY the cleaned name, nothing else." - inputs: - -- raw_name - outputs: - -- clean_prompt - - # Step 2: Call LLM to clean the name - - id: clean_step - type: LLMInference - model: gpt4o-mini - inputs: - -- clean_prompt - outputs: - -- clean_name -``` - -**Pattern:** Notice the `PromptTemplate` → `LLMInference` pattern. This is the standard way to work with LLMs in QType: - -1. Use `PromptTemplate` to construct the prompt from variables -2. 
Pass the prompt to `LLMInference` to get the result - -## Running Specific Flows - -When you have multiple flows, you specify which one to run with the `-f` flag: - -```bash -# Run the clean_names flow -uv run qtype run -f clean_names \ - -i '{"raw_name":" john doe "}' \ - examples/multi_flow_example.qtype.yaml -``` - -Output: -``` -clean_name: John Doe -``` - -```bash -# Run the validate_names flow -uv run qtype run -f validate_names \ - -i '{"name_to_validate":"John Doe"}' \ - examples/multi_flow_example.qtype.yaml -``` - -Output: -``` -validation_result: Valid -``` - -```bash -# Run the generate_profile flow -uv run qtype run -f generate_profile \ - -i '{"customer_name":"John Doe"}' \ - examples/multi_flow_example.qtype.yaml -``` - -Output: -``` -customer_profile: Customer Profile for John Doe -Account Number: 12345678 -Member Since: January 2020 -Status: Gold -... -``` - -## Design Principles - -When designing multi-flow applications: - -**1. Keep flows focused:** Each flow should do one thing well. Our example has separate flows for cleaning, validation, and profile generation rather than one monolithic flow. - -**2. Make flows reusable:** Design flows that can be used in multiple contexts. The `clean_names` flow could be used anywhere you need name standardization. - -**3. Use clear variable names:** Since variables are scoped to their flow, use descriptive names that make sense within that flow's context. - -**4. Define explicit contracts:** Always specify inputs and outputs clearly so flows can be understood and tested independently. - -**5. Consider composition:** While this example shows independent flows, real applications might combine flows (using `InvokeFlow` step) to build complex workflows from simple building blocks. - -## Testing Flows Independently - -One major benefit of multi-flow applications is testability. 
You can validate and test each flow in isolation: - -```bash -# Validate the entire application -uv run qtype validate examples/multi_flow_example.qtype.yaml - -# Test just the clean_names flow -uv run qtype run -f clean_names -i '{"raw_name":" JANE DOE "}' examples/multi_flow_example.qtype.yaml - -# Test just the validate_names flow -uv run qtype run -f validate_names -i '{"name_to_validate":"X Æ A-12"}' examples/multi_flow_example.qtype.yaml -``` - -This makes debugging easier - if something goes wrong, you can quickly isolate which flow is causing the issue. - -## Visualizing Multi-Flow Applications - -You can generate a mermaid diagram showing all flows: - -```bash -uv run qtype visualize examples/multi_flow_example.qtype.yaml -o multi_flow.mmd -``` - -This creates a visual representation of your application's structure, showing all flows and their steps: - -```mermaid ---8<-- "Tutorials/multi_flow.mmd" -``` - -The diagram shows: - -- Three independent flows, each with its own variables and steps -- The `PromptTemplate` → `LLMInference` pattern in each flow -- A shared model resource (`gpt4o-mini`) used by all flows - -## What You've Learned - -You now understand how to: - -✅ **Structure applications with multiple flows** -✅ **Scope variables within flows** -✅ **Run specific flows with the `-f` flag** -✅ **Design modular, testable workflows** - ---- - -## Next Steps - -**Reference the complete example:** - -- [`multi_flow_example.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/multi_flow_example.qtype.yaml) - Full working example - -**Learn more:** - -- [Flow Reference](../components/Flow.md) -- [PromptTemplate Reference](../components/PromptTemplate.md) -- [LLMInference Reference](../components/LLMInference.md) diff --git a/docs/Tutorials/06-rag-document-system.md b/docs/Tutorials/06-rag-document-system.md deleted file mode 100644 index 9af4963b..00000000 --- a/docs/Tutorials/06-rag-document-system.md +++ /dev/null @@ -1,1234 +0,0 @@ -# 
Build a RAG Document System - -**Time:** 30 minutes -**Prerequisites:** Tutorials 1-5 -**Example:** [`rag.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/rag.qtype.yaml) - -**What you'll learn:** Build a production-ready Retrieval Augmented Generation (RAG) system with document ingestion and conversational search. - -**What you'll build:** A complete RAG application with two flows: one to ingest documents into a vector database, and one to chat with those documents using contextual retrieval. - -## Prerequisites Checklist - -Before starting, verify your environment is ready: - -**Required Software:** - -- QType installed: `pip install qtype[interpreter]` -- Docker installed and running: `docker --version` -- AWS CLI configured: `aws sts get-caller-identity` - -**Required Accounts/Keys:** - -- AWS account with Bedrock access -- Your AWS profile set: `export AWS_PROFILE=your-profile-name` - -**Required Python Packages:** - -- HuggingFace reader: `uv add llama-index-readers-huggingface-fs --optional interpreter` - -**Verify Your Setup:** - -```bash -# Check Docker is running -docker ps - -# Check AWS credentials -aws sts get-caller-identity - -# Check Python packages -pip list | grep llama-index-readers-huggingface-fs -``` - -**Time Required:** 30 minutes - ---- - -## What is RAG? - -**RAG (Retrieval Augmented Generation)** solves a key problem with LLMs: they can only answer questions about information they were trained on. - -**Without RAG:** -``` -You: What was discussed in last week's meeting? -AI: I don't have access to your meeting notes. ❌ -``` - -**With RAG:** -``` -You: What was discussed in last week's meeting? -AI: According to your notes, the team discussed Q4 roadmap... ✅ -``` - -### How RAG Works - -``` -┌─────────────────────────────────────────────────────────┐ -│ 1. 
INGESTION (One-time setup) │ -├─────────────────────────────────────────────────────────┤ -│ │ -│ Documents → Split → Embed → Store in Vector DB │ -│ │ -└─────────────────────────────────────────────────────────┘ - -┌─────────────────────────────────────────────────────────┐ -│ 2. RETRIEVAL (Every query) │ -├─────────────────────────────────────────────────────────┤ -│ │ -│ Question → Search Vector DB → Get Relevant Chunks │ -│ │ -└─────────────────────────────────────────────────────────┘ - -┌─────────────────────────────────────────────────────────┐ -│ 3. GENERATION (Every query) │ -├─────────────────────────────────────────────────────────┤ -│ │ -│ Question + Context → LLM → Answer │ -│ │ -└─────────────────────────────────────────────────────────┘ -``` - -**Key concepts:** - -- **Embeddings** - Convert text to numbers (vectors) that capture meaning -- **Vector Database** - Store and search embeddings by similarity -- **Retrieval** - Find the most relevant document chunks for a question -- **Context** - Provide retrieved chunks to the LLM for grounding - ---- - -## Part 1: Setup (5 minutes) - -### Start Qdrant Vector Database - -We'll use Qdrant for vector storage. Start it with Docker: - -```bash -docker run -p 6333:6333 -p 6334:6334 \ - -v $(pwd)/qdrant_storage:/qdrant/storage:z \ - qdrant/qdrant -``` - -**What this does:** - -- Runs Qdrant on `http://localhost:6333` -- Persists data in `./qdrant_storage/` -- Ready for connections immediately - -**Verify it's running:** -```bash -curl http://localhost:6333/ -``` - -You should see Qdrant version info. 
- ---- - -### Install HuggingFace Reader - -Our example loads documents from HuggingFace datasets: - -```bash -uv add llama-index-readers-huggingface-fs --optional interpreter -``` - -**What this installs:** - -- LlamaIndex HuggingFace reader for loading datasets -- Required for the `DocumentSource` step in our ingestion flow - ---- - -### Configure AWS Credentials - -The example uses AWS Bedrock for embeddings and generation. Set your profile: - -```bash -export AWS_PROFILE=your-profile-name -``` - -Or configure via AWS CLI: -```bash -aws configure -``` - -**Models we'll use:** - -- `amazon.titan-embed-text-v2:0` - Generate embeddings (1024 dimensions) -- `amazon.nova-lite-v1:0` - Generate conversational responses - ---- - -## Part 2: Understanding the Application (5 minutes) - -### Two-Flow Architecture - -RAG applications need two separate workflows: - -**Flow 1: `document_ingestion`** (Run once or periodically) -``` -Load docs → Split → Embed → Store in vector DB -``` - -**Flow 2: `rag_chat`** (Run for each user query) -``` -Question → Search vectors → Build context → Generate answer -``` - -**Why separate flows?** - -- Ingestion is expensive (run once, reuse forever) -- Chat is fast (only searches + generates) -- Different interface types (Complete vs Conversational) -- Can update documents without restarting chat - ---- - -### What We're Building - -Create `rag_example.qtype.yaml`: - -````yaml -id: rag_example -description: | - End-to-end RAG system with document ingestion and conversational search. 
-```` - -**Our dataset:** - -- LlamaIndex Q&A pairs from HuggingFace (1235 instruction-output pairs) -- Source: `AlignmentLab-AI/llama-index` dataset -- Perfect for testing RAG with structured knowledge - ---- - -## Part 3: Configure Shared Resources (5 minutes) - -### Add Authentication - -````yaml -auths: - - type: aws - id: aws_auth - profile_name: ${AWS_PROFILE} -```` - -**What this does:** - -- Uses your AWS credentials for Bedrock API calls -- References the `AWS_PROFILE` environment variable -- Shared by both models - ---- - -### Add Models - -````yaml -models: - # Embedding model for vector search - - type: EmbeddingModel - id: titan_embed_v2 - provider: aws-bedrock - model_id: amazon.titan-embed-text-v2:0 - dimensions: 1024 - auth: aws_auth - - # Generative model for chat responses - - type: Model - id: claude_sonnet - provider: aws-bedrock - model_id: amazon.nova-lite-v1:0 - inference_params: - temperature: 0.7 - max_tokens: 2048 - auth: aws_auth -```` - -**Key differences:** - -- `EmbeddingModel` - Converts text → vectors (used for search) -- `Model` - Generates text responses (used for chat) -- Both use the same `aws_auth` - -**Why separate models?** - -- Embedding models optimize for semantic similarity -- Generative models optimize for coherent text -- Different APIs and pricing - ---- - -### Add Vector Index - -````yaml -indexes: - - type: VectorIndex - module: llama_index.vector_stores.qdrant.QdrantVectorStore - id: rag_index - name: documents - embedding_model: titan_embed_v2 - args: - collection_name: documents - url: http://localhost:6333 - api_key: "" -```` - -**New concepts:** - -- `VectorIndex` - Configuration for vector storage -- `module` - LlamaIndex vector store implementation -- `embedding_model` - Links to our Titan embedding model -- `args` - Passed to QdrantVectorStore constructor - -**Why empty `api_key`?** - -- Local Qdrant doesn't need authentication -- Library validation requires the field (known bug) -- For production, use a real 
API key - ---- - -## Part 4: Build the Ingestion Flow (5 minutes) - -### Create the Flow Structure - -````yaml -flows: - - type: Flow - id: document_ingestion - description: Load, split, embed, and index documents - - variables: - - id: raw_document - type: RAGDocument - - id: document_chunk - type: RAGChunk - - id: embedded_chunk - type: RAGChunk - - outputs: - - embedded_chunk -```` - -**Built-in RAG types:** - -- `RAGDocument` - A complete document with text and metadata -- `RAGChunk` - A piece of a document (after splitting) -- Both include embeddings when available - -**Note:** No inputs! This flow loads data from HuggingFace directly. - ---- - -### Step 1: Load Documents - -````yaml - steps: - - id: load_documents - type: DocumentSource - reader_module: llama_index.readers.huggingface_fs.HuggingFaceFSReader - loader_args: - path: "datasets/AlignmentLab-AI/llama-index/modified_dataset.jsonl" - outputs: - - raw_document -```` - -**`DocumentSource` step:** - -- `reader_module` - LlamaIndex reader class to use -- `loader_args` - Arguments passed to reader's `load_data()` method -- `cardinality: many` - Emits one document per record (1235 in this case) - -**What this loads:** - -- Each record becomes a `RAGDocument` -- Contains instruction/output Q&A pairs -- Metadata preserved for filtering - ---- - -### Step 2: Split Documents - -````yaml - - id: split_documents - type: DocumentSplitter - splitter_name: "SentenceSplitter" - chunk_size: 512 - chunk_overlap: 50 - inputs: - - raw_document - outputs: - - document_chunk -```` - -**Why split documents?** - -- LLMs have context limits (can't process 100-page documents) -- Smaller chunks = more precise retrieval -- Overlap ensures context isn't lost at boundaries - -**`DocumentSplitter` parameters:** - -- `splitter_name` - LlamaIndex splitter to use -- `chunk_size` - Maximum tokens per chunk -- `chunk_overlap` - Tokens shared between adjacent chunks - -**Result:** 1235 documents → ~3000+ chunks (varies by document size) 
- ---- - -### Step 3: Embed Chunks - -````yaml - - id: embed_chunks - type: DocumentEmbedder - model: titan_embed_v2 - concurrency_config: - num_workers: 5 - inputs: - - document_chunk - outputs: - - embedded_chunk -```` - -**`DocumentEmbedder` step:** - -- Calls embedding model for each chunk -- Adds embedding vector to `RAGChunk` object -- `concurrency_config` - Process 5 chunks in parallel - -**Why parallel processing?** - -- Embedding 3000+ chunks sequentially is slow -- 5 workers = ~5x faster -- AWS Bedrock supports concurrent requests - -**What's an embedding?** - -- A 1024-dimensional vector of numbers -- Chunks with similar meanings have similar vectors -- Enables semantic search (not just keyword matching) - ---- - -### Step 4: Store in Vector Database - -````yaml - - id: index_chunks - type: IndexUpsert - index: rag_index - batch_config: - batch_size: 25 - inputs: - - embedded_chunk - outputs: - - embedded_chunk -```` - -**`IndexUpsert` step:** - -- Stores chunks in the vector database -- `batch_config` - Insert 25 chunks per API call (more efficient) -- `outputs` - Passes through chunks (for monitoring) - -**What "upsert" means:** - -- Insert if new, update if exists -- Safe to re-run without duplicates -- Uses chunk ID for deduplication - ---- - -### Run the Ingestion Flow - -```bash -uv run qtype run examples/rag.qtype.yaml --flow document_ingestion -``` - -**Expected output:** -``` -INFO: Loading documents from HuggingFace... -INFO: Loaded 1235 documents -INFO: Splitting documents... -INFO: Split into 3247 chunks -INFO: Embedding chunks (5 workers)... -INFO: Embedded 3247 chunks -INFO: Upserting to Qdrant (batch_size=25)... -INFO: ✅ Indexed 3247 chunks successfully -``` - -**This will take 5-10 minutes** due to embedding API calls. - -**Check Qdrant:** -```bash -curl http://localhost:6333/collections/documents -``` - -You should see 3247 vectors in the collection. 
- ---- - -## Part 5: Build the Chat Flow (5 minutes) - -### Create the Flow Structure - -````yaml - - type: Flow - id: rag_chat - description: Chat with the document collection using RAG - - interface: - type: Conversational - - variables: - - id: user_message - type: ChatMessage - - id: user_question - type: text - - id: search_results - type: list[RAGSearchResult] - - id: context_prompt - type: text - - id: assistant_response - type: ChatMessage - - inputs: - - user_message - - outputs: - - assistant_response -```` - -**Key points:** - -- `interface: Conversational` - Maintains chat history (from [Build a Conversational Chatbot](02-conversational-chatbot.md)) -- `ChatMessage` - Rich message type with text blocks (from [Build a Conversational Chatbot](02-conversational-chatbot.md)) -- `list[RAGSearchResult]` - Built-in type for search results -- All variables flow through the pipeline - ---- - -### Step 1: Extract Question Text - -````yaml - steps: - - id: extract_question - type: FieldExtractor - json_path: "$.blocks[?(@.type == 'text')].content" - inputs: - - user_message - outputs: - - user_question -```` - -**Why extract?** - -- `ChatMessage` contains blocks (text, images, etc.) -- We need plain text for vector search -- JSONPath filters for text-type blocks only - -**What this does:** - -- Input: `ChatMessage` with blocks -- Output: String with just the text content -- Handles multi-block messages automatically - ---- - -### Step 2: Search Vector Database - -````yaml - - id: search_index - type: VectorSearch - index: rag_index - default_top_k: 5 - inputs: - - user_question - outputs: - - search_results -```` - -**`VectorSearch` step:** - -- Embeds the question automatically using the index's embedding model -- Searches for similar chunks in Qdrant -- Returns top 5 most relevant chunks - -**How VectorSearch Handles Embedding:** - -VectorSearch automatically embeds your query using the `embedding_model` specified in the `VectorIndex` configuration. 
You don't need a separate DocumentEmbedder step for queries! - -```yaml -# The index configuration tells VectorSearch which model to use -indexes: - - type: VectorIndex - embedding_model: titan_embed_v2 # ← VectorSearch uses this - -# VectorSearch automatically embeds user_question with titan_embed_v2 -- type: VectorSearch - index: rag_index # Uses the embedding_model from this index -``` - -**How similarity works:** - -1. Question → embedding vector (using `titan_embed_v2`) -2. Compare to all stored chunk vectors -3. Return chunks with closest vectors (cosine similarity) - -**Result:** - -- `list[RAGSearchResult]` with 5 chunks -- Each has text, score, and metadata -- Ordered by relevance (best first) - ---- - -### Step 3: Build Context Prompt - -````yaml - - id: build_prompt - type: PromptTemplate - template: | - You are a helpful assistant that answers questions based on the provided context. - - Context from documents: - {search_results} - - User question: {user_question} - - Please provide a detailed answer based on the context above. If the context doesn't contain relevant information, say so. - inputs: - - search_results - - user_question - outputs: - - context_prompt -```` - -**`PromptTemplate` step:** - -- Combines question + retrieved chunks into one prompt -- `{variable}` - Template variables get replaced with values -- Output is a string ready for the LLM - -**Why this matters:** - -- LLMs need context and question together -- Template ensures consistent formatting -- Can adjust prompt without changing code - ---- - -### Step 4: Generate Response - -````yaml - - id: generate_response - type: LLMInference - model: claude_sonnet - system_message: "You are a helpful assistant that answers questions based on provided document context. Be concise and accurate." 
- inputs: - - context_prompt - outputs: - - assistant_response -```` - -**Standard LLM inference:** - -- Uses the generative model (not embedding model) -- System message guides behavior -- Returns `ChatMessage` for conversational interface - ---- - -### Run the Chat Flow - -```bash -uv run qtype serve examples/rag.qtype.yaml --flow rag_chat -``` - -**Open the Web UI:** -``` -http://localhost:8000 -``` - -**Try these questions:** - -``` -You: What is LlamaIndex? -AI: [Answers based on retrieved documentation chunks] - -You: How do I create a vector index? -AI: [Provides specific instructions from the docs] - -You: What embedding models are supported? -AI: [Lists models found in the documentation] -``` - -**What's happening:** - -1. Your question → extract text -2. Text → search vectors → get 5 relevant chunks -3. Question + chunks → build prompt -4. Prompt → LLM → answer -5. Answer → displayed in chat UI - ---- - -## Part 6: Understanding the Complete Flow (5 minutes) - -### The Full Pipeline - -``` -USER INPUT - ↓ -┌─────────────────────────┐ -│ 1. Extract Question │ ChatMessage → text -└──────────┬──────────────┘ - ↓ -┌─────────────────────────┐ -│ 2. Search Index │ text → list[RAGSearchResult] -│ (embed + similarity) │ (Auto-embeds question) -└──────────┬──────────────┘ - ↓ -┌─────────────────────────┐ -│ 3. Build Context │ question + results → prompt -│ (template) │ -└──────────┬──────────────┘ - ↓ -┌─────────────────────────┐ -│ 4. Generate Response │ prompt → ChatMessage -│ (LLM inference) │ -└──────────┬──────────────┘ - ↓ - USER OUTPUT -``` - -**Key insight:** `VectorSearch` handles embedding internally! 
- -- You pass plain text -- It calls the embedding model automatically -- Returns already-ranked results - ---- - -### Compare Ingestion vs Chat Flows - -| Aspect | Ingestion Flow | Chat Flow | -|--------|----------------|-----------| -| **Interface** | Complete (default) | Conversational | -| **Runs** | Once (or periodically) | Every query | -| **Speed** | Slow (minutes) | Fast (seconds) | -| **Cardinality** | Many (processes 1000s of docs) | One (one question) | -| **Purpose** | Prepare data | Answer questions | -| **Cost** | High (embed everything) | Low (embed one question) | - ---- - -## What You've Learned - -Congratulations! You've mastered: - -✅ **RAG architecture** - Ingestion, retrieval, generation -✅ **Vector embeddings** - Converting text to searchable vectors -✅ **Vector databases** - Storing and searching by similarity -✅ **DocumentSource** - Loading documents from various sources -✅ **DocumentSplitter** - Chunking large documents -✅ **DocumentEmbedder** - Creating embeddings with concurrency -✅ **IndexUpsert** - Batch insertion into vector stores -✅ **VectorSearch** - Semantic similarity search -✅ **Two-flow applications** - Separate ingestion and retrieval -✅ **Production RAG patterns** - Complete end-to-end system - ---- - -## Next Steps - -**Reference the complete example:** - -- [`rag.qtype.yaml`](https://github.com/bazaarvoice/qtype/blob/main/examples/rag.qtype.yaml) - Full working example - -**Learn more:** - -- [VectorIndex Reference](../components/VectorIndex.md) - All vector store options -- [DocumentSource Reference](../components/DocumentSource.md) - Document readers -- [VectorSearch Reference](../components/VectorSearch.md) - Advanced search features - ---- - -## Common Questions - -**Q: Why separate ingestion and chat flows?** -A: Ingestion is expensive (embedding thousands of chunks) and runs once. Chat is fast (embedding one query) and runs per request. Separating them optimizes both performance and cost. 
- -**Q: How do I run ingestion before chat?** -A: Always run the ingestion flow first: `uv run qtype run examples/rag.qtype.yaml --flow document_ingestion`, then start chat: `uv run qtype serve examples/rag.qtype.yaml --flow rag_chat` - -**Q: Can I use different embedding models for ingestion and search?** -A: No, you must use the same model for both. The VectorIndex configuration specifies one `embedding_model` that's used by both DocumentEmbedder (ingestion) and VectorSearch (queries). - -**Q: How do I check if documents were ingested successfully?** -A: Query Qdrant directly: `curl http://localhost:6333/collections/documents` to see collection stats and document count. - -**Q: What if my documents are too large?** -A: Adjust the DocumentSplitter `chunk_size` parameter. Smaller chunks (256-512 tokens) work better for precise retrieval. Larger chunks (1024+ tokens) preserve more context. - -**Q: How do I improve answer quality?** -A: Try: (1) Adjust `default_top_k` to retrieve more chunks, (2) Improve your system message to enforce context-only answers, (3) Experiment with chunk size and overlap, (4) Use metadata filters to narrow search scope. - -**Q: Can I add memory to the chat flow?** -A: Yes! Add a `memories:` section and reference it in the LLMInference step with `memory: chat_memory`. This lets the chatbot remember conversation history. - ---- - -## Congratulations! 🎉 - -You've completed the QType tutorial series! You now know how to: - -- Build stateless and stateful applications -- Work with tools and function calling -- Process data in pipelines -- Compose multi-flow applications -- Build production RAG systems - -**Ready for more?** Check out the [How-To Guides](../How-To%20Guides/) for advanced patterns and production deployments. - ---- - -## Production Considerations - -### Ingestion Optimization - -**Problem:** Ingesting large document collections is expensive. - -**Solutions:** - -1. 
**Incremental updates:** - -- Only ingest new/changed documents -- Use document IDs for deduplication -- Track last ingestion timestamp - -2. **Increase concurrency:** - -````yaml - concurrency_config: - num_workers: 20 # More parallel embedding calls -```` - -3. **Larger batches:** - -````yaml - batch_config: - batch_size: 100 # Fewer API calls to Qdrant -```` - -### Retrieval Optimization - -**Problem:** Always returning top 5 chunks may not be optimal. - -**Solutions:** - -1. **Adjust retrieval count:** - -- Increase `default_top_k` to retrieve more chunks -- More context can improve answer quality - -2. **Filter by metadata:** - -- Use the `filters` field to narrow search by document properties -- Filters are passed to the underlying vector store - -3. **Rerank results:** - -- Add a post-processing step after VectorSearch -- Use LLM to re-score and reorder retrieved chunks - -### Response Quality - -**Problem:** LLM makes up information not in context. - -**Solutions:** - -1. **Stronger system message:** - -````yaml - system_message: | - ONLY answer based on the provided context. - If the context doesn't contain the answer, say "I don't know." - Do NOT make up information. -```` - -2. **Show sources:** - -- Include chunk metadata in response -- Link to original documents -- Add citation markers - -3. **Lower temperature:** - -````yaml - inference_params: - temperature: 0.3 # More deterministic, less creative -```` - ---- - -## Common Issues and Solutions - -### Issue: "Collection not found" - -**Cause:** Chat flow ran before ingestion flow. - -**Solution:** -```bash -# Always run ingestion first -uv run qtype run examples/rag.qtype.yaml --flow document_ingestion - -# Then run chat -uv run qtype serve examples/rag.qtype.yaml --flow rag_chat -``` - ---- - -### Issue: "No relevant results found" - -**Cause:** Question embedding doesn't match document embeddings. - -**Solutions:** - -1. 
Check embedding model matches: - -````yaml - # Index and search must use same model - embedding_model: titan_embed_v2 -```` - -2. Increase `top_k`: - -````yaml - default_top_k: 10 # Get more candidates -```` - -3. Check document content: - -```bash - curl http://localhost:6333/collections/documents/points/scroll -``` - ---- - -### Issue: "Ingestion is too slow" - -**Cause:** Embedding 1000s of chunks sequentially. - -**Solutions:** - -1. Increase workers: - -````yaml - concurrency_config: - num_workers: 10 # Careful: API rate limits! -```` - -2. Use faster embedding model: - -````yaml - model_id: amazon.titan-embed-text-v1:0 # v1 is faster than v2 -```` - -3. Process in batches: - -- Split documents into smaller sets -- Run ingestion flow multiple times - ---- - -### Issue: "Qdrant connection failed" - -**Cause:** Qdrant isn't running. - -**Solution:** -```bash -# Check if Qdrant is running -curl http://localhost:6333/ - -# If not, start it -docker run -p 6333:6333 -p 6334:6334 \ - -v $(pwd)/qdrant_storage:/qdrant/storage:z \ - qdrant/qdrant -``` - ---- - -## Try These Extensions - -### 1. Adjust Search Results - -Retrieve more results for broader context: - -````yaml -- id: search_index - type: VectorSearch - index: rag_index - default_top_k: 10 # Get more results -```` - -### 2. Add Memory to Chat Flow - -Remember conversation history: - -````yaml -memories: - - id: chat_memory - token_limit: 50000 - -# In LLMInference step: -- id: generate_response - type: LLMInference - model: claude_sonnet - memory: chat_memory -```` - -### 3. Use Different Vector Store - -Switch to Pinecone or Weaviate: - -````yaml -indexes: - - type: VectorIndex - module: llama_index.vector_stores.pinecone.PineconeVectorStore - id: rag_index - embedding_model: titan_embed_v2 - args: - api_key: ${PINECONE_API_KEY} - environment: "us-west1-gcp" - index_name: "my-index" -```` - -### 4. 
Add File Upload - -Let users upload their own documents: - -````yaml -- id: load_documents - type: DocumentSource - reader_module: llama_index.readers.file.SimpleDirectoryReader - loader_args: - input_dir: "user_uploads/" -```` - -### 5. Add Document Metadata - -Enrich documents with custom metadata during ingestion: - -````yaml -- id: load_documents - type: DocumentSource - reader_module: llama_index.readers.file.SimpleDirectoryReader - loader_args: - input_dir: "user_uploads/" - file_metadata: - doc_type: "user_upload" - uploaded_by: "user123" -```` - ---- - -## Complete Code - -Here's the complete RAG application: - -````yaml -id: rag_example -description: | - End-to-end RAG system with document ingestion and conversational search. - -auths: - - type: aws - id: aws_auth - profile_name: ${AWS_PROFILE} - -models: - - type: EmbeddingModel - id: titan_embed_v2 - provider: aws-bedrock - model_id: amazon.titan-embed-text-v2:0 - dimensions: 1024 - auth: aws_auth - - - type: Model - id: claude_sonnet - provider: aws-bedrock - model_id: amazon.nova-lite-v1:0 - inference_params: - temperature: 0.7 - max_tokens: 2048 - auth: aws_auth - -indexes: - - type: VectorIndex - module: llama_index.vector_stores.qdrant.QdrantVectorStore - id: rag_index - name: documents - embedding_model: titan_embed_v2 - args: - collection_name: documents - url: http://localhost:6333 - api_key: "" - -flows: - - type: Flow - id: rag_chat - description: Chat with the document collection using RAG - - interface: - type: Conversational - - variables: - - id: user_message - type: ChatMessage - - id: user_question - type: text - - id: search_results - type: list[RAGSearchResult] - - id: context_prompt - type: text - - id: assistant_response - type: ChatMessage - - inputs: - - user_message - - outputs: - - assistant_response - - steps: - - id: extract_question - type: FieldExtractor - json_path: "$.blocks[?(@.type == 'text')].content" - inputs: - - user_message - outputs: - - user_question - - - id: 
search_index - type: VectorSearch - index: rag_index - default_top_k: 5 - inputs: - - user_question - outputs: - - search_results - - - id: build_prompt - type: PromptTemplate - template: | - You are a helpful assistant that answers questions based on the provided context. - - Context from documents: - {search_results} - - User question: {user_question} - - Please provide a detailed answer based on the context above. If the context doesn't contain relevant information, say so. - inputs: - - search_results - - user_question - outputs: - - context_prompt - - - id: generate_response - type: LLMInference - model: claude_sonnet - system_message: "You are a helpful assistant that answers questions based on provided document context. Be concise and accurate." - inputs: - - context_prompt - outputs: - - assistant_response - - - type: Flow - id: document_ingestion - description: Load, split, embed, and index documents - - variables: - - id: raw_document - type: RAGDocument - - id: document_chunk - type: RAGChunk - - id: embedded_chunk - type: RAGChunk - - outputs: - - embedded_chunk - - steps: - - id: load_documents - type: DocumentSource - reader_module: llama_index.readers.huggingface_fs.HuggingFaceFSReader - loader_args: - path: "datasets/AlignmentLab-AI/llama-index/modified_dataset.jsonl" - outputs: - - raw_document - - - id: split_documents - type: DocumentSplitter - splitter_name: "SentenceSplitter" - chunk_size: 512 - chunk_overlap: 50 - inputs: - - raw_document - outputs: - - document_chunk - - - id: embed_chunks - type: DocumentEmbedder - model: titan_embed_v2 - concurrency_config: - num_workers: 5 - inputs: - - document_chunk - outputs: - - embedded_chunk - - - id: index_chunks - type: IndexUpsert - index: rag_index - batch_config: - batch_size: 25 - inputs: - - embedded_chunk - outputs: - - embedded_chunk -```` - ---- - -## Next Steps - -**Explore More:** - -- [VectorIndex Reference](../components/VectorIndex.md) - All vector store options -- [DocumentSource 
Reference](../components/DocumentSource.md) - Document readers -- [VectorSearch Reference](../components/VectorSearch.md) - Advanced search features -- [RAG Best Practices](../How-To%20Guides/rag-best-practices.md) - Production patterns - -**Build Your Own:** - -- Load your own documents (PDFs, Word files, etc.) -- Experiment with different embedding models -- Try different chunk sizes and overlap -- Add metadata filtering to search -- Implement multi-modal RAG (text + images) - ---- - -## Congratulations! 🎉 - -You've completed the QType tutorial series! You now know how to: - -- Build stateless and stateful applications -- Work with tools and agents -- Process data in pipelines -- Compose multi-flow applications -- Build production RAG systems - -**Ready for more?** Check out the [How-To Guides](../How-To%20Guides/) for advanced patterns and production deployments. diff --git a/docs/Tutorials/complete_example_ui.png b/docs/Tutorials/complete_example_ui.png deleted file mode 100644 index b34e081ab4d62ed8c32eaa78461fb79a80a0b7d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 70114 zcmeFZRaBc%w=N8Y;uLrHwm1~GLXlFe6nB?m!QGwWP}~cYBE{X^rMMH^AwVEl2q*3L z@BiGMi?hc#7kk~TkvB6dOP)35nKOy{tSpC(L5_idfPgLkQThu40#X11!V54O3j9ey zZD%^XLw1ppS4V?4Uo`Vb_-is(8EscJfTgR4iL(WQl>@-ug5AZ;*}}rX#TwuWLh2HS z3$gzd`rvF~;`$ZfK&$@M-U30*#Fdsyh*r|gik63qhmV#^K$u5B_#Hp(XC+!0Y4rh{ ziV6e-S_FA%Np;Wc({&$x^~H|o=SBH&tY7-y7|7^DZE^b@h42biuq13RtpjG}^7*+7 zqU_(LC8sw=HHr-+C)=p_{P-F-$Xr^)J=kAls1p3!pQ+S{B}!)c#al^qG(vpz9}$S? 
ziW%u3n!q(n1W>w{1Wmj+wFTs~V{wDG@lnk7_tZlPXBYHJLHb|YWj$*xUq`_E*IJ;2 zu>U+>3+&B{{MRTx-x2(K(+LS9fc{@|c)z9cPplvNH8kp|4u{pecI2vgB`vK1_}VDU%4w~ja4=wK1% zg(u`T=~1U;XFN%I@EEpH7gSd6t$I+{HN_JC=cV2+XRLieCGE6HNB@{d6dwtFnjPC9 zZbqnJTB~G(#cP}12ldQoTLrU`2&J^WipHm@gyUm>*!Z`z%#J*I`n2%BXlce{pld3| z-7_se`-oUvg6fpMPyUMPwTFL9(u-FHyWHJb&AX&8utEgmA|r=s-~2_6-em_<*e(|d zMLFiOmY8GJ_l|Rh3X974n&&%O@5W-+P z*CbT7ZV_O?m8Gfk3Aa<$vZGt9#w5{C_D}6ShH{BxX6YYu36JQrzePss3;vs?$lxX+ zTOtR)roXhmo5Z?%3Ffj9Pj~+0F@6QjEib6561lwG3^`^1-|YDL4t`}w-n;cnONaAB zLBfk&^d0LV&ISPg+I>SAZjI*OOpz_q2MD{0fZm<}GW<1|k z@kN!Ic0VF#^nKt&iT^)p_)Qhd+t z7nnq9Pu3kAFR;%afLN9#hwsrFedx2<8vZ=%cbtEF% zE21|Svwkm))?uDA!rF8yI9uy6LmuN4m^Og-jjaqcPS-9h6sJg2wgWq_`$wUc#`uR*Yns#Y;Gz37ogP zr!bEsC>ptojyKt~9AEc*F(S^nDn98Vq*cXvV2iR6N8)%G3Kn}Y*mfA}I%oEDW;PyB zuuJdbezFYR{lK>3i-=g=lIsVsr#+DJPZz{#45_-<1%Np=<$}qb7k0mRldUk)UuhJ; zZ5KNYm_&; zumk$k(__wRGV~}FlLaxd@dr!AXYh=&w$62bfRXl;HO+=J6AQt%YgWz&)K9-oF1_BV zdUr4TN&whyi}Ch$No7zmO5Lt~f0qr<`${lOk4*0KuarLI8Sy-~^!g(u0<^Kd*6|E0 zs?bi(dmsgpFFdoIKz&qI4r-19MyCLI;CcPYpTJ6{fWX{=z(eF2&?v2oC5BT^T9Z5+ z7))>B%7_QyM2(kA!i`MDnA2(pVAOMLYA>{7@1pa(Z8{U&HibM!>$0GpD%P4X|Cn>V zMF=o*5( zyHLTcA5{|qZ^MeZvDJl+qRQX0SNWqcO=q$oK!GT-d!_tHLyS>+qNEE}c=o4}IozOU2 z?2`p+If7!L=K&}UVCzpp&ISD1zx#CnS8s3YXqG3(0-UBX39<)|#e1r9xR<0BjxENW zSHLDL9<%PRZkn|3_x67cJ!rm)(7(1MSEB(@KoPJu`qytj6^6UQYkL6iotbOc{g83X z`~YR!=nIJ73cFHsW;sblFcUN4m>|ngDaWZ02z2dq72kSS!Jt+hdedntq7ks%EkdMx z8;F28mV+!27>EwmuJPqi9n|J*8~vN2VLPsvOTHU15WqO$1);;DYFF-f%Q(zth~#9uOrU>D({-ewu?{ zYd6v50-vyvJC5EfDSBgi-x)%4lzD5#%1#TE-;5#8`xrU(dOInHa%kFRBkwm)JiV2e z)BE8&!XM!`-qVI3&hs$!v2Nd)-Y?Q{56NGg5ON)v)LOP^zvxj_o~+ykXX%lMrQKAq zn`vHKXa`(^oh!U5y`xZDH5pWoZn;FRW&{+Zcgv;WKI*Pb?P3j}E4uc1kQBzm=NniM zyCJRQ$_<#_!dqD=NVa?z!pt72l11ruczHV@AfFWv5e0F#*ujq*Vl_;QqTqpXBe98y=lT26PKj zBzgHN^mUy=-M4+ly-B&priKjP&sXKM4{%`;Luph*=)g z)jHc4*zv_HQ1%C3Z-g>R%sX`F(sx=*SKoHGjqMny#&oU~sU*@Qy z{o*nR$*3KmO|J@rJB#kw1bgayQfv9)w%!DOW534uBkFJJ;8S@yU5)$$2}Q6Dj;qj` zyUIGl+y+z`>CP8_V*wO=Qpq(X0j!6DMNI>#Bjba@WtA^JqXUhDQmO2ojtQvFm5uw1 
z>_(16utgZnl6bkt@(HOU__ZkNG&7Kb0gYQKCmCt^#I|~Q>&I-Q3L^o&qT}2M*G#R{ zx8qYrY!7kZl&6BT76O<|!8urfQ|p@CM$cfl^BeO_ued4D+Y+XA8F&5T2d#h>UvI4ijK9DPk$B0MLLzi7JBSXlAY>9YukS^P5;4& z!1fpVHjVUm_(=)xv+KM9P%(vcwGU0HhG>}?Z~V2J23J+W5zhzIQSci{NjsdYif;j>iTN4Eqe+1eU*A!-h8FW{p4_jxf_Mt=v~l2I%xsFz-hylIdlu6pjhh+LY(?i?h27gs32hH!5+Qz)6h0)g$q9`^UhLvq(=mQCA}~vCOQB`9TkZBXxn9^5<&>)4Pg6N1dUM;y>@)B< zoy5iySH9_Gk0gr%HX*>OBiSG(G0N}Y=w6z~Bs=*&WM0$Iy{10|^!rW!Jv{nYp30V6 zE#X@dN7d&@n=ZH~h-)E7J))5ww#Xjx5RdUM;LBEXL#w|mVeo$yY4*iDQYNXo{|(a2An z08ZO^3Xq_@(RP6d>f}|gr=v%GV1|;($>Ngin-+G2#%4iO7gd62d~U=y?k{woYT(h> z=wBwHr7R{!KIhmurva*EWWF=)PYqHbxq@;M*O(vivOPEv1G_+y=gm9HNdOfy#nZEK z;J!I&AuB|doP(CLG54!0^?N9Rd|k7)>#FMHv6UemaGu;fj=6$U;zMI{!!1IjqQ!Bu zLVLc)d#qHa5Jh{KnSATO{e5f?I%69KcSjhranz|*jgJ{_>d^>?R3+632$ps86nfa& z)jnK1p`2Qcz%`h_Z&9sb$62gtH(8ec3X@)lZg|hI_#}7Bt32A)Y0k{gja}$1_znFG zCp=C}EFUKoiI%C;7rkE4(MflbP~cCi=Uo0{rbkzv=Nn@ctxqW^m;x%g7Ovbv@s(>P zx3t;Y1KM}5_~Zz8A0F*E8;Cyw8>@YD8bE)fLZV7>L=`im@9iKsMy8Npt6{NeRxg|5GFQ%ZTC$Mz1WNJdrop0w(_?5Kea#wwvoU!>!l!El7l>5n z6Bo|fzNuMGicE$|t#<%uYFv4~v3T6yJR};U5SIsRLfn5#Aj&z4{X_^YJ;>p!s=ha_ za-+p&W>##%-8)W*bJmoAXy*lBLXJfjtk{iVnO6!k&TzHCL*$Ex*Rq1eJVR}y_|uK< zgf-hv!eID#=-m8-8_^ z+D(Lk&0&DrlLPcKu@ACpk88YEVs3Hd{1P{`S$QwG67L7|bZ#afpG{&WX-`;MPNzm) zRy0>5Y%{=96lhV{EZqc-(2&+z$ToE)vf>k-4zPn#OjB0s%a_O>{Uf!n#9~|66DbU^ z)83^=ejY2&Y<|s=#eg8tXjpNSj(UBc&Rq?W0@GJL%z&@S@GVro)SaH2KNsOa4 zU*wq`YK82CXk5-wGO7WpiHy81$c2NWfR-z^xGPsqKOpL7O*oWl)yr$$-5CYVzq2$R zT>fbGfWjDZ>qWwdxKggotUt`9z~FStVKGnvgEXjV=u5$R@UNmDaQvkwjR$J6esIi+ z@&W&Rm&978^z1A+aYJe&qYh#+ncH09+oIEiuFZgi?1}cyZZ9?wdZtKw3cFx$j(XBu zx=Zj}q^>p(#NElMKkTcZ+<724{>c6ygW*q4f1m*Ov&D+64YN<2VZi)&-Xl6g zy&GJ4Vz`DD{aZ0f=E{-E+;_{H|}eMV$GZrlZz*MGfWK1$R$^|qjvC&725x2m=o6HZkI zm9!ofo5#?C1z0^zd+&26vWnzV>%KtcC;@dI@YBYpo_eq;+;jtI3X|r}{^v#N8kC z2Z3sOr04GX{VS)Xoxg#L=}~NqhOY;9PyLfMFu$PO=n&xcRWOymUTeu7AJg0%2z!sI zL(vF^faAg4v=S}|SjaBFtZdq8$dd)s+2HbK!R_1f}lc>Dy0l4Bi?R@zd*CpFU zv%WNXHT+baP&P&WwFk9}LeAt>46TFj3sv&{H}pK#|C_(=6PNi%VE+%zO;7*d&*t~d 
zJG=+X=Pg&d6Z|K;z!9}c;aBBOFnRwk6){%6j`sg0All!Zs{K!D;s0g^#khF|oAe!= zv?b!3^K(L?>ybphBdv0e{?AENl6???{-qah=sByV=t`PP4;s5kXRLsI$sR zD~?`V{KeEP)9FJVr8D@GH7WIE=yl6@9{DtPcwg-OrBk7i~jBvb|1%<&tlF#yo-E)edo@r4w1W2)%DoiYB-7OInJm0xwh z_wO&X*=Bjulf5nWcb4B`ym_c^c#6l9>>oneT7A@;J2MPq*-uCQ0c|t*U_d7X*fV}= z4qr)*U5YJ4L&u`G`68S#YxW?PyBM3;FOz94W@9lc(pF8J*H4-~+_mP1Q)k*CqWpQP zplkPH(#MNZ_^dBoUTq{hM^+uwQZUbS`q+kz(3NExg$W)sG_iOp7t$U({(3{Dv{poB z?k^CQutnn8M+qkmYciC4Fa$Mck;Rz@8H;!xE6MuraoXGd9@}s+4`ZJZ&Z5uuQANri z8XHiVZj-ZR$xms`(~qHHqCTQ4*+&}^BC|V(hVM2ZL6mh~A%y z@{3dT56UPk>rell5NQNojW(E9YA>4sCBIhE9W+IIUWG{=d)rmldaWz4DdY?t@5Lt* z`(`j~j)<|fftaqc5EaM0x0E!ePKia&`0QZI>*GL8Aj|_Qy5Y|r^a~RE-NA4anrFZD zxh~nPN^flJNWs&7V2xVFw-u-dEL9kWSyD&)r>^=}Ay(Jk&u_xI$9z{a&7Z&Nu8&hYt^niuSeLwyHnU_x7lM(!8l^~ zIALY0ogro-%|R@A`lI`WVn$OF%kTh6ljr0}qoR38eu?NWwp*~mcZCCT1!B*ROKry) z9*fVu^F#cMhAT8E%h?KQ5B(q=mx8I_GkKxJp{qPM9DjpXzja1AHJ?bL4G)Q-!a_FM z&(Rj_K&0-J z?+{u2lY%)BZtP$;yu0ezSwZp|lf2L$ym#2~bSG&zX6T33JC()eux6_ZFgE{mhg#EH zt2f|h-ZbF|FnASXvG%a^yr)VH`y`FS;-0t5c-bhMu%xsx=_ZKYQT#fd@*~$2R`y?AYRsA4Bvm zH+M`1gjQj}j-^YlCoOvWS4q845X*wyp7y0bh5#oWZ>x(Lc9xd?LEWxbJLLRPofy#R zaJltgwEzcaC9`Npr^R2#&1UmHWlJ;*EUJ00P>cmLwRb%Fy^V%@TmYU(5dqig+*nR~ z_l?Ir>G{eOw$(K<;*J_<^{P`|Rs>VEnc9GR>eHl~qw|1&S+*a|zKPpS-V>8SpW`L- zc_&-#Pj_GO`4*_@y?fc|nG0We#=349#~ zqK&QetmS#S{c}+ST=DW|lWG*hRh`YK2hBb-2Q}BEFQ6Uh8Sm0qV0^KGld*7;`tGu? 
zXejgICcgVBWWZbrgi)*^Lo>a(eXS~?-(#z=ZyLqFMS&Z%*U4lqVJ}3&d@9p6>_+6+ zT9=mmsHvr*HRsnw|0vN)bz3=L{>)rxrp@&Id@1Ip-)keZP*2-)O7uK^Z(D*aT05X# z53q89#*+s9ejWEmr|GOq`>Hi1w(E(L-{V^J!5{3D3ae%nT~U{l{JgkdO=iwMPRp@G z0o+99A>m}^t#C0DlaW(yz1!-}(izjN@s-ql>@-0i*S!**0?s#xo0syX(UXWptS@6Y zFD>bmF0h0ZbFv>qT_a1uw<%I-XBt?+$l9=zwVpoo(?cKn|L^$31uCxfR+N@=>BDMP zJLFn_#>{7;$)s7H75fs`gbr)5j7XKZ)8lo-aTC0B5ue)^W*<9?SG}+c zBflMCkn`&mM(5_E{Zv*t`hCYGjlxWG<@@k06e)+Jhs16H;bJ)_GG95~{ryUp zW#wgSH}|zo^?gN*P$sd49rB=W!IIFWKho1i72n-+{b#V?sicWMO6vZ(`hFLC z@$0Qz2eI;hHMvAu2YGi+8* zW#O}1Wm9SPd>wOjD^M)+SDAPe)$AtbC^BP$5bN|!2L*NL-cO2$cFH7}S}KUue6lsu zp(-=I!5rPi8#UMEbdHL>+h%>JvyVpAb(i54)o+lCgr4s6maj)4UYB9PiXaq4^y{-? zOkb?08_>4{Pa= zt=V<(vod@tGYIqdYdD7p&laM65Qn-QP}tM)u1M{xz;b5Rw1#yb@LQg_wWQQ6S;uWsT0F0&tdT$PvCwnezS0sKDXb!?zl|wovs`R7?CIZB zn!6s-q1o@tCYv(0N68#}^Wa0EKYHO&kePK?v8?F2SJV zuh&X`kq0!&=K(^f@v73O*L4PeUH4sl(%>x+N=&$HS((UNZ|bG622&2zX+N6Q*v>*U z(pRpNX*O=cStp~m96Nu8l>XQr7XCJ+S*V@tQk0ev2GN~NlHy9vy%uzR(JdPtK%XlA z6A{w-;*`(Me?P4P)J$z=hcQwDuFgGtjgXxE9^z+^!hZs>Gd8ojpvZl{K(lz!Z@(^e zdSqIrY&3#-Sp2{MPr>wu-DF2=yBX6&2=^IvTmL{p)jpZpR2pWNe28tFdMlVZ#SHSu@smIKh#+-GYY&Kd7es%7AX?yz}GUM$a!Qcc`WGdzTKE48mZxQmRUok>mGYP0hkK?L{K!7AZb9Z8IV(6_tNO9XVd%@3_+74(tf(^? 
zA<9^ILh!?6Pi0x^pgixLGU!{rR-)gt!g#Kp@_tUK^d+9qi9o_C`&$1*_G*K;QPmH| zh=u1Q8LKby4{18)%3LNR?GZ&(oQXgzNQiCAPTQWS_p>^4Os(333CGEEx`)G|#rqZg z{r)_lUp2@laa_dtfh^OyUv$qTH()%t3h{J+V)#MDRxTaPZ0|&zQL{lh1dS}dJ==La zvDtK`)=~Lw(aVa1cYu`3(9wpmM+mUIhu43#io664##f(@nmm=SPH7Ayzg}-bJ(;GR z`BAlIrTY*ORuqsax&OV!lN#9fu;|kEbZERSa;tnsKI+|}(sW$JXQvBO5PeSlxKe?| zHciI%5vT~;_s);Xrm;%s1W;$Bq6uE&wSerHOMWrrQ_v5$b2c2{>iomZnDNJO2{>yE zvnNteSsO|+pu!oM53@7WI)pw25vPjzP5mL$wc^6#E6bhvp|44@2U$K9F`-tVQ9Q~_ zvm%vQE0XqgsW?07$a!u4gc(Ljl`)!)&g=^(4vpB^%pq0al_o81ou}^&Ag@AR?aT6( z1Ec1FX?R7Hd=~({xwP;?(F{(>!y^W{D$DL`nwkoiY3Gor>micl`z@)3w4kTH6w&)H zFS7iTq=t)Jll(Ah_+PwBd|ab<&O7l(0nD5$6xbMW6bGJhvU<%)MBe6Qao?2{lQ7Um z;_6`+i||*Q%K;~xZlAuhbT0ocSqNbJ+LWPfGz~)vJ==8Gq+tt1Khh2k`Dg@H}}5?iW#8(|mf%cP^N!l~Or9p?kSZ z?N22dqFkZgPoycsKTb8wmCl|%1L)5Z_fB=Ynr%BI5Mrd)Z&(d|gps0WO*u9g*?ptL zPN?a{W=8h6B#YR!dn>UOCfVElbp$hq1CG+zo~zAbOasFz0>%#S?~#8tbS+$3!N6nM zmnLF^hb`?8$lF>=*=uYUM0C-MHR0McHp}I0e{s;*u*aHc?IOFSCg_nOp&N;HV>icw z*BdfPrdTBLBw9{fgnMBA`WLq+N_nH5#k?Q7AjKPU$5pIaL$BmhntuHZN7ldVbO7ab zLgCb6l{*8n_mz0c)oIY_!;l7?$BO@4=2~zs}PrZxT$wyW%t;Mq$N`$ zG=9VXB++2j&K;RxOwdWPYA8pbF)*N>gDC2U-tq>`CI7a8i1t)TUE{l&M;Q?`IFg)T zE`V=*Y!>a6ShD1?%O1j=zYDsLA-r(nMoFEDwWN$}JP0oo*}KiK(X864jP>Bp5X;vd zg04hnso;7Sus4VH#vY1jM?nu|E#ws)0Frn36=TD-UBn0ZB!+t)TYV@d(1`ZknCo}iGMqRJu5 zz{#8asYc*)dsUzfi1Z7=gr@|^m z2?29zcQ8~!H3qnfTv6{;c3)RiqKiZnH_6G#PjH6nO_JKw(qWR;3#PEB@TKk?5qT-h={)xlE} zaDz6_v)gB+_u*w&%B%5YhN+jWjjcRa@giusz5?-z(fvaRf4-gB@Vnw>8;k{pqjH~Z zC^C~P-5nO_=yS(fZS@|-kiFW>FRl!K0)d#V0AzYi(3Fb^7J~h~O?AmvwvB1i+f<|V zF-MAR^P#AJEX(^dllvBeMHf2vCX#PU>I3A?8cs(>3WI5XiU}rvRrNoUKz^4}<95cp zv6?)VyX2)f<{1ODaIGJh?%rrvZg&z6Vj=m7=RVL{{+H}8lK3DB6|!a%8NIBt{5j+* zT9jK^&|FWRg(PdZ`03{IQ}}%bI`54~I`5q3f@4K1^^ha)R-$C+eba4dJ>R?o`Ui&2A&m}_kn2&}6Xd?B zpzV>M%4C+go~_S#$`yvZ$%bEY)D2(#ZF~C-^L@|}6uRN$seh1P<0;U!>L1>3jp~DW ze0jz5XQsBzZ+JtU)4jbh>l>%&rDOffAR0aoPfNfLbevWQ6$y6O%k{}N3*;JJOJ2@~ zE)%a;Hn#@}9eZdEag|zPvhfG7$Jvh;5>r)bYD{pB*VIbqmM)qhvSY_#UuWEN`qPn5 
z){-}Uqz`q*M1LS;M}T_LWbZ_djQ`$&8i#vY(XzFtmW`6I=}Rd8ymzNl=x6%2aR@9lAx1)@>9 zh+-hjJ29Q4nrmoM-@AM`X%SmJAnLDwv`VHH>fQ4vITKcqMS0j#bq(il3rSV^B=`lV z=WDV&kI2`mp7^B^BdhT(D#|xyMVAXscb=ZyrzqB6t-BXw5G2n;-P z@p3c7zG=!NThJcaSkrfgoEzKz%D6WJ(2ZUrF4U4MFNdr|v^LDC>AIl=X2S0mK%gnF zt0i@2l-Dy%mf&0!4p$68i%+|-w$2{Gq$$ND%B!NocyV%?_o&&99E{iKAR%QoY`(AZYG6!t zD!0kyO?^ZXEaV+?_~W#u*5F2t$WjYST1V5QnXaQ%A!YVkj-KzSBUL5g+q3$E3#S5J zfHChlJDfd$qMw?G^7ekUe(+qRWKWiM11sxzZd!9Xyc2Bd2 zhCsMYYQId+3EgjOeX&qEvDf_Uxo^&xB)F2SHWYtx4p~24yAt1ucSTmTI<@-s8(@(> zvRiLG7I=E|J#}Hm5^LZTH7{*p)-#7@N9sv`a_jm6n|_=oZa&Wr$EPpkBk7JD47Oh3 zTN8AYJcI`)OqN&wP;-2cBJ)C2`bP+`0?;BPP)j~3+=*Du6zWS{Q4l^rkkREanE|8Td#0eeP*o+P{ig76zrZ#tSWMMtRi-sj-Ofd+vD3 zK71I9feXcq7pMTiKgTs?v6Qchs|`Y$AJ{<0oS zmqlQRR8q2nUA)K8!>+$k6Q1ry8}t)7)5nRsUtk>U(36ElB3mjSrK28EeA_!IIGrfn zyF1dvZ^b^rYAi~m20E!o4LTjz2{zbJyhMGQK}@#pftFX88{FQP%@cbtlJVwX-)TVi zrMR!i+id2YRZ93D2`VSyQ!VTTU|g<lvcU5)gyT5N% znlw`P7+LM_#%;U@eNMs;srLUem0tQ)k#e2dJA|NXGW2$A$;d5uux`K7afeFjp-GK& zN9EDVLf&fW;HILU_%l@J#eTQAK zz^2=(Vmkp=qx(P`5o0X+r=GS9;u)2B4Id;T3k$)RvxBGh%FJFr)Ylbmo5`N6W>75O zjkQm!ZYb^9Dd1Dr4n!JGq<8gKEl6B(v~*_Fm`Wl`&EgKI{VO9aRSehAzlFVo{rwaw5aNCywlQ(pN$rxW~NeP zEw^X`^H%BBV0!UKNIXD5cCjqN;)27={insj(nt< zq7HcUB)hl*n=RcO7@DVp(Ap>YHxG#!u@~?0s5e|T=)^61l~6Dwgo1ug$luw1&EMxm z)i)a3FlZrkZJT0mh=ZJ!P;vdbSiD@$4l}WUD-)}Ah|@b`5zG?t-v=&ouMJU<7%uHMvY8Ix{_wVGn5e?YqQt_t08vr5NbI$44H`mp+- zMow8rQGh_>dD`;a`_*OMxm@df_Rd>M71SFGi?be_Zo{#}9c)E(?lu@+QE5!};;470 zC8?zL=$S8Qb%*q;;~&d+n@$UotIOvSir0JANLVNi&6OvDW1g^>*XzJwxj79wT}d;5BetX_KCiVZ1DZ94hg#^xgDSxu+G4J*HHX;o#Y z;RkXtMay7lTh3-HUZ`9IH&0cdnxUm>8wqt!Pidi-o!_G$#t9`Acug_ALo8>F4vD&B z)zdFepvAJi6V$;+df6?h5FOGeZsq#N(xX12d&eUC*o|ZMkaoN;+ph)t4h5AbNmm(h zQ_d4a!{rT*WAwAc?~&!ww=zy2iCOM?#HT_Dzkk?rYm8AV(W4ue;WSl1YEXJ5s%X?# za*JLCHaB;C%@FcIKGRb~?bSJ>j~Sw#3kDf407|60`1!Ai=a|Fw(|2(!D+*mX3BKEq zA?&{(-HIAo_EzS*IX)6x{p9z7m9&G*5}|#(Up9{R+4JSRo0Xs@s(=di;E)e;%@)Iw zPkV6JqF?N`Fj^+1_c`5SpG4A}2)%qq2*kvLhUnsa635fbjjsWG;AB;FfUs!V&BB%_ 
z(ud!Y92ZuP15L?kX{q>CX$528{n)$}%n9U^V=W=~VQS~p6`G&>Hgpl^Gm~Y4+-bPE z6(AB1z1XI?7L)bdcFLh4;L)Gr79Dc`o_+T6UFHXLhcd>4GpRw}08vYZ`gxlDcnWtl zn@=V%6$h3AvqMY0bwtC8;XQD)ttC!exTQFtP%dS%@h6J-5CIK<#g@+SV5E90cay3= z2h%by%SDJ(yk^#O7uE8rJo5rw#oTlk;db~?PTSl(-s@+FSL`dMr4zKFZ?=L&po42( zJLs-i93Ls#C-mMS1omQ)J2!Va@maWEOXw2M^;H>)GW#m+!p7p|{KO8K7xm)5-|A{@ z{LvShG*sEz@)nf>Fs*^4)WU-j=_J#I`f7fVleb|5Z8Z=bW5zNzEWaV*K96?4Qi5apIz4$CH zpmAy0T>Jc=5Jev*rL@KEe0PwyfBm7*hTxi^{$Ss#V(+f&c};r37V>-t(hAe|Ce~0q zq@9Jmfr&6T@b8SWiAXMa@c37f8_W6#{TDh6eyeLYC3d`t|HjPnC^Ub+{LZl`zjo2e z<@@g{dsACUzsDz+cL{lQAsy@m0?YVKq#S@pNa}Y=1 zL_r^AlhMlKqUJ4k@y&XiS7VTwOiDfJg!D^cbgM-#d`gGjLioDP#7N!)L|e18j8=O# zY)GQ(%(^K>a<2g@%zO{M`|XhZEfU!;*J!%m+>>3JgT&Q7Wxe&foeIn5K}nqB4wyL^ zucy=K8EKV)`(nV;{#9J92r`dSY%>ITnUN;7kPeh z?)Em%cvHCPE5&|%yA!j6N#6-pz|Gez=Rc)X3XE~c@fhCI98!qW;cZq*$ z2D49Wxw&|F!e8&ag20>zsFmR41@JvQ>|bcty^wE@+!U!+d-S*{J0N2-gI-iGCU=36 zY+?(><-?tN-`cWRa9ZX;B_j%bpJ$c&zx>A<1WdCUTrDxy9U3r<) z9vw_89O0!$GC>dkr@;Pi`-ES1Lbxd=Ket;6_>_st=WIsF4b4yousP$3xCj%-`E28* z59aLH4o^;ObzCU^yz|AT!9g7oA7_xbpojt6Yz`X%Sy;NnC>ONSRLywt+~0VAKI7Z9 z8!kB0Yr;Q$*;w0Ed*jH;z`~q(2namE^7JIlKSGv?V7-jR8Su5mm-peXxsA8g(a7n) zd(GRI$y$9rzC$|QKiM4ojEwEi=<6d)AhF!PvA8(n2|YRZwp-7qUhUyQJ#fSDaX5X0 zFJej{>8o0yP-AHziJ`UW%5EIULIG(=0b@yRuiGwaYv13S@-Go#ZVk`eOi7-}G0m;v zO4~F8y>htZi~p+@;M9uNy7#O|bHCPVj9*+}(h6qpuLw0{?i+8hTknV^5f-*@4<34w zfSij~jtW0NKdWtat7?WJ5UENbtLdw{e5K=cv0wX{Cp<6bqH9-{L7w$Bi?}W`epYeJ z+%IzmkUXZ&q1LiXY|&g8`6KY#Ylpzs5lT~A2#cV2q~PWsMRHR+{%uZe11%eiZXFwo zTR3?gz{SVkMiRoCvm|1XlePK6X#Rk#{S7)*pG$`5U$BE3=w{0Z9FP@X>?L@j0CgiJB5i*=^?S8R5lv1X|>o(~21qNnp$Zx6>pk)OKgkEW6z-_+Z; z=laiS0oh?XC!Iz=-QvniH5L&w9f7~&?0Gm^A`xy{0)iELSZ~%m`JrBAf6r#cPqW3%F0*F--0K_+HzG5t0B3oly(JUjP&vnqt1D|EvL z89w^S#;CT1u<}{;dH-HnRqg87`<9nU-gNdO_tTkhF%DanMZ=PC_Ed;6IpFJD+lH&Slv4J?{68lSp{|CNP zYJN{t1cYYlcRW~q%?mQwJ*kBt@yud3$v(=*HNLt2Ip^}Y16}i5^e?{xke)-@X3BQt zT)qd4evoAEBh-H4+X$RpYf4LD= ziH5El)VlL(OmwbY!@5trOYO1Y;>l}e%{M)lhJ;}@igGWThn1^7_Yss+`)fWN>nsDf*YDMh?cHSCn`_oNjPc3WbY11$}U82u|AhaKEzZmaNVPp 
z&I=LWS^I*2d+$z@=2VD2rZakZzWn92o^wL6-R8v0T5Yk!*|pW26+w;~@n+k3(w;K^ zk0uGgeZJXim{U;Qtt$?E)r=BQ2%m{H*QCHzgpvC6WJ+d^wOSh9ZJA2-tVO_q`Pj6I zQPtdJ{_4A}T>hJtnHO+rN8(Tkc!%JK7-RCc55%N}BK|+wUodwDyk>H~_E@ulsTNISc9*%n= zO!xeL+d45>$c&0Pq)F}ETmVphfsc-I4zhwV;j)|Fo9$a5{1hsc#cvuS>g00uS>@DCW1reIv;b$mpWS7%g;4hSyZEwzs(&(5RYeT7d{%wB+h6(=PN-R zu}^_YN(!?LrZTlj?j-UM=idgRMlTVmdH7=Ix?$@ag`F-n<;E(t_zU)0D?Pp`Z{O3| z|8P6V_dlle+)^E^4bf|`o@ch7Oy12cB?5P8CXEy&D0_UhE*T_g|6lCAcTiKo_Xlc6 zQNcoyuF|B5O7AMtg-Gup(nBXeXd&1@>C$_o6CfZZ^biCAsi7qF5PI*uCA{GG`F;MLGuGZI9e3oA(aIbVayMO33U!9w=(Dux`<%-GcgYyoY zI^fAhk|utd1vzHP8vW87$(3{HSbG(5nf#8#HndR*u39o&d)nINGv%>98!I@Ma$T>EAJwVnrp2>E7%IHEj|0{LmZWk#Z+thBS~f%loON%< zn{s<`FUy6xWAoI}Ld`IIX6%=oTrj*5~{z=BNX={wCdCX4_6-w!xeMlY8 zADzhKo*a$t7bkG|K9BW@9>QQhOOdo?C^Noyx{zSw(*McYSeI92-;r3&Zyb&7K*g*t zVV9%VmzGwzU2^!WJGlhRe!WR+lg*)58s~SR1)hZDjE;8|8%u_1zTI;z-pdb2JwrB( zvx{^)WU!)F4DU}0EFC*fK=-mF%+fX_pjGEeG8rc;$%?W+4eg_BpTuUjwR*HXzHWiP z57b%@DiLOXWd5iM&_A;k@guh%BDYx1Il_B$HMb4Mke8tupw*(nVj^TdAgh9%9#O;Q z;SD_Z<5Uk2y|#pPZNryEtO4_n!o#*=tn^}sJbTNh`1 zE@Mu5%$L&DN;XV7_RsP$q&k4LnCHL@AYwq~dBAq8DaBCN03RDCaB6jKd8}A(vmnc! 
z(6+0a=KOZhAU=*ROpovf7$J)J%WM}<-D z`p}=|XUBJMOSEDBU2NDrVkOhkp$D)0iI{QW9f$mi~rEbJA)!=7*in2t~sAuLQ0={rBb-jd` znw`sXZE>sb`F6$nKnRSg06SaKgJ++!mn+Dw@NfH@o&5<9A9WCc>t@5OM)KB1xB=#V z8~bnPIsj>Pm9jD|bmWFM&bh7skOM^-eHE2xGYen^;_ANx(ZA`Hh$MJXUv5WA*?ss8XP=7JBeani|-?jWwYZ;&~w+Kc45 z?xZ#QO>U*|E)FgBF0lXrL19qn46sf;>zVv111ydeGdO`~P?c~_;03_{028n;`NE8L z9MZLe%VFBeZ3cKp)uD#2w`so!{>KNXF(F-70O)JJa?%)eK2}VTn#w7MRwy30)59#& zyJX{Iv=djhlTXvDE%SIp@7$_YYxOXZ#&e+++7o^= zj!bp6$g37buZ|cU_E8vlrx(W3#C4e2xL54$YYS}@R?x~rmJ53&s?bp!hUC&^HRX!c z2j?rYc=J=#`52352Sof%;7LcT3$r7C%)7g^eC^>Kyd0R6Ok7pC3d}l}cc35eXq}=$ z0;QaOmjzcN?=Tni6gQ3aV5yiW(vu)JonIt;C3~NL_gr3+=L@%?dp1MAMfi}XzhQ*mQUJ!~-xNmDNfUZ%-)L&|zTT9xoGxkum)@O`=x_5ev1*cEy z$Cs;Ys1tSMScTxadBq`Yki5snqWXpNB+1g#%c$Dp?ONBqWXwV8sx>cC?S+q2Qvtk2 z67294Qlp=8Ko3Xp=02rq3zL6rDNo&zG5f=9P7m z=JMSU6O=XZ@7c?hM;)?WT9nctQqtj%(F3jH1HLsZ9nm9&LAhZ`|{ij zmkJkhxnJ|g%>-Vj6SFmu9KIn$%{8y<(Rp<$Q)wTt9xZyDIo4|JBfL8iI+5kM_rGBJ3xN`16G5nHNT!)jcJcAKI>BR;2-Wstu6IQVR|(}ZpKk@mEb}Il;%dtvZkHgui$f?Bn!$nRMw7{ zlsH#qc`$z?=z+$b%Jwvl zsQ~JKC}*g|t$yvDEvXP=Y(0oDHFcowGvZXO=b1ihjW`00zfkh19A7H)I< zmREJj`?Y{bs&7$|b=cvd?9=X)3K>YSR>a7hbGLp+Bw;b;h^M0@Uq>Ov6+eP5H%ew9 z7ItBJLj;mKq@(gqn+;W^9JTGd6JIw8A{#)`!eU}RbQ`z=ffFQX!Fg_Ml~*!0mo~Hk zD~BE}&~gDA0%!}eyYvy6O|+x4{vf5DxZP&oHLuA2#={3!(=>jpm$7OLZf7@3x{90U z%$Wfh`b-{KL?p*}>B;29c+G8?AfN%l<}2>JxQYd3BB|lQk$&WJKi~EhgAhH(cI@vD za0P+|BQA@#$k(T{fm>5<{jXj?k_rn&B=)DB`SX?g=cwdPMx}fE@ch&45dN>c;%$Hi z8~`$12Ui*OM-hEOikv7`j#N;%OvxheL%tXN+Di^GyP#x+yE1j~I2d{qyipWCC!wk~ zTHu*d3B*@IM$K1CWDn$u7B)6AA8L0Wt@(NAy=ietZS4ke=vJb!2{H)mX?s;CIIo*Kl`Q65=cSz?M|9|?w)q}Um z%B3FoZj{%CKs@e*OQGsUbOK24{Cn`i(Cs|tqfXkxZuj?u!qFsIkcVk5+!6C z2N(^&=JrNZ0UIYR7zZU5J$9M|%6HqU8ni9C-k`SLWK$IxWY98!mjZEsJ~yyk*W$%x znpID|lX|@@k-c!Y!3n)2oE$cTjyTPqlzOXJ;DWHOu@d^qUrijsYdf6sr*?T1qW;07dP)(?$ zQGS&PQU`LxYOKxx&2kXkAZ1W>pKq^afp2$p0k)FUaOLH>So#=G@iifH0;KX)MoivH zo_b2f*}~a?8u(DPW9Sq^_av)AW9Rc&#k`%L%x3GE{mdW?xt5Z!_cAOyi%(=O$8glF zim2cNZ`%pg{RN5-pl4TkmAng6=M_)R{e&tPhC3xyVp8U=2RzT(B~8bT!b(-f=Fu4{ 
zX{hXeVd4%O=;=(uxg4%X#m$@CN7TnL6HuFcN#u4MbZksyY0Co0tzuYLaq4?2L~3!P zgX($COE($zd_bz7oiklv2e)?;rMpw`*@GlA+kpAG!a)J%jd5BdN^7UdV@cw?V6r6t zTge}}f}{gZJlGf@;^ddI?rm2fYdY3wac=%DJpD~BY`NVg22tN*OB=F&z@M>SMADIbLP@_^sgm~48Eey>Ub%0k}M!H1=;c0nFK9B91;{3pC zB=il&fz~bApZb3gjKf~8INp?>&i{(Dfx1;XL z1a$`Z*qM3asXTK^{n_!4jN=b$m)6Xl>!Jgm7&g}uI0RRoJDJrOIH+=#G{}oQ41)ZF zc@r72fWy!S1qlJV*@ChLSsSF7`NJ-$x6SF^8rs~1r)SIDlS2wIr2QawlDIC+xqWD! zj1A>Vi9zZ~xzF3q^r-{f7rRz`5104zavf^-N{)xhoJP~4y?kR1CjHC?^TJhkdk^-u5k-K4fzehE)7Qu+d4f1RUU<)4|euHK^e|7Q-ztN&y9E{Hrh z`hSPP|34-y4$65{JQnNHkkdKEH91ca7ZWaa85KpZBrB<>>~6tf(}h0&hC@#enP9of z@OF+AxFlHr9Xu8dui<-}j-ya5t2kY`s`^e)fq+wUVl>bB#2b<$m{<70yng)Jy8awG zWNyDt4*R5a=Y#D(C-dDe=+5_2^}3*jL$elQlg9>&1*J!cWN%Rls~?dz+{>k4(MG>8 z`2~YtSvHj$u)-srw@S5kfd~FjUeT?H@yHAz`ES?=aZ^FPSVl^9SG?vrgoG8{@a!}m z%TX`N*0#gHy-Tqn!=lcK&_0j3IxPze7h(C1UUXg$lVDgWeGOaa?GHyRi zp4XW_B6?p{J#o9>O`IVZ~#w7I;YpLUWfT z<0U|4U6P_M!Zyd@q?n&YVh%yq*B25>6;oonHe=YqYoGb8Qr&tir64=ZBu*q$6PCV) zZCbb#+3xkG70aztgBrBuo6w&Nc`0%{sa8W!IX>@j4@LePDVh62)$P0Da_p^U-)$?q zCj2W@q4T~P?b)~o3gYt1<3ko{`>%%p7_EMGxM;rzXLRD5*m6<#JvL@pB2_o|*P)H7 z)mj(`4x^zk-l;!HRlw(yZp4`bB8#^0O|ZkD!@%xVWxvpP{)hUe`i zzW5 z0Ma@G#cp4?>PZExD`sNZgQcK&UakQCnQW}x?wGqDk6CyR!<+K~R{eOvcr&Ld)3aQ* zzs*>6jzPx61-cEVgX~sty;-pL`7kkOT5eS}U%vytr;&)ApHMm84Fw<=*N|5xyg(=vtPc!Z`^I~=hYYK{KfAA0AC|+`scg|gf8H5IQe8$D zzOx*QJ_i7q3-e%1g@@N;vd4CZcP4lPj@7&6OwQEkHjJ|7{5UbgrM|>=KU%zjR;QoH zT+O95sPD&UJ;!&0sAgR+VSVf94;u`K=Q1oeYi^}(8qdLw6&YmH%XF0+rq*caS@_LW zfOHwv>!l(ar?}g8+?C5?Fe;UB%e`R`wf%eJnCuI;VW$-lwy2+xU_{?GOEc|I-SL%} zY1Y)DwgS2zH<(=$rW+U0=PgnP#1wyKVQM=>2(eK2@k%z+dFi`#^5ER7X3 zH{3-DPaU9fhr@xORDTA@kJ&kg=)CMQZBeBQaU4GB$oFsYJLhk7(p}v2aA*6*yQn&k zQ-kz~1QmgT;NQHeanPdOaqhUdc0ITG4J1W}|K;Qk?O^OQNo=nrYt4g%-*_9+;1Fw8 zPyXYV&i4Gy6c=0Z%KWTCbK^{b@_2#fwxeNWkIn(-LR18-07oCU^!H+D2glWG$A1f1 z6RwbJ4(y!rxY^)c8K$?Z9$hB3c)}8&e3`o}Hdd4S)JIe|Dw5e)DwQfIXB3Tp-aYU; zo35%t`%7H~w3E&{7Mp5TzhX?BTjKGZhUz;&zbkZ}ZAbay_o7JkF96%r+OS7iyI4?+ z$a5nt3%zcc@j@*Ve6L}!OJ_vL?Kltb!UlqFpf!g@?LGZPRW^Se 
z-irCGt6@*$f_v=dM=H^Z2WANS?>B{ymgE^HWZsxr*Qg#_WW7+W$K^pQj1I5--KmV& zRv*v*0-nIa4#sm1H7v9Sfsm9(aKnlZ#*&rBo7>dyZUT7hJ1?n24}N8)ROt{N5o!JO z%s&XvHy8-OO67M5iMcbEKj50m-zrrW=`}rXuR3tO9l0Oxk$zYGyghagd+144TDyx2 zyF(!i$)pX0QNt6p=vhBCsJdO9Lis%h)XoCyPucsdLzWWIfi`2&XSOu;;)zF-mSZJo-2pYcZK2_z=w0OHCvCep<;Y@F3oP8Fz z((oxz@c!j*`1dNghDP1lFYHjHw{M|>hZHi(V+#I*>nJFQ=UA(FX&uq{?YgO>&=tVh zdxf@Z=GnSL6Zi0Qv3eU*rB2g+Yd2W*g`jeDoRUl5oS$rQ0UHbgoSgUV3&Pj)#+Lt) zt=`T5`R4sUJC!c>yqM7OEX=|5oNru6)y`53FZR4aKes!Dd3K8-C?iI%uE=_hm#;)e zL17O0^c+|MLqTA0OkQwGS2`>3gD6}xOHqU65)*@lWVW8E-V*!;Z5n?}d$YQF7gv%x zS51vb^nE$)^U}q{o1=L7ZX2$7rB#et3zo#1-v{`J?Z?ihvVYWIAQdQ`P^vkSt9ln0 zX=s+NA*Xy!OdAzOIy$U*N;?LcO-Zt58WNO?9#bWfhlU}Df@im`9{CA%kS9)DXk20l ziZwUjJx~m%nRM`~y|5Tt2uUYbQ#fG=TI+Am&8{V;}^y2Z@mujAu?^8=~_$T}I} zyOnn%Ao_j~IwVB3re>>*ja+QtN-48XH$; zD#1#AbZ2C!wt#;`bs|Z?!H#<`Vz(|nZj%0eH$H1Nn?<#!E2hR;$rFWalOUaYxdJ=E z&Ecnpk=)C3=8>9cLXS>z&iZee|$ zBz}BedqTYvtLGjk>B<@(Yv?;!{M>gE)m?@~FbxShRL0B5y%lJ7(MSIPEVlEMELF|4 zK3Sg^T|S)T=Nuw3ogeS=bl^5Usq0aihdzZMk7elK!f21Qw}0edj+AVt>S$4s{sv-QlZVKo+Die7-gG zuI$t@UQE@93`mu?oqGm<%BK)Bj|AM#6_I;ietxUbOFB)9E@fKI>&p2 zE1dv^z^r*t0TwW#^QFAxuQwa@bw=?&NyqP7Y?H?`j$PccxhB{O34i1=e9jDi?%kK- zm0e8MBcnnfQ)4(iX*Ta$fDFwRy1YWbzKZ{Vg70NC`#2|$ni-HVVs6hfhIBSAPG7pS zE{>4aZ)d9PT)vw#fqLEJi^lkhYYNfxjq6LmkQ@=MZ;EAf8Zc8(<=G{#mY zHNOT_`F9lZg=?Q0jMaD69vtONuJT4y+RUq7)Lb5{f?dN8nVSmUp(~=$bC^%nF_gMB zID3-W6Cmi``MTS{|6K++68EPeV$+elK(mbVyHS->WEsEK&Z5RLz67mEwQ7JXNp293 zyi@e*zEXk1%eBy4;6xWra@6-1SET#+KwqAY2#dJJmTXG#Nz6B7r% zcD<%g3P#nw$(8S%1$v%SK$E!XXefTMey9JxRT#bO$| z9B0(A{hufj(0n1u=!Krc)F)g7`o`CMA_5}f{TCk+S$s@|7%>tB6~`B=#wjR_?svB( zmkj}I2(EaMfY?zNM(qVB72E%i$+9_*V~t`BSIIswlrM314>WXD4K)3nje_&$_Me&1B2 z9Mjbx@`Ph?7_6ERUy49_ExOkDC*$)SDh`D{r6Ca1l>r@U(MdO zZ@R$)=WQ)g(KRi_JOCXGN)Ke6K_(X8nS$0NrTrLVY}j1=BXLkUPxJ~Gm`Tv-7h@0} zNV7_CTJ4l*1M3o5kkAPOqXQ|j$!qj{B-Qp|c+D%TtZdsT;lqOm=OoaL$2jurDQ??&(PnWj zBu2pJio@zSO`Nl@!j`PopXPGU>fL-^#}g!QdnC(sA+Amp21(~|`1>C6i%TIVSPZx^ zMI1fZa0?NyHxiVSTR9EIUfJ)h{-~+d3hc%wg6B({zZ-2@rg!0kfmRf9JaU>F^{1uL 
zoms+LF(SO(_12102Sa5ewdoTfttKjq`SQk<_{D{_H;0E-TovIfsP85SH!*F z#aW{#?eDMt1+dGGc$phv1Vq`!3md)lz1Lz_vFJkb%jWqpKNS=c}P&j0H4{fpvxwy8%%;jJkW<-5v$uXzc&*(X&U-v<|b;VZplGU7j+M4$X;d6>y=`{h6wEo<{F* zU9)GwA1{)hX?`Tb&Ar*l3}5$G#kI1hV8}BG4bL<4a}w$*;oP;!EvT>?uKk6Kmd(zF z9Zr(9)}j3!pK)`8UX;&*^TBA17skw?P5JfjmYBeu%L5wVmNa4m%m}P=(kS#wo>$a~2MM19!-_GL%ts2}sXQ zUA~z@2K43NWMbqEdgkYpi_$vmb$&b!xO<2avny@MWYyk_n;DR24}Q~L^w^qn?Ontt z&y_}luY)&QF7`oa*#r!=V`2kNw@>c%J?bcJ>`^f2uX-Y34kT0&471AT2W33;7x;$h zHx9iJofRqq%{j!BxJ~U3-I#c35vtqq+uCvU1g@hmg~kc4OPjo?ovN=QS)JQjC&3fg z)ua-;4-1{cpq>SUxTtgAg#+t%7y$Kxj0`%0g5rS;c3`Do1=>^Vcc0-vQYb*^e22ni<1a5$v> zknZfkB}@5b&^-m{H1!DOJqi+?CHcQ13VRMpNlMNecKZVpZDcuB0zwvX082LF=X4Z$ zW`sQvdGyBq&mwEGD5oaqUb#r7XLV8VeWZuek4xlPQ9qRh;q~w0U#HC8E&BEolr04M zl}zEILP4qf?n6wEm}&XP)Q+mZynG1_9y}-)2Is-9q0YeS zEVKbj+JWa}Qq8qFNbU%%v+*<_xyGwJS|~F%HJKaaLG6i1HhR!o8C+W#Kkq!AIqP9h zyvXyxKa%f(Jx~Af8N@)|Tq8EeQ!X6kk}2ygsk&yFNwfSTpQqXUo%?6cp5NmZsWv4` z8Jgw-xWO#CIV%nxiz6+Rgd5x@U?*3vJJt0g$(!fHZ0t@=CC8c@P828P9bu3O&)l_#?GYXv6$3SQ8d9r`{u!40a{l7a?zy z-)**=+p0Kd%h8{kiW({N{*n*rl4ue*A8=ot$2aw5kUJ?l#7!bFhb`)HkVnZ#@}p;Z zV9y(aUAXVZ7I6vvs-HyiU9ex3I-kzEX3Cz5YsN3S8P%_{+y)wg{?@v|-TC!LGlUGO zx$Q>^@MW#zh|(C?;J4+=@jBh!R`uzp^OoJi;;L-Ba%a2pAS=0WG`m5ko*=F)EiIcf z#rj5#an^d#DCsph1atj|VM<*54?YJdT1Z}Hiq%QRD#l0=uD3)sPox*B&KhmIG@4_;!7q3c3CxK2A9Ue}Wq^`puf zHFRNl?iyqP5M9q4T9;UkT#UJ@T6wMT0I>$7!)$KqCj_inRdN7ZyKrS!Dzsrw6Xaa_ zl3|F{*E~O+z%5q>CEumzeUZ2sYXa`b8&TVi(ukumn;`ZlEY2yV<6P2Dm2Vs&sjj?% zgA-g;^k`GTEeX7=Qz2|PKt}wVZlYuHe6fJmAG|ybRI=KaC({MGw_nOL<>KOCql$2~ z?jFoUqYyq6+52D})aL~YJ-cscoibxNj=4cbQ$DTjZC(9}Ct{}xQ)}!<>1AhVERs|6h8!o4z`-_M==qoL)7e&*qZmu8 zLh#B#Y^o;5)xCEc5lF&~9j!MwKWO_=wC|qwMtlPL{kIy=)LKQ=8){|)iArT6Ztta* zUPi~0z~Aq=cTw&|xr_2+gKvJ=nH@Kzrw`3MW_ejxU8r47*gN9whQ$C_66skEkqZQ8p? 
zU+GV@XR9#b{Wfl4myRd+G)cqzCsol@9QIRWc4A#K#dy+gsmoqV-2Ia+F5oMmsT~yA zKd97duN;;T!G#zcuThJeyPIRXUUbBE`=XO2?Ns@@8ygKjxv9PM?X=stHnxyw6Z~U) z2RAH**_#2MZHlm`!9EM;ZiCyaMlQkzh*x4hC3+}}8r%Fn4pLTf_uoR6lcQa(@jNkr z+)TA)%sSWBMtW&dorR9myLEb?$KlTW08=@=W1@Z727W#4HY(+|H_jk2-J$4Y#XIXO zxtQsX$?7dPqvs!>nLLbfxm$bVvv~3xpJA$*azdo&-s1038ibl6-JmLvR1k;#|)6v{MbyTYj(TQ21EzEZL$HlH12p%J36nZOWN>q z6K&t<#wS9Ch@7!KgKj9&F z=>3f&+`P_I3E2zy!q0axJ8tvb9XEOG*$+$@8EEFA^M%iQOkp}_x%_-`$bpKn>VFaJ z)JQ~;$^N#gPty4He?GMF@$p&9{ivKj%dvtg=g!E+qX$7&WWQhA>LW2RrE&N83yZ>2 z0C#DcHY@<_Q!SzuV8L_o-yBWP-_Np7o3wnYC0?j|Hn`DgkozNYxqE_-k5AXXzj#|! z{-UgnkB_g(O8>d!lc|o@b7ipxb5n!L4eD>*K_F0aH@;B9`cUYWd|X^yK~WKw$GAq- ziS~t0HT?SsM|nGkSe2rCrtS4}v05GkSxQk9;)}_y(bc)Q?R|HBOTjh|fS8z=BI&6D z{(^y_Ax{cQ%X%)ZO#eYaet!KR=f7aC+xoimiyEr&XetVwz^ly?p`5&7b-78ZT=hX$4yBGXtw!#JUt)_DabXOGO-e83s8bTm};Bn*L;+ z55lLB_k-lPrx`L2+e_f;-?5npuneGb&|$nWj;nylWLcR=ii6yjsuiC)LxDqs&i zp|ILKaUI3}_T&0c?PGEglgA!JvMZS!q94xtfYA$~63}jqM~tSkBNyEZj}r6rdWriR zYuYB{^~Byby@Nxc;?_I2L8>+JKGi;)Q->;2j$L<5hzefqmMGccrQ_(02sEsNI$h4T zH0%(!(Mnl-c2uj6XXi5=16wUAf314#{9V^^Y_h=8v&V(U+G#A4Wh7_YX-pmRp?TJC z@Qx8~&&zsA*-%>n{9QNiTVT1;sOolwWgC^XRa{58^%u#O)_nEy&O|EX6Nxp+Y=045 z562=&B%;RFZQq&GdKPXFz4j(Py7J1C-T|e%eRuQFon@AWs-y84MfbJdjoG!}@42-e zn0WE4F^qT}RffO2IR`w6*Rf7@^*4u%UQ0z+7!g1oY5*2YNB`Zce3H}m(CB4Z<_SbL z%aWwlBAl*|g%+%~X6rJvRm5){mq^nm`Z}H4i_c!IHsZ;fG+(gxs``R=F{e+*a9jq9 z;Z?2jXDwdeb$8YGh$r>fW8jd_N3_U!FQP9dZ0aW#Btesk=VYtwt1;Igb)9COlOTmi z5-Oe23?YXZE|^~4CzbjFN7+P3@xEb`x-@ocVfw;rXSN%pnaqOiwn+4TvuHW*L&?iG zGL-{&FM87Re;#Cn@@JaIZQCM?JLq>RbSoyV{UHYJUPT{R2NM@(D_T;xe@XVP+yT+BUheN!_WQo5K9@<`sPDl zfa;It1Y)bSO25nM9KH)T!z`0-VQ%BL$m|{F9j(sSHmy1RXRAD*r4-5ML{QMAhkn%c=HSq^i4&Rf+od z*M9>(g7yYRb}yQ^XBOXh^ySX$Z){)KNI;nRvV_F5boGtA=mvL2a1B(gF9OH0{-vf+ zd-#OJCoR)|vp97+4GR#rtl8&E(@Z(JA=11MRWA{TauNJ+{4yUK&=@~n@-tY@Efi)8 zZ}qy!!g#=;QV0cfe0qY7A85H$(Brn*|hNz%`0M#8wdQb7Z0r4FY1w|E=QPNQX3(|-Q0a% zglw4Eru3LscG*(W)|dUm5P1jfVfO;^rZ5dI5}?h?qNh6!i%@C+`k2d)0R5UUeJSHf 
zC_Q1kjUkl&pvSbQaO;Qng^*w3X1;p|93>hLohO}ifJ3E{s9S1;j4p`1h+h?A9%oL~ z((I88CRKd5Oh{he(CNv0khKt1Ase@gmQ`_t=1U)nLdV^b_Y2-2)-PCpbmNCi`9#y* zQX+LK$U*vPC$fr>CSLH;VuADruBIA_q$}n`of_sp1e?dpcV_!=Y1?$MY ziLN70#Cmu^<;^P{`qujehar{@1gk8WhgGpG@ApzD#4Yze?0#BMA^hm5)MuNTXdr0C z{%C@Mc#~(d@W)Gz$3;&EMb5jz^}Zb9=4UyDhi=*%+!ZkTfIhb_mYK_e=DSwYK_@?# z#S$4n(+>(Rbhc0e8z*g^nDV{YB<3Bl#2U29d*^YP z%Bkc8jUO3^pot&Ss8!7m@~C&W!>ra_?j=r810&gHhihDzpc{05XF?b5(A)5Z4&P|r zy3BjCGaq>&;g%IMB8XMq(8JSD{(C6k{W`{X%0fnIq{Wwf7L0_bhx*^n1XP-{^QX^5f?k zJ}d?G%Mr0Ca--&oVOKNV3nzzVFC!1{RC(V8gxz{G$MScv22kcoQ;VcOH9mW%0wL9$dv)vHZ;&JQlDH zrgrVR5^(>%fuzhmbc5L~#^>Sm0T4FchIzIV*_^IY=PF~lw6_EW`juqzn|}0Vu|J@? zS(d4q5JA96*ivE}%$`(-jiRYLf&hMm2$)oAnSXf8(uEvINC1hdFk&L8gFr#e`^(?5 z^_EkS>XASkr-IHU*7z6uE~9@)L2?l7j|`Lk3<_M=orYh=oPON$XRy`}Y~a#ZI}Q|z z0G$>!irMl=o{8*MLMO&xuZWDnwY(?$N;!e5omGj5_tN@`Bb*QUKh-ho3$0CIibSBc zbb-j^uyEQ&r$h{-qZR3+k0x({6Pn7a#+#2+l$4+@qA!SgDV;x$9i-9YoBnothx?i9 zv+{TW)ILUiLHsC3U@`wl__G|n@agZ@-JF3XJbcBi;#`}{fYSic*q6amX{;abp4LOJ z+4ewR^|iP#EKeNQD^PEcs6f}#;U}j)tr97BgP<=>-(gg%O4;A@g>0 zwnirrOMGE;qanV=n@lk;%f?t%|B_{KAUtj+)hJEMbeT%S4CTjeJSn2%rwar|SlhT3 z#g#lLNldojB(yS80Z*EyB9E-lB5NUJdy7Et)MfX3>%e%Xh|1)jIPgpT<$|>RzoK zQ3BBy(Lrm5=-kCc?$qPf;0*Tlmw4lgyN{jcnD`^tzi`Vqqpq_Eh0j^$mY%9!ZHox# zATL4~DJI6sp|3BapK?}0nb5ksP0cJ1qZ3H`(xt@FLso*8<(Zu2nswI|(fBHOa)(-A z6H8p(PP^Cf;l+XsOP>#m2I;^275dy{+GNF|;;bLe017pCAzyRY85@p7;`EcLV8Z4_ zzq<248=sY^!G1^Px!b2hFK1#*I%)l6$5EShtPd(zDjjk+zE5cU7h$(P_kLYCHDnHI z!Sgyd3pzgg=iQXw9szY!!&%Dprd!TL^g9ecNpC$C>y4CEYFnIa5LKi zwq+AGQ!$vG>4>t{INp+$W7-pQ|5(3P=?;(tu6GRP$E`85WP8tBnxh?M@fCZixFU%j2rVSJ+$Qz+--K1iXR!4kz2KaI$epWysCKxm!b|o?k=Dr9h*)-XD zph&$FTz1)t<|0I%Y!~-E)JDc`%3bwOKY3J0kXFiffd8n>(4; z*KW7IxXe(4d*&()xZ-<}>Y|$AweJAkXTH^PtnrkjAC`^Z=u5RX*elwGQb!Ldx1