From 4b244926f364a914d9d0cf95b9e4aa4add10442d Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 5 Nov 2025 13:14:17 +0000 Subject: [PATCH] Add rig-simple example - Rust translation of langchain-simple using rig-core This example demonstrates how to use the rig-core library (https://github.com/0xPlaygrounds/rig) with Ollama to generate structured recipe data. It's a translation of the langchain-simple Python example using Rust and the rig framework. Key features: - Uses rig-core 0.23 with Ollama provider - Leverages rig's extractor pattern for structured output with JSON Schema - Integrates with Coagent server for logging LLM calls - Type-safe structured data using serde and schemars - Async/await with Tokio runtime The implementation includes: - Cargo.toml with dependencies (rig-core, serde, schemars, tokio, etc.) - Structured recipe models with JsonSchema derives - Recipe generator using rig's extractor - Coagent client for logging - Configuration management - Comprehensive README with examples and troubleshooting Related to rust-genai-simple example but using the rig framework instead. --- examples/rig-simple/.gitignore | 3 + examples/rig-simple/Cargo.toml | 14 ++ examples/rig-simple/README.md | 240 ++++++++++++++++++++++ examples/rig-simple/src/coagent_client.rs | 164 +++++++++++++++ examples/rig-simple/src/config.rs | 67 ++++++ examples/rig-simple/src/generator.rs | 153 ++++++++++++++ examples/rig-simple/src/main.rs | 152 ++++++++++++++ examples/rig-simple/src/models.rs | 61 ++++++ 8 files changed, 854 insertions(+) create mode 100644 examples/rig-simple/.gitignore create mode 100644 examples/rig-simple/Cargo.toml create mode 100644 examples/rig-simple/README.md create mode 100644 examples/rig-simple/src/coagent_client.rs create mode 100644 examples/rig-simple/src/config.rs create mode 100644 examples/rig-simple/src/generator.rs create mode 100644 examples/rig-simple/src/main.rs create mode 100644 examples/rig-simple/src/models.rs diff --git a/examples/rig-simple/.gitignore b/examples/rig-simple/.gitignore new file mode 100644 index 0000000..254f86d --- /dev/null +++ b/examples/rig-simple/.gitignore @@ -0,0 +1,3 @@ +target/ +Cargo.lock +generated_recipes.json diff --git a/examples/rig-simple/Cargo.toml b/examples/rig-simple/Cargo.toml new file mode 100644 index 0000000..c8ca469 --- /dev/null +++ b/examples/rig-simple/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "rig-simple" +version = "0.1.0" +edition = "2021" + +[dependencies] +rig-core = { version = "0.23" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1.41", features = ["full"] } +reqwest = { version = "0.12", features = ["json"] } +uuid = { version = "1.10", features = ["v4"] } +anyhow = "1.0" +schemars = { version = "1.0", features = ["derive"] } diff --git a/examples/rig-simple/README.md b/examples/rig-simple/README.md new file mode 100644 index 0000000..70f3870 --- /dev/null +++ b/examples/rig-simple/README.md @@ -0,0 +1,240 @@ +# Try Coagent - rig-simple Example + +This example demonstrates how to use the `rig-core` library with Ollama to generate structured recipe data in Rust. This is a translation of the `langchain-simple` Python example using the rig framework. + +## Prerequisites + +1. **Rust 1.70+** - Install from https://rustup.rs +2. **Ollama** - Install from https://ollama.ai +3. 
**Coagent Server** - Running on `http://localhost:3000` + +## Installation + +### Install Ollama and Pull a Model + +```bash +# Install Ollama (see https://ollama.ai) +# Then pull a model: +ollama pull qwen3:8b +# or +ollama pull llama2 +``` + +## Configuration + +Edit `src/config.rs` to customize: +- Ollama model name (default: `qwen3:8b`) +- Ollama base URL (default: `http://localhost:11434`) +- Temperature and max_tokens +- Default ingredients list +- Number of recipes to generate + +## Usage + +### Build and Run + +```bash +cd examples/rig-simple +cargo run +``` + +### Development + +```bash +# Check for compilation errors +cargo check + +# Run with verbose output +RUST_LOG=debug cargo run + +# Build optimized release version +cargo build --release +./target/release/rig-simple +``` + +## What This Example Does + +1. **Structured Output**: Uses rig's extractor pattern with JsonSchema and serde for type-safe structured data +2. **rig-core Integration**: Demonstrates the rig framework with Ollama LLM +3. **Coagent Logging**: Logs all LLM calls and run metadata to the Coagent server +4. **Metadata Extraction**: Captures timing information +5. **Output**: Generates 3 recipes and saves them to `generated_recipes.json` + +## Key Differences from langchain-simple (Python) + +This Rust example provides similar functionality to the Python version but with: + +- **Type Safety**: Compile-time guarantees for data structures using Rust's type system +- **Performance**: Native performance with async/await using Tokio +- **No Python Dependencies**: Pure Rust implementation using `rig-core` +- **Error Handling**: Robust error handling with `anyhow` and `Result` types +- **Extractor Pattern**: Uses rig's built-in extractor for automatic JSON schema generation + +### Rust vs Python Comparison + +**Python (langchain-simple)**: +```python +from langchain_ollama import OllamaLLM +from pydantic import BaseModel + +class Recipe(BaseModel): + name: str + ingredients: List[str] + # ... + +llm = OllamaLLM(model="qwen3:8b") +result = chain.invoke({"ingredients": ingredients_str}) +``` + +**Rust (rig-simple)**: +```rust +use rig::providers::ollama; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[derive(JsonSchema, Serialize, Deserialize)] +struct Recipe { + name: String, + ingredients: Vec, + // ... +} + +let client = ollama::Client::from_url("http://localhost:11434"); +let extractor = client.extractor::("qwen3:8b").build(); +let result = extractor.extract(&prompt).await?; +``` + +## Why rig-core? + +The rig framework provides several advantages: + +1. **Unified Interface**: Single API for multiple LLM providers (20+ supported) +2. **Built-in Extractors**: Automatic structured output using JSON Schema +3. **Type Safety**: Full Rust type safety with serde and schemars +4. **Modern Design**: Async/await first design with Tokio +5. **Vector Store Support**: Built-in support for 10+ vector databases +6. 
**Production Ready**: Used by organizations like St Jude, Coral Protocol, and Nethermind + +## Project Structure + +``` +rig-simple/ +├── Cargo.toml # Rust dependencies +├── README.md # This file +└── src/ + ├── main.rs # Entry point and main logic + ├── config.rs # Configuration structs + ├── models.rs # Recipe data models with JsonSchema + ├── generator.rs # Recipe generation logic using rig + └── coagent_client.rs # Coagent logging client +``` + +## Troubleshooting + +### Ollama Connection Issues +- Ensure Ollama is running: `ollama serve` +- Check available models: `ollama list` +- Verify the model name in `src/config.rs` matches an available model + +### Coagent Server Issues +- Ensure the Coagent server is running on `http://localhost:3000` +- Check the server logs for any errors +- The example will continue running even if logging fails (warnings will be printed) + +### Model Not Found +```bash +ollama pull qwen3:8b +# or try another model +ollama pull llama2 +``` + +### Compilation Errors +```bash +# Update dependencies +cargo update + +# Clean and rebuild +cargo clean +cargo build +``` + +### JSON Parsing Errors + +If you encounter JSON parsing errors, the model might be returning malformed JSON or wrapping it in markdown code blocks. The example includes logic to handle common cases, but you may need to: + +1. Try a different model (e.g., `llama3.2`, `qwen3:8b`) +2. Adjust the temperature in `src/config.rs` +3. Check the raw response for debugging + +## Example Output + +``` +🍳 Advanced rig-core Recipe Generator (Structured Output) +============================================================ +Generating structured recipes with: chicken, tomatoes, onions, garlic, rice, olive oil, salt, pepper +Using model: qwen3:8b +Max recipes: 3 + +Generating recipes... (this may take a moment) +------------------------------------------------------------ + +🎉 Structured Recipe Collection: +============================================================ + +📋 Recipe 1: Chicken and Rice Pilaf +⏱️ Prep: 15 min | Cook: 30 min | Serves: 4 + +🥘 Ingredients: + • 1 lb chicken breast, diced + • 2 cups rice + • 1 onion, chopped + • 3 cloves garlic, minced + • 2 tomatoes, diced + • 2 tbsp olive oil + • Salt and pepper to taste + +👨‍🍳 Instructions: + 1. Heat olive oil in a large skillet over medium heat + 2. Add diced chicken and cook until browned + 3. Add onion and garlic, sauté until fragrant + 4. Stir in rice and cook for 2 minutes + 5. Add tomatoes and 4 cups water, bring to boil + 6. Reduce heat, cover, and simmer for 20 minutes + 7. Season with salt and pepper to taste + +💾 Recipes saved to: generated_recipes.json + + Duration: 4521 ms +``` + +## Dependencies + +- **rig-core** (0.23) - Rust LLM framework with multi-provider support +- **serde** (1.0) - Serialization framework +- **serde_json** (1.0) - JSON support for serde +- **schemars** (0.8) - JSON Schema generation for Rust types +- **tokio** (1.41) - Async runtime +- **reqwest** (0.12) - HTTP client for Coagent logging +- **uuid** (1.10) - UUID generation for session IDs +- **anyhow** (1.0) - Error handling + +## License + +This example is part of the Infinyon Coagent project. + +## Related Examples + +- **langchain-simple** - Python version using LangChain +- **rust-genai-simple** - Rust version using rust-genai +- **smolagents** - Python agent example + +## Contributing + +Contributions are welcome! Please feel free to submit issues or pull requests. 
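+
+## Verifying the Coagent Server (Optional)
+
+The example logs through the endpoints implemented in `src/coagent_client.rs` (`/sessions/{id}/start`, `/llm-calls`, `/llm-responses`, and `/end`, all under `http://localhost:3000/api/v1`). If you want a quick smoke test that the server is reachable before a run, a request like the one below mirrors what this example sends when a session starts. The payload fields are copied from this example's client; the exact schema your Coagent server expects may differ.
+
+```bash
+# Hypothetical smoke test: POST a session-start event the same way coagent_client.rs does
+curl -X POST http://localhost:3000/api/v1/sessions/smoke-test/start \
+  -H 'Content-Type: application/json' \
+  -d '{"prompt": "smoke test", "prompt_number": 1, "turn_number": 0}'
+```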
+ +## Learn More + +- **rig GitHub**: https://github.com/0xPlaygrounds/rig +- **rig Documentation**: https://docs.rs/rig-core +- **Ollama**: https://ollama.ai +- **Coagent**: https://github.com/terraphim/try-coagent diff --git a/examples/rig-simple/src/coagent_client.rs b/examples/rig-simple/src/coagent_client.rs new file mode 100644 index 0000000..e194236 --- /dev/null +++ b/examples/rig-simple/src/coagent_client.rs @@ -0,0 +1,164 @@ +//! CoagentClient for logging LLM calls and responses. + +use reqwest::Client; +use serde_json::{json, Value}; +use anyhow::Result; + +/// Client for logging to the Coagent server. +#[derive(Debug, Clone)] +pub struct CoagentClient { + base_url: String, + client: Client, + debug: bool, +} + +impl CoagentClient { + /// Create a new CoagentClient. + pub fn new(base_url: String, debug: bool) -> Self { + Self { + base_url, + client: Client::new(), + debug, + } + } + + /// Log the start of a session. + pub async fn log_session_start( + &self, + session_id: &str, + prompt: &str, + prompt_number: u32, + turn_number: u32, + ) -> Result<()> { + let url = format!("{}/sessions/{}/start", self.base_url, session_id); + let payload = json!({ + "prompt": prompt, + "prompt_number": prompt_number, + "turn_number": turn_number, + }); + + if self.debug { + println!("Logging session start: {}", url); + } + + self.client + .post(&url) + .json(&payload) + .send() + .await? + .error_for_status()?; + + Ok(()) + } + + /// Log a new LLM call (request). + pub async fn log_llm_call_new( + &self, + session_id: &str, + prompt: &str, + prompt_number: u32, + turn_number: u32, + issuer: &str, + system_prompt: &str, + ) -> Result<()> { + let url = format!("{}/sessions/{}/llm-calls", self.base_url, session_id); + let payload = json!({ + "prompt": prompt, + "prompt_number": prompt_number, + "turn_number": turn_number, + "issuer": issuer, + "system_prompt": system_prompt, + }); + + if self.debug { + println!("Logging LLM call: {}", url); + } + + self.client + .post(&url) + .json(&payload) + .send() + .await? + .error_for_status()?; + + Ok(()) + } + + /// Log an LLM response. + pub async fn log_llm_response( + &self, + session_id: &str, + response: &str, + prompt_number: u32, + turn_number: u32, + input_tokens: Option, + output_tokens: Option, + total_tokens: Option, + ) -> Result<()> { + let url = format!("{}/sessions/{}/llm-responses", self.base_url, session_id); + let mut payload = json!({ + "response": response, + "prompt_number": prompt_number, + "turn_number": turn_number, + }); + + if let Some(input) = input_tokens { + payload["input_tokens"] = json!(input); + } + if let Some(output) = output_tokens { + payload["output_tokens"] = json!(output); + } + if let Some(total) = total_tokens { + payload["total_tokens"] = json!(total); + } + + if self.debug { + println!("Logging LLM response: {}", url); + } + + self.client + .post(&url) + .json(&payload) + .send() + .await? + .error_for_status()?; + + Ok(()) + } + + /// Log the end of a session. 
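+    ///
+    /// Sends a POST to `{base_url}/sessions/{session_id}/end` with the final response
+    /// text, the elapsed time in milliseconds, and an optional JSON `meta` object that
+    /// is merged into the payload when provided.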
+    pub async fn log_session_end(
+        &self,
+        session_id: &str,
+        response: &str,
+        prompt_number: u32,
+        turn_number: u32,
+        elapsed_time_ms: u64,
+        meta: Option<Value>,
+    ) -> Result<()> {
+        let url = format!("{}/sessions/{}/end", self.base_url, session_id);
+        let mut payload = json!({
+            "response": response,
+            "prompt_number": prompt_number,
+            "turn_number": turn_number,
+            "elapsed_time_ms": elapsed_time_ms,
+        });
+
+        if let Some(m) = meta {
+            payload["meta"] = m;
+        }
+
+        if self.debug {
+            println!("Logging session end: {}", url);
+        }
+
+        self.client
+            .post(&url)
+            .json(&payload)
+            .send()
+            .await?
+            .error_for_status()?;
+
+        Ok(())
+    }
+}
diff --git a/examples/rig-simple/src/config.rs b/examples/rig-simple/src/config.rs
new file mode 100644
index 0000000..4711f82
--- /dev/null
+++ b/examples/rig-simple/src/config.rs
@@ -0,0 +1,67 @@
+//! Configuration for the rig-simple example.
+
+/// Configuration for Ollama LLM settings.
+#[derive(Debug, Clone)]
+pub struct OllamaConfig {
+    pub model_name: String,
+    pub base_url: String,
+    pub temperature: f64,
+    pub max_tokens: Option<u32>,
+}
+
+impl Default for OllamaConfig {
+    fn default() -> Self {
+        Self {
+            model_name: "qwen3:8b".to_string(),
+            base_url: "http://localhost:11434".to_string(),
+            temperature: 0.7,
+            max_tokens: None,
+        }
+    }
+}
+
+/// Configuration for recipe generation.
+#[derive(Debug, Clone)]
+pub struct RecipeConfig {
+    pub default_ingredients: Vec<String>,
+    pub max_recipes: usize,
+}
+
+impl Default for RecipeConfig {
+    fn default() -> Self {
+        Self {
+            default_ingredients: vec![
+                "chicken".to_string(),
+                "tomatoes".to_string(),
+                "onions".to_string(),
+                "garlic".to_string(),
+                "rice".to_string(),
+                "olive oil".to_string(),
+                "salt".to_string(),
+                "pepper".to_string(),
+            ],
+            max_recipes: 3,
+        }
+    }
+}
+
+/// Main application configuration.
+#[derive(Debug, Clone)]
+pub struct AppConfig {
+    pub ollama: OllamaConfig,
+    pub recipes: RecipeConfig,
+}
+
+impl Default for AppConfig {
+    fn default() -> Self {
+        Self {
+            ollama: OllamaConfig::default(),
+            recipes: RecipeConfig::default(),
+        }
+    }
+}
+
+/// Get the default configuration instance.
+pub fn default_config() -> AppConfig {
+    AppConfig::default()
+}
diff --git a/examples/rig-simple/src/generator.rs b/examples/rig-simple/src/generator.rs
new file mode 100644
index 0000000..567bb6a
--- /dev/null
+++ b/examples/rig-simple/src/generator.rs
@@ -0,0 +1,153 @@
+//! Structured recipe generator using rig-core.
+
+use crate::coagent_client::CoagentClient;
+use crate::config::AppConfig;
+use crate::models::RecipeCollection;
+use anyhow::{Context, Result};
+use rig::client::CompletionClient;
+use rig::extractor::Extractor;
+use rig::providers::ollama;
+use std::time::Instant;
+
+/// Metadata captured during LLM execution.
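+///
+/// Timing is measured locally with `Instant`. Token counts stay `None` unless the
+/// caller supplies them, since rig-core does not surface usage data for this call
+/// (see the note in `generate_structured_recipes`).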
+#[derive(Debug, Clone)]
+pub struct LlmMetadata {
+    pub start_time: Instant,
+    pub end_time: Option<Instant>,
+    pub duration_ms: Option<u64>,
+    pub input_tokens: Option<u32>,
+    pub output_tokens: Option<u32>,
+    pub total_tokens: Option<u32>,
+}
+
+impl LlmMetadata {
+    pub fn new() -> Self {
+        Self {
+            start_time: Instant::now(),
+            end_time: None,
+            duration_ms: None,
+            input_tokens: None,
+            output_tokens: None,
+            total_tokens: None,
+        }
+    }
+
+    pub fn complete(&mut self, input_tokens: Option<u32>, output_tokens: Option<u32>) {
+        self.end_time = Some(Instant::now());
+        self.duration_ms = Some(self.start_time.elapsed().as_millis() as u64);
+        self.input_tokens = input_tokens;
+        self.output_tokens = output_tokens;
+        self.total_tokens = match (input_tokens, output_tokens) {
+            (Some(i), Some(o)) => Some(i + o),
+            _ => None,
+        };
+    }
+}
+
+/// Advanced recipe generator with structured output using rig.
+pub struct StructuredRecipeGenerator {
+    extractor: Extractor<<ollama::Client as CompletionClient>::CompletionModel, RecipeCollection>,
+    coagent_client: CoagentClient,
+    config: AppConfig,
+}
+
+impl StructuredRecipeGenerator {
+    /// Initialize the structured recipe generator.
+    pub fn new(config: AppConfig) -> Result<Self> {
+        // Create Ollama client (uses default localhost:11434)
+        // Note: rig-core 0.23 doesn't support custom base URLs directly in the Client constructor
+        // For custom URLs, you would need to configure the OLLAMA_HOST environment variable
+        let client = ollama::Client::new();
+
+        // Create an extractor for structured output
+        // Note: ExtractorBuilder doesn't support direct temperature configuration
+        // Temperature would need to be set on the model if supported by rig-core
+        let extractor = client
+            .extractor::<RecipeCollection>(&config.ollama.model_name)
+            .preamble("You are a professional chef and cooking assistant. \
+                Create exactly three practical and delicious recipes using the provided ingredients. \
+                Each recipe should be complete with ingredients, instructions, and timing information. \
+                Be creative but ensure the recipes are realistic and achievable.")
+            .build();
+
+        let coagent_client = CoagentClient::new("http://localhost:3000/api/v1".to_string(), true);
+
+        Ok(Self {
+            extractor,
+            coagent_client,
+            config,
+        })
+    }
+
+    /// Generate structured recipe data.
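+    ///
+    /// Builds the prompt from the ingredient list plus `RecipeCollection::format_instructions()`,
+    /// logs the request and the response to the Coagent server (logging failures only emit
+    /// warnings), and returns the extracted `RecipeCollection` together with timing metadata.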
+ pub async fn generate_structured_recipes( + &self, + run_id: &str, + ingredients: &[String], + ) -> Result<(RecipeCollection, LlmMetadata)> { + let mut metadata = LlmMetadata::new(); + + let ingredients_str = ingredients.join(", "); + let format_instructions = RecipeCollection::format_instructions(); + + let user_prompt = format!( + "Create three recipes using these ingredients: {} + +{} + +Make sure each recipe: +- Uses the provided ingredients prominently +- Has realistic preparation and cooking times +- Includes clear step-by-step instructions +- Specifies exact quantities for ingredients", + ingredients_str, format_instructions + ); + + // Log the LLM call (request) + if let Err(e) = self + .coagent_client + .log_llm_call_new( + run_id, + &user_prompt, + 1, + 1, + "rig-core", + "You are a professional chef and cooking assistant.", + ) + .await + { + eprintln!("Warning: Failed to log LLM call: {}", e); + } + + // Extract structured response using rig's extractor + let recipe_collection = self + .extractor + .extract(&user_prompt) + .await + .context("Failed to extract structured recipes")?; + + // Note: rig-core doesn't expose token usage in the same way as rust-genai + // We'll set these to None for now, but you could potentially track them + // if you implement custom tracking + metadata.complete(None, None); + + // Log the LLM response + if let Err(e) = self + .coagent_client + .log_llm_response( + run_id, + &serde_json::to_string_pretty(&recipe_collection)?, + 1, + 1, + metadata.input_tokens, + metadata.output_tokens, + metadata.total_tokens, + ) + .await + { + eprintln!("Warning: Failed to log LLM response: {}", e); + } + + Ok((recipe_collection, metadata)) + } +} diff --git a/examples/rig-simple/src/main.rs b/examples/rig-simple/src/main.rs new file mode 100644 index 0000000..59a3645 --- /dev/null +++ b/examples/rig-simple/src/main.rs @@ -0,0 +1,152 @@ +//! Advanced rig-core integration with structured output. +//! +//! This example demonstrates how to use rig-core with Ollama to generate +//! structured recipe data using Rust's type system and serde. + +mod coagent_client; +mod config; +mod generator; +mod models; + +use crate::coagent_client::CoagentClient; +use crate::config::default_config; +use crate::generator::StructuredRecipeGenerator; +use anyhow::Result; +use std::time::Instant; +use uuid::Uuid; + +#[tokio::main] +async fn main() -> Result<()> { + println!("🍳 Advanced rig-core Recipe Generator (Structured Output)"); + println!("{}", "=".repeat(60)); + + // Initialize CoagentClient for logging + let client = CoagentClient::new("http://localhost:3000/api/v1".to_string(), true); + + // Generate a unique run ID for this execution + let run_id = format!("recipe-gen-{}", &Uuid::new_v4().to_string()[..8]); + + // Log the start of the run + if let Err(e) = client + .log_session_start( + &run_id, + "Generate structured recipes using rig-core with Ollama", + 1, + 0, + ) + .await + { + eprintln!("Warning: Failed to log session start: {}", e); + } + + let start_time = Instant::now(); + + match run_generation(&run_id).await { + Ok(_) => { + let elapsed_time_ms = start_time.elapsed().as_millis() as u64; + + // Log the end of the session + if let Err(e) = client + .log_session_end( + &run_id, + "Generated recipes successfully", + 1, + 0, + elapsed_time_ms, + None, + ) + .await + { + eprintln!("Warning: Failed to log session end: {}", e); + } + } + Err(e) => { + eprintln!("❌ Error: {}", e); + eprintln!("\n💡 Troubleshooting:"); + eprintln!(" 1. 
Ensure Ollama is installed and running"); + eprintln!(" 2. Check that the model is available: ollama list"); + eprintln!(" 3. Try pulling the model: ollama pull qwen3:8b"); + + // Log the error + if let Err(log_err) = client + .log_session_end(&run_id, &format!("Error: {}", e), 1, 0, 0, None) + .await + { + eprintln!("Warning: Failed to log error: {}", log_err); + } + + return Err(e); + } + } + + Ok(()) +} + +async fn run_generation(run_id: &str) -> Result<()> { + // Get configuration + let config = default_config(); + + // Initialize generator + let generator = StructuredRecipeGenerator::new(config.clone())?; + + // Define ingredients from config + let ingredients = config.recipes.default_ingredients.clone(); + + println!( + "Generating structured recipes with: {}", + ingredients.join(", ") + ); + println!("Using model: {}", config.ollama.model_name); + println!("Max recipes: {}", config.recipes.max_recipes); + println!("\nGenerating recipes... (this may take a moment)"); + println!("{}", "-".repeat(60)); + + // Generate structured recipes + let (recipe_collection, metadata) = generator + .generate_structured_recipes(run_id, &ingredients) + .await?; + + println!("\n🎉 Structured Recipe Collection:"); + println!("{}", "=".repeat(60)); + + for (i, recipe) in recipe_collection.recipes.iter().enumerate() { + println!("\n📋 Recipe {}: {}", i + 1, recipe.name); + println!( + "⏱️ Prep: {} min | Cook: {} min | Serves: {}", + recipe.prep_time_minutes, recipe.cook_time_minutes, recipe.servings + ); + + println!("\n🥘 Ingredients:"); + for ingredient in &recipe.ingredients { + println!(" • {}", ingredient); + } + + println!("\n👨‍🍳 Instructions:"); + for (j, instruction) in recipe.instructions.iter().enumerate() { + println!(" {}. {}", j + 1, instruction); + } + + println!("{}", "-".repeat(40)); + } + + // Save to JSON file + let output_file = "generated_recipes.json"; + let json_content = serde_json::to_string_pretty(&recipe_collection)?; + std::fs::write(output_file, json_content)?; + + println!("\n💾 Recipes saved to: {}", output_file); + + // Print metadata + if let (Some(input), Some(output)) = (metadata.input_tokens, metadata.output_tokens) { + println!("\n📊 Token Usage:"); + println!(" Input tokens: {}", input); + println!(" Output tokens: {}", output); + println!(" Total tokens: {}", input + output); + } + + if let Some(duration) = metadata.duration_ms { + println!(" Duration: {} ms", duration); + } + + Ok(()) +} diff --git a/examples/rig-simple/src/models.rs b/examples/rig-simple/src/models.rs new file mode 100644 index 0000000..1e079ef --- /dev/null +++ b/examples/rig-simple/src/models.rs @@ -0,0 +1,61 @@ +//! Pydantic-like models for structured recipe data using rig's extractor. + +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// Pydantic model for a structured recipe. +#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)] +pub struct Recipe { + /// Name of the recipe + pub name: String, + /// List of ingredients with quantities + pub ingredients: Vec, + /// Step-by-step cooking instructions + pub instructions: Vec, + /// Preparation time in minutes + pub prep_time_minutes: u32, + /// Cooking time in minutes + pub cook_time_minutes: u32, + /// Number of servings + pub servings: u32, +} + +/// Collection of three recipes. 
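+///
+/// Target type for rig's extractor: the derived `JsonSchema` describes the expected
+/// output shape, and the model's JSON reply is deserialized into this struct via serde.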
+#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)]
+pub struct RecipeCollection {
+    /// List of three recipes
+    pub recipes: Vec<Recipe>,
+}
+
+impl RecipeCollection {
+    /// Get JSON schema instructions for the LLM (used as fallback if extractor doesn't work).
+    pub fn format_instructions() -> String {
+        r#"Return your response as a JSON object with the following structure:
+{
+  "recipes": [
+    {
+      "name": "Recipe Name",
+      "ingredients": ["ingredient 1 with quantity", "ingredient 2 with quantity", ...],
+      "instructions": ["step 1", "step 2", ...],
+      "prep_time_minutes": <number>,
+      "cook_time_minutes": <number>,
+      "servings": <number>
+    },
+    {
+      "name": "Recipe Name 2",
+      ...
+    },
+    {
+      "name": "Recipe Name 3",
+      ...
+    }
+  ]
+}
+
+IMPORTANT:
+- Return ONLY valid JSON, no markdown formatting or code blocks
+- Include exactly 3 recipes in the recipes array
+- All fields are required for each recipe
+- Use realistic values for times and servings"#.to_string()
+    }
+}
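+
+// Minimal round-trip check (illustrative sketch only): verifies that a JSON document
+// shaped like the template in `format_instructions` deserializes into `RecipeCollection`.
+// The literal values below are made up for the test and are not produced by the example.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn recipe_collection_round_trips_from_json() {
+        let json = r#"{
+            "recipes": [{
+                "name": "Test Dish",
+                "ingredients": ["1 cup rice", "2 cups water"],
+                "instructions": ["Rinse the rice", "Simmer until tender"],
+                "prep_time_minutes": 5,
+                "cook_time_minutes": 20,
+                "servings": 2
+            }]
+        }"#;
+
+        let parsed: RecipeCollection =
+            serde_json::from_str(json).expect("template-shaped JSON should deserialize");
+        assert_eq!(parsed.recipes.len(), 1);
+        assert_eq!(parsed.recipes[0].servings, 2);
+    }
+}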