From 2b2ba0e7d4c0c6b0b05144911c67c2164a22429e Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Mon, 15 Dec 2025 11:43:12 +0200
Subject: [PATCH 1/8] feat: add ollama model

---
 frontend/src/lib/api-client.ts                |  6 +-
 python/configs/config.yaml                    |  3 +
 python/configs/providers/ollama.yaml          | 67 +++++++++++++++++++
 python/pyproject.toml                         |  2 +-
 python/uv.lock                                | 22 +++++-
 python/valuecell/adapters/models/__init__.py  |  2 +
 python/valuecell/adapters/models/factory.py   | 24 +++++++
 python/valuecell/config/manager.py            |  1 +
 python/valuecell/core/super_agent/core.py     |  4 ++
 python/valuecell/server/api/routers/models.py |  1 +
 10 files changed, 125 insertions(+), 7 deletions(-)
 create mode 100644 python/configs/providers/ollama.yaml

diff --git a/frontend/src/lib/api-client.ts b/frontend/src/lib/api-client.ts
index 4b9397d03..a80507218 100644
--- a/frontend/src/lib/api-client.ts
+++ b/frontend/src/lib/api-client.ts
@@ -55,9 +55,9 @@ class ApiClient {
       const errorData = await response.json().catch(() => ({}));
       const message = JSON.stringify(
         errorData.message ||
-          errorData.detail ||
-          response.statusText ||
-          `HTTP ${response.status}`,
+        errorData.detail ||
+        response.statusText ||
+        `HTTP ${response.status}`,
       );
 
       if (response.status === 401) {
diff --git a/python/configs/config.yaml b/python/configs/config.yaml
index f081770bb..504961762 100644
--- a/python/configs/config.yaml
+++ b/python/configs/config.yaml
@@ -50,6 +50,9 @@ models:
   dashscope:
     config_file: "providers/dashscope.yaml"
     api_key_env: "DASHSCOPE_API_KEY"
+
+  ollama:
+    config_file: "providers/ollama.yaml"
 
 # Agent Configuration
 agents:
diff --git a/python/configs/providers/ollama.yaml b/python/configs/providers/ollama.yaml
new file mode 100644
index 000000000..d3bec3ea6
--- /dev/null
+++ b/python/configs/providers/ollama.yaml
@@ -0,0 +1,67 @@
+# ============================================
+# Ollama Provider Configuration
+# ============================================
+# Ollama is a local LLM server that runs models on your machine.
+# Make sure Ollama is installed and running before using this provider.
+#
+# Installation: https://ollama.ai
+# Default endpoint: http://localhost:11434
+
+name: "Ollama"
+provider_type: "Ollama"
+enabled: true
+
+# Default model if none specified in agent configuration
+# Format: model_name:tag (e.g., "llama3.2", "gemma3:4b", "qwen3:8b")
+default_model: "llama3.2"
+
+# Model Parameters Defaults
+# These values are used as defaults when creating Ollama model instances
+# defaults:
+  # host: "http://localhost:11434"
+# temperature: 0.7
+# max_tokens: 4096
+
+  # Request timeout in seconds (null = no timeout)
+  # timeout: null
+
+  # Response format (e.g., "json" for structured output, null = default)
+  # format: null
+
+  # Additional model options as dictionary (temperature, top_p, etc.)
+  # Example: {temperature: 0.8, top_p: 0.9}
+  # options: null
+
+  # How long to keep the model loaded in memory
+  # Can be a duration string (e.g., "5m", "1h") or seconds (e.g., 3600)
+  # null = use Ollama default
+  # keep_alive: null
+
+  # Custom prompt template to use (null = use model default)
+  # template: null
+
+  # System message to use for the conversation
+  # system: null
+
+  # Whether to return raw response without formatting (null = false)
+  # raw: null
+
+  # Whether to stream responses (true recommended for better UX)
+  # stream: true
+
+  # Number of retry attempts on failure
+  # retries: 0
+
+  # Delay between retries in seconds
+  # delay_between_retries: 1
+
+  # If true, delay doubles after each retry (exponential backoff)
+  # exponential_backoff: false
+
+# Available Models
+# List of Ollama models available for use
+# To see all available models: ollama list
+models:
+  - id: "llama3.2"
+    name: Llama3.2
+    description: Llama3.2 model
\ No newline at end of file
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 50f8b2ce9..b6c0d00b4 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -19,7 +19,7 @@ dependencies = [
     "yfinance>=0.2.65",
     "requests>=2.32.5",
     "akshare>=1.17.87",
-    "agno[openai, google, lancedb]>=2.0,<3.0",
+    "agno[openai, google, lancedb, ollama]>=2.0,<3.0",
     "edgartools>=4.12.2",
     "sqlalchemy>=2.0.43",
     "aiosqlite>=0.19.0",
diff --git a/python/uv.lock b/python/uv.lock
index 120418c1a..9dded18a5 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 2
+revision = 3
 requires-python = ">=3.12"
 resolution-markers = [
     "python_full_version >= '3.14'",
@@ -62,6 +62,9 @@ lancedb = [
     { name = "lancedb" },
     { name = "tantivy" },
 ]
+ollama = [
+    { name = "ollama" },
+]
 openai = [
     { name = "openai" },
 ]
@@ -1938,6 +1941,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/17/d3/b64c356a907242d719fc668b71befd73324e47ab46c8ebbbede252c154b2/olefile-0.47-py2.py3-none-any.whl", hash = "sha256:543c7da2a7adadf21214938bb79c83ea12b473a4b6ee4ad4bf854e7715e13d1f", size = 114565, upload-time = "2023-12-01T16:22:51.518Z" },
 ]
 
+[[package]]
+name = "ollama"
+version = "0.6.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "httpx" },
+    { name = "pydantic" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9d/5a/652dac4b7affc2b37b95386f8ae78f22808af09d720689e3d7a86b6ed98e/ollama-0.6.1.tar.gz", hash = "sha256:478c67546836430034b415ed64fa890fd3d1ff91781a9d548b3325274e69d7c6", size = 51620, upload-time = "2025-11-13T23:02:17.416Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" },
+]
+
 [[package]]
 name = "openai"
 version = "1.107.0"
@@ -3642,7 +3658,7 @@
 version = "0.1.18"
 source = { editable = "." }
 dependencies = [
     { name = "a2a-sdk", extra = ["http-server"] },
-    { name = "agno", extra = ["google", "lancedb", "openai"] },
+    { name = "agno", extra = ["google", "lancedb", "ollama", "openai"] },
     { name = "aiofiles" },
     { name = "aiosqlite" },
     { name = "akshare" },
@@ -3700,7 +3716,7 @@ test = [
 
 [package.metadata]
 requires-dist = [
     { name = "a2a-sdk", extras = ["http-server"], specifier = ">=0.3.4" },
-    { name = "agno", extras = ["openai", "google", "lancedb"], specifier = ">=2.0,<3.0" },
+    { name = "agno", extras = ["openai", "google", "lancedb", "ollama"], specifier = ">=2.0,<3.0" },
     { name = "aiofiles", specifier = ">=24.1.0" },
     { name = "aiosqlite", specifier = ">=0.19.0" },
     { name = "akshare", specifier = ">=1.17.87" },
diff --git a/python/valuecell/adapters/models/__init__.py b/python/valuecell/adapters/models/__init__.py
index eca956049..dcc6b8bb7 100644
--- a/python/valuecell/adapters/models/__init__.py
+++ b/python/valuecell/adapters/models/__init__.py
@@ -29,6 +29,7 @@
     OpenAICompatibleProvider,
     OpenAIProvider,
     OpenRouterProvider,
+    OllamaProvider,
     SiliconFlowProvider,
     create_model,
     create_model_for_agent,
@@ -49,6 +50,7 @@
     "SiliconFlowProvider",
     "DeepSeekProvider",
     "DashScopeProvider",
+    "OllamaProvider",
     # Convenience functions
     "create_model",
     "create_model_for_agent",
diff --git a/python/valuecell/adapters/models/factory.py b/python/valuecell/adapters/models/factory.py
index 2c8e6f924..341b90525 100644
--- a/python/valuecell/adapters/models/factory.py
+++ b/python/valuecell/adapters/models/factory.py
@@ -564,6 +564,29 @@ def create_embedder(self, model_id: Optional[str] = None, **kwargs):
         )
 
 
+class OllamaProvider(ModelProvider):
+    """Ollama model provider"""
+
+    def create_model(self, model_id: Optional[str] = None, **kwargs):
+        """Create Ollama model via agno"""
+        try:
+            from agno.models.ollama import Ollama
+        except ImportError:
+            raise ImportError(
+                "agno package not installed, install with: pip install agno"
+            )
+
+        model_id = model_id or self.config.default_model
+
+        logger.info(f"Creating Ollama model: {model_id}")
+
+        return Ollama(id=model_id)
+
+    def is_available(self) -> bool:
+        """Ollama doesn't require API key, just needs host configured"""
+        return bool(self.config.parameters.get("host"))
+
+
 class ModelFactory:
     """
     Factory for creating model instances with provider abstraction
@@ -585,6 +608,7 @@ class ModelFactory:
         "openai-compatible": OpenAICompatibleProvider,
         "deepseek": DeepSeekProvider,
         "dashscope": DashScopeProvider,
+        "ollama": OllamaProvider,
     }
 
     def __init__(self, config_manager: Optional[ConfigManager] = None):
diff --git a/python/valuecell/config/manager.py b/python/valuecell/config/manager.py
index e12830266..5da57fd19 100644
--- a/python/valuecell/config/manager.py
+++ b/python/valuecell/config/manager.py
@@ -135,6 +135,7 @@ def primary_provider(self) -> str:
             "openai",
             "openai-compatible",
             "azure",
+            "ollama",
         ]
 
         for preferred in preferred_order:
diff --git a/python/valuecell/core/super_agent/core.py b/python/valuecell/core/super_agent/core.py
index a6b9027e1..0ae8ef36f 100644
--- a/python/valuecell/core/super_agent/core.py
+++ b/python/valuecell/core/super_agent/core.py
@@ -156,6 +156,9 @@ async def run(
             add_history_to_context=True,
             stream=True,
         ):
+            content_type = getattr(response, "content_type", None)
+            if content_type is None:
+                continue
             if response.content_type == "str":
                 yield response.content
                 continue
@@ -175,6 +178,7 @@
             yield final_outcome
 
         except Exception as e:
+            logger.error(f"SuperAgent: error: {e}")
             yield SuperAgentOutcome(
                 decision=SuperAgentDecision.ANSWER,
                 reason=(
diff --git a/python/valuecell/server/api/routers/models.py b/python/valuecell/server/api/routers/models.py
index 6fec260a1..ec7797f6b 100644
--- a/python/valuecell/server/api/routers/models.py
+++ b/python/valuecell/server/api/routers/models.py
@@ -134,6 +134,7 @@ def _api_key_url_for(provider: str) -> str | None:
         "siliconflow": "https://cloud.siliconflow.cn/account/ak",
         "deepseek": "https://platform.deepseek.com/api_keys",
         "dashscope": "https://bailian.console.aliyun.com/#/home",
+        "ollama": None,
     }
     return mapping.get(provider)
 

From fcb609883fc77965bbf5f996f26be256bed1b4c8 Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Mon, 15 Dec 2025 14:16:22 +0200
Subject: [PATCH 2/8] feat: ollama icon

---
 frontend/src/assets/png/index.ts              |   1 +
 .../src/assets/png/model-providers/ollama.png | Bin 0 -> 1268 bytes
 frontend/src/constants/icons.ts               |   2 ++
 python/valuecell/core/super_agent/core.py     |   2 +-
 4 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 frontend/src/assets/png/model-providers/ollama.png

diff --git a/frontend/src/assets/png/index.ts b/frontend/src/assets/png/index.ts
index afa19c9d1..d357ffb85 100644
--- a/frontend/src/assets/png/index.ts
+++ b/frontend/src/assets/png/index.ts
@@ -39,5 +39,6 @@ export { default as OpenAiPng } from "./model-providers/openai.png";
 export { default as OpenAiCompatiblePng } from "./model-providers/openai-compatible.png";
 export { default as OpenRouterPng } from "./model-providers/openrouter.png";
 export { default as SiliconFlowPng } from "./model-providers/siliconflow.png";
+export { default as OllamaSvg } from "./model-providers/ollama.png";
 
 export { default as TrendPng } from "./trend.png";
diff --git a/frontend/src/assets/png/model-providers/ollama.png b/frontend/src/assets/png/model-providers/ollama.png
new file mode 100644
index 0000000000000000000000000000000000000000..ee2681e759f3776beec423a23c6d6f159f41377f
GIT binary patch
literal 1268
[base85 binary payload omitted]

From: talkenigs
Date: Mon, 15 Dec 2025 16:16:24 +0200
Subject: [PATCH 3/8] feat: ollama model

---
 python/configs/providers/ollama.yaml      | 78 ++++------------------
 python/valuecell/core/super_agent/core.py |  7 +-
 2 files changed, 15 insertions(+), 70 deletions(-)

diff --git a/python/configs/providers/ollama.yaml b/python/configs/providers/ollama.yaml
index d3bec3ea6..32067c58e 100644
--- a/python/configs/providers/ollama.yaml
+++ b/python/configs/providers/ollama.yaml
@@ -1,67 +1,15 @@
-# ============================================
-# Ollama Provider Configuration
-# ============================================
-# Ollama is a local LLM server that runs models on your machine.
-# Make sure Ollama is installed and running before using this provider.
-#
-# Installation: https://ollama.ai
-# Default endpoint: http://localhost:11434
-
-name: "Ollama"
-provider_type: "Ollama"
+name: Ollama
+provider_type: Ollama
 enabled: true
-
-# Default model if none specified in agent configuration
-# Format: model_name:tag (e.g., "llama3.2", "gemma3:4b", "qwen3:8b")
-default_model: "llama3.2"
-
-# Model Parameters Defaults
-# These values are used as defaults when creating Ollama model instances
-# defaults:
-  # host: "http://localhost:11434"
-# temperature: 0.7
-# max_tokens: 4096
-
-  # Request timeout in seconds (null = no timeout)
-  # timeout: null
-
-  # Response format (e.g., "json" for structured output, null = default)
-  # format: null
-
-  # Additional model options as dictionary (temperature, top_p, etc.)
-  # Example: {temperature: 0.8, top_p: 0.9}
-  # options: null
-
-  # How long to keep the model loaded in memory
-  # Can be a duration string (e.g., "5m", "1h") or seconds (e.g., 3600)
-  # null = use Ollama default
-  # keep_alive: null
-
-  # Custom prompt template to use (null = use model default)
-  # template: null
-
-  # System message to use for the conversation
-  # system: null
-
-  # Whether to return raw response without formatting (null = false)
-  # raw: null
-
-  # Whether to stream responses (true recommended for better UX)
-  # stream: true
-
-  # Number of retry attempts on failure
-  # retries: 0
-
-  # Delay between retries in seconds
-  # delay_between_retries: 1
-
-  # If true, delay doubles after each retry (exponential backoff)
-  # exponential_backoff: false
-
-# Available Models
-# List of Ollama models available for use
-# To see all available models: ollama list
+default_model: qwen3:4b
 models:
-  - id: "llama3.2"
-    name: Llama3.2
-    description: Llama3.2 model
\ No newline at end of file
+- id: qwen3:4b
+  name: qwen3:4b
+- id: qwen3:1.7b
+  name: qwen3:1.7b
+- id: qwen3:4b
+  name: qwen3:4b
+- id: llama3.2
+  name: Llama3.2
+  description: Llama3.2 model
+
diff --git a/python/valuecell/core/super_agent/core.py b/python/valuecell/core/super_agent/core.py
index 25dceede8..8989f5288 100644
--- a/python/valuecell/core/super_agent/core.py
+++ b/python/valuecell/core/super_agent/core.py
@@ -156,10 +156,8 @@ async def run(
             add_history_to_context=True,
             stream=True,
         ):
-            content_type = getattr(response, "content_type", None)
-            if content_type is None:
-                continue
-            if content_type == "str":
+
+            if response.content_type == "str":
                 yield response.content
                 continue
 
@@ -178,7 +176,6 @@
             yield final_outcome
 
         except Exception as e:
-            logger.error(f"SuperAgent: error: {e}")
             yield SuperAgentOutcome(
                 decision=SuperAgentDecision.ANSWER,
                 reason=(

From 5ecd49978ca453e3f438c26b3ea3867fa87ad0cc Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Mon, 15 Dec 2025 16:18:50 +0200
Subject: [PATCH 4/8] fix: changes

---
 frontend/src/lib/api-client.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/frontend/src/lib/api-client.ts b/frontend/src/lib/api-client.ts
index a80507218..4b9397d03 100644
--- a/frontend/src/lib/api-client.ts
+++ b/frontend/src/lib/api-client.ts
@@ -55,9 +55,9 @@ class ApiClient {
       const errorData = await response.json().catch(() => ({}));
       const message = JSON.stringify(
         errorData.message ||
-        errorData.detail ||
-        response.statusText ||
-        `HTTP ${response.status}`,
+          errorData.detail ||
+          response.statusText ||
+          `HTTP ${response.status}`,
       );
 
       if (response.status === 401) {

From 7c12903a37f9ac285a9c57df532d41048af59027 Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Mon, 15 Dec 2025 16:25:55 +0200
Subject: [PATCH 5/8] fix: ollama yaml

---
 python/configs/providers/ollama.yaml | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/python/configs/providers/ollama.yaml b/python/configs/providers/ollama.yaml
index 32067c58e..9325e8b38 100644
--- a/python/configs/providers/ollama.yaml
+++ b/python/configs/providers/ollama.yaml
@@ -5,11 +5,3 @@ default_model: qwen3:4b
 models:
 - id: qwen3:4b
   name: qwen3:4b
-- id: qwen3:1.7b
-  name: qwen3:1.7b
-- id: qwen3:4b
-  name: qwen3:4b
-- id: llama3.2
-  name: Llama3.2
-  description: Llama3.2 model
-

From a858ee8c3cfb0861dbe2ae356d5f47e2f6bb5cfe Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Tue, 16 Dec 2025 07:47:39 +0200
Subject: [PATCH 6/8] fix: icons

---
 frontend/src/assets/png/index.ts | 2 +-
 frontend/src/constants/icons.ts  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/frontend/src/assets/png/index.ts b/frontend/src/assets/png/index.ts
index d357ffb85..1d89d1cda 100644
--- a/frontend/src/assets/png/index.ts
+++ b/frontend/src/assets/png/index.ts
@@ -39,6 +39,6 @@ export { default as OpenAiPng } from "./model-providers/openai.png";
 export { default as OpenAiCompatiblePng } from "./model-providers/openai-compatible.png";
 export { default as OpenRouterPng } from "./model-providers/openrouter.png";
 export { default as SiliconFlowPng } from "./model-providers/siliconflow.png";
-export { default as OllamaSvg } from "./model-providers/ollama.png";
+export { default as OllamaPng } from "./model-providers/ollama.png";
 
 export { default as TrendPng } from "./trend.png";
diff --git a/frontend/src/constants/icons.ts b/frontend/src/constants/icons.ts
index ef36b625a..e22daf7a6 100644
--- a/frontend/src/constants/icons.ts
+++ b/frontend/src/constants/icons.ts
@@ -15,7 +15,7 @@ import {
   OpenRouterPng,
   SiliconFlowPng,
   ValueCellAgentPng,
-  OllamaSvg,
+  OllamaPng,
 } from "@/assets/png";
 
 export const MODEL_PROVIDER_ICONS = {
@@ -27,7 +27,7 @@
   google: GooglePng,
   azure: AzurePng,
   dashscope: DashScopePng,
-  ollama: OllamaSvg
+  ollama: OllamaPng
 };
 
 export const EXCHANGE_ICONS = {

From 5fd418d7be30c52b022f862e549522542f6fec36 Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Tue, 16 Dec 2025 08:43:03 +0200
Subject: [PATCH 7/8] fix: lints

---
 frontend/src/assets/png/index.ts             | 2 +-
 frontend/src/constants/icons.ts              | 4 ++--
 python/valuecell/adapters/models/__init__.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/frontend/src/assets/png/index.ts b/frontend/src/assets/png/index.ts
index 1d89d1cda..ab470bee1 100644
--- a/frontend/src/assets/png/index.ts
+++ b/frontend/src/assets/png/index.ts
@@ -35,10 +35,10 @@ export { default as AzurePng } from "./model-providers/azure.png";
 export { default as DashScopePng } from "./model-providers/dashscope.png";
 export { default as DeepSeekPng } from "./model-providers/deepseek.png";
 export { default as GooglePng } from "./model-providers/google.png";
+export { default as OllamaPng } from "./model-providers/ollama.png";
 export { default as OpenAiPng } from "./model-providers/openai.png";
 export { default as OpenAiCompatiblePng } from "./model-providers/openai-compatible.png";
 export { default as OpenRouterPng } from "./model-providers/openrouter.png";
 export { default as SiliconFlowPng } from "./model-providers/siliconflow.png";
-export { default as OllamaPng } from "./model-providers/ollama.png";
 
 export { default as TrendPng } from "./trend.png";
diff --git a/frontend/src/constants/icons.ts b/frontend/src/constants/icons.ts
index e22daf7a6..25fe155bb 100644
--- a/frontend/src/constants/icons.ts
+++ b/frontend/src/constants/icons.ts
@@ -10,12 +10,12 @@ import {
   HyperliquidPng,
   MexcPng,
   OkxPng,
+  OllamaPng,
   OpenAiCompatiblePng,
   OpenAiPng,
   OpenRouterPng,
   SiliconFlowPng,
   ValueCellAgentPng,
-  OllamaPng,
 } from "@/assets/png";
 
 export const MODEL_PROVIDER_ICONS = {
@@ -27,7 +27,7 @@
   google: GooglePng,
   azure: AzurePng,
   dashscope: DashScopePng,
-  ollama: OllamaPng
+  ollama: OllamaPng,
 };
 
 export const EXCHANGE_ICONS = {
diff --git a/python/valuecell/adapters/models/__init__.py b/python/valuecell/adapters/models/__init__.py
index dcc6b8bb7..0bdbd6334 100644
--- a/python/valuecell/adapters/models/__init__.py
+++ b/python/valuecell/adapters/models/__init__.py
@@ -26,10 +26,10 @@
     GoogleProvider,
     ModelFactory,
     ModelProvider,
+    OllamaProvider,
     OpenAICompatibleProvider,
     OpenAIProvider,
     OpenRouterProvider,
-    OllamaProvider,
     SiliconFlowProvider,
     create_model,
     create_model_for_agent,

From 842bd7862098a596ad936f36f7bb7d3bfbba5c4b Mon Sep 17 00:00:00 2001
From: talkenigs
Date: Wed, 24 Dec 2025 09:55:41 +0200
Subject: [PATCH 8/8] fix: formatting

---
 python/valuecell/core/super_agent/core.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/python/valuecell/core/super_agent/core.py b/python/valuecell/core/super_agent/core.py
index 8989f5288..a6b9027e1 100644
--- a/python/valuecell/core/super_agent/core.py
+++ b/python/valuecell/core/super_agent/core.py
@@ -156,7 +156,6 @@ async def run(
             add_history_to_context=True,
             stream=True,
         ):
-
            if response.content_type == "str":
                 yield response.content
                 continue
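
With the series applied, the new provider path can be smoke-tested end to end. A minimal sketch, not part of any patch above: it assumes the Ollama daemon from https://ollama.ai is listening on its default http://localhost:11434 endpoint, that the configured default model has been pulled locally with `ollama pull qwen3:4b`, and that agno's Agent / print_response interface is available; the Ollama(id=...) construction mirrors the exact call OllamaProvider.create_model makes in factory.py, and the file name is hypothetical.

    # smoke_test_ollama.py -- hypothetical helper, not included in this series.
    # Assumes the Ollama daemon is reachable at http://localhost:11434 (its
    # default) and that the model was pulled with `ollama pull qwen3:4b`.
    from agno.agent import Agent
    from agno.models.ollama import Ollama

    # Same construction OllamaProvider.create_model performs in factory.py.
    model = Ollama(id="qwen3:4b")

    # One round trip through the local model verifies daemon and model wiring.
    agent = Agent(model=model)
    agent.print_response("Reply with the single word: ok")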