diff --git a/.github/propmts/ai-developer-guide.prompt.yml b/.github/propmts/ai-developer-guide.prompt.yml
new file mode 100644
index 0000000..38372b2
--- /dev/null
+++ b/.github/propmts/ai-developer-guide.prompt.yml
@@ -0,0 +1,23 @@
+name: AI Developer Guide - Apply Best Practices
+description: Based on the AI developer guide, propose best practices to improve code, documentation, developer experience and more.
+model: gpt-4o-mini
+modelParameters:
+  temperature: 0.5
+messages:
+  - role: system
+    content: You are a tech lead who proposes improvements to my project based on the AI developer guide.
+  - role: user
+    content: |
+      Read the developer guide at
+      https://github.com/dwmkerr/ai-developer-guide
+      You MUST follow the rules in this guide.
+      Propose improvements to my codebase.
+# testData:
+#   - input: |
+#       The quick brown fox jumped over the lazy dog.
+#       The dog was too tired to react.
+#     expected: Summary - A fox jumped over a lazy, unresponsive dog.
+# evaluators:
+#   - name: Output should start with 'Summary -'
+#     string:
+#       startsWith: 'Summary -'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5bdbc73..fff30dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
# Changelog

+## [0.15.0](https://github.com/dwmkerr/terminal-ai/compare/v0.14.0...v0.15.0) (2025-06-12)
+
+
+### Features
+
+* **devx:** ai developer guide integrated to GitHub AI ([#104](https://github.com/dwmkerr/terminal-ai/issues/104)) ([b2e127a](https://github.com/dwmkerr/terminal-ai/commit/b2e127a1701be188be49ad64f5d9bc5800e958d6))
+
+
+### Bug Fixes
+
+* **devx:** correct prompt path ([c1b06e0](https://github.com/dwmkerr/terminal-ai/commit/c1b06e0197c002b2ec762bff3752512f40d589f6))
+
+## [0.14.0](https://github.com/dwmkerr/terminal-ai/compare/v0.13.2...v0.14.0) (2025-05-24)
+
+
+### Features
+
+* **config:** improve provider/model setup ([#99](https://github.com/dwmkerr/terminal-ai/issues/99)) ([8cd60bf](https://github.com/dwmkerr/terminal-ai/commit/8cd60bfe709a3c810ea3f8badef8e0138d6b7c08))
+* **docs:** langfuse integration ([#96](https://github.com/dwmkerr/terminal-ai/issues/96)) ([2471b63](https://github.com/dwmkerr/terminal-ai/commit/2471b63fde611d32ae9e0ec07382f62bbc620bd0))
+* **docs:** msty config guide ([#97](https://github.com/dwmkerr/terminal-ai/issues/97)) ([1e0c9de](https://github.com/dwmkerr/terminal-ai/commit/1e0c9de3c6a59979416b80b7cbee48ed7460aa47))
+* **providers:** add LiteLLM integration ([#103](https://github.com/dwmkerr/terminal-ai/issues/103)) ([8dc9f8f](https://github.com/dwmkerr/terminal-ai/commit/8dc9f8ff6aae4ab47d31305b4a5bff4d22faad62))
+
## [0.13.2](https://github.com/dwmkerr/terminal-ai/compare/v0.13.1...v0.13.2) (2025-05-01)
diff --git a/README.md b/README.md
index 44f9864..4164038 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
🧠terminal-ai
@@ -333,9 +334,17 @@ export AI_MODEL="gpt-3.5-turbo" # Optional.
To configure multiple providers or advanced options, check the [Configuration Documentation](./docs/configuration.md).
+## Integrations
+
+Integration is available out-of-the-box for [Langfuse](https://langfuse.com/). See the [Integrations Guide](./docs/integrations.md) for details.
+
## Documentation
- [Configuration](./docs/configuration.md)
- [Developer Guide](./docs/developer-guide.md)
+- [Integrations Guide](./docs/integrations.md)
- [Experimental Features](./docs/experimental-features.md)
- [Providers: Setting Up Ollama](./docs/providers/ollama.md)
+- [Providers: Setting Up Msty](./docs/providers/msty.md)
+- [Providers: Setting Up LiteLLM](./docs/providers/litellm.md)
diff --git a/docs/configuration.md b/docs/configuration.md
index a9da72f..da9c8f9 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -213,4 +213,24 @@ providers:
baseURL: https://api.anthropic.com/v1
model: claude-3-opus-20240229
apiKey: '123'
+ msty:
+ name: msty
+ type: openai_compatible
+ baseURL: http://localhost:10000/v1/
+ model: gemma3:1b
+ apiKey: 'notused'
+ ollama:
+ name: ollama
+ type: openai_compatible
+ baseURL: http://localhost:11434/v1/
+ model: gemma3:1b
+ apiKey: 'notused'
+ litellm:
+ name: litellm
+ type: openai_compatible
+ baseURL: http://localhost:4000/
+ model: claude-3.7
+ apiKey: 'notused'
```
+
+Additional configuration options can be set for [Integrations](./integrations.md).
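+
+To select which provider Terminal AI uses, set the top-level `provider` key to one of the names above, as shown in the provider guides. A minimal sketch, assuming you want the local Ollama provider as the default:
+
+```yaml
+# ~/.ai/config.yaml
+provider: ollama   # the current provider; must match a key under 'providers'
+```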
diff --git a/docs/images/integrations-langfuse.png b/docs/images/integrations-langfuse.png
new file mode 100644
index 0000000..8abc8cc
Binary files /dev/null and b/docs/images/integrations-langfuse.png differ
diff --git a/docs/images/litellm.png b/docs/images/litellm.png
new file mode 100644
index 0000000..34231a9
Binary files /dev/null and b/docs/images/litellm.png differ
diff --git a/docs/integrations.md b/docs/integrations.md
new file mode 100644
index 0000000..090a744
--- /dev/null
+++ b/docs/integrations.md
@@ -0,0 +1,57 @@
+# Integrations
+
+Terminal AI can integrate with various external services to enhance its capabilities. This document explains the available integrations and how to configure them.
+
+
+
+- [Langfuse](#langfuse)
+ - [Setup](#setup)
+ - [Configuration Options](#configuration-options)
+
+
+
+## Langfuse
+
+[Langfuse](https://langfuse.com) is an open-source observability platform for LLM applications. Integrating Terminal AI with Langfuse allows you to:
+
+- Track conversations and model interactions
+- Monitor latency, token usage and costs
+- Debug issues in production
+- Analyze model performance over time
+
+![Langfuse](./images/integrations-langfuse.png)
+
+### Setup
+
+To configure Langfuse integration with Terminal AI:
+
+1. Create a Langfuse account at [langfuse.com](https://langfuse.com) or self-host it
+2. Create a new Langfuse project
+3. Obtain your Langfuse secret key and public key
+4. Add the Langfuse configuration to your `~/.ai/config.yaml` file:
+
+```yaml
+integrations:
+ langfuse:
+ secretKey: "your-langfuse-secret-key"
+ publicKey: "your-langfuse-public-key"
+ baseUrl: "https://cloud.langfuse.com"
+ traceName: "terminal-ai"
+```
+
+You can validate your Langfuse configuration by running:
+
+```bash
+ai check
+```
+
+### Configuration Options
+
+| Configuration Option | Default Value | Description |
+|----------------------|---------------|-------------|
+| `secretKey` | (empty) | Your Langfuse secret key (required) |
+| `publicKey` | (empty) | Your Langfuse public key (required) |
+| `baseUrl` | `https://cloud.langfuse.com` | The Langfuse API endpoint |
+| `traceName` | `terminal-ai` | The name to use for traces in Langfuse |
+
+If you're self-hosting Langfuse, change the `baseUrl` to your instance URL.
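+
+For example, a minimal sketch of a self-hosted setup (the URL below is a placeholder for your own instance):
+
+```yaml
+integrations:
+  langfuse:
+    secretKey: "your-langfuse-secret-key"
+    publicKey: "your-langfuse-public-key"
+    baseUrl: "https://langfuse.internal.example.com"
+    traceName: "terminal-ai"
+```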
diff --git a/docs/providers/litellm.md b/docs/providers/litellm.md
new file mode 100644
index 0000000..32b79f4
--- /dev/null
+++ b/docs/providers/litellm.md
@@ -0,0 +1,107 @@
+# LiteLLM
+
+[LiteLLM](https://www.litellm.ai/) can be used to proxy requests to many different inference providers, track LLM spend, manage keys and more:
+
+![LiteLLM](../images/litellm.png)
+
+LiteLLM exposes providers using the [OpenAI API specification](https://docs.litellm.ai/docs/#call-100-llms-using-the-openai-inputoutput-format). This means that it can be used to proxy Terminal AI requests from OpenAI format to non-OpenAI compatible providers.
+
+As an example, we will configure LiteLLM to proxy OpenAI SDK requests to Anthropic Claude. Note that because Claude has an [OpenAI compatible endpoint](https://docs.anthropic.com/en/api/openai-sdk) you could simply configure Terminal AI to use it directly (see the [Configuration Guide](../configuration.md)); however, this example shows how LiteLLM can translate requests into Claude's own API format.
+
+Create a [LiteLLM configuration file](https://docs.litellm.ai/docs/proxy/configs) with Claude 3.7 configured as a model:
+
+```bash
+cat << EOF > litellm_config.yaml
+model_list:
+ - model_name: claude-3.7
+ litellm_params:
+ model: claude-3-7-sonnet-20250219
+ api_key: "os.environ/ANTHROPIC_API_KEY"
+EOF
+```
+
+Export your Anthropic API key so it can be passed into the proxy container. You can also provide an API base URL if you would like to call a custom endpoint (such as an internal AI API gateway):
+
+```bash
+# Export the variables so that 'docker run -e' can pass them into the container.
+export ANTHROPIC_API_KEY="***"
+export ANTHROPIC_API_BASE="https://api.anthropic.com/v1"
+```
+
+Now run the proxy container:
+
+```bash
+docker run \
+ -v $(pwd)/litellm_config.yaml:/app/config.yaml \
+ -e ANTHROPIC_API_KEY \
+ -e ANTHROPIC_API_BASE \
+ -p 4000:4000 \
+ ghcr.io/berriai/litellm:main-latest \
+ --config /app/config.yaml --detailed_debug
+```
+
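+Optionally, first list the models the proxy is serving. This is a quick sanity check and assumes LiteLLM's standard OpenAI-compatible `/v1/models` route:
+
+```bash
+# List the models configured in litellm_config.yaml via the proxy
+curl http://0.0.0.0:4000/v1/models
+# example output (abbreviated):
+# {"object":"list","data":[{"id":"claude-3.7","object":"model", ...}]}
+```
+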
+Run a completion to confirm that your configuration is correct:
+
+```bash
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data '{
+ "model": "claude-3.7",
+ "messages": [
+ {
+ "role": "user",
+ "content": "what llm are you"
+ }
+ ]
+ }'
+# example output:
+# {
+# "model":"claude-3-7-sonnet-20250219",
+# "message":{
+# "content":"I am Claude, an AI assistant created by Anthropic...",
+# "role":"assistant"
+# ...
+```
+
+Now run `ai init` to configure your provider, using the following details:
+
+- Provider Type: OpenAI Compatible
+- API Key: `notused` (the LiteLLM proxy already holds the real provider keys, so Terminal AI does not need one)
+- Base URL: `http://localhost:4000/`
+- Model: `claude-3.7`
+- Provider name: `litellm`
+
+Choose 'yes' for 'Set as current provider' and 'Test API Key & Configuration'. You will see output similar to the below:
+
+```
+✔ Set as current provider? Yes
+✔ Test API Key & Configuration? Yes
+✔ Checking internet connection...
+✔ Checking Base URL http://localhost:4000/...
+✔ Checking API key...
+✔ Checking Model claude-3.7...
+✔ Checking API key rate limit...
+```
+
+At this point you will be able to interface with inference providers via the LiteLLM proxy:
+
+```
+✔ chat: bash one liner for a rainbow
+claude:
+
+ for i in {1..7}; do echo -e "\033[3${i}mRainbow\033[0m"; done
+
+Run this to see a simple rainbow text effect in your terminal.
+```
+
+You can also manually add the LiteLLM provider details to your [Configuration File](../configuration.md):
+
+```yaml
+provider: litellm # set the current provider
+providers:
+ litellm:
+ name: litellm
+ type: openai_compatible
+ baseURL: http://localhost:4000/
+ model: claude-3.7
+ apiKey: notused
+```
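+
+LiteLLM can serve several models behind the same proxy. As a sketch (the additional OpenAI model and its `OPENAI_API_KEY` environment variable are hypothetical here), you could extend `litellm_config.yaml` and then switch models by changing the `model` field in your Terminal AI provider configuration:
+
+```yaml
+model_list:
+  - model_name: claude-3.7
+    litellm_params:
+      model: claude-3-7-sonnet-20250219
+      api_key: "os.environ/ANTHROPIC_API_KEY"
+  - model_name: gpt-4o-mini
+    litellm_params:
+      model: openai/gpt-4o-mini
+      api_key: "os.environ/OPENAI_API_KEY"
+```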
diff --git a/docs/providers/msty.md b/docs/providers/msty.md
index cf27212..89201e0 100644
--- a/docs/providers/msty.md
+++ b/docs/providers/msty.md
@@ -4,7 +4,47 @@ Terminal AI works out of the box with Msty.
To get started, first install: https://msty.app/
-Run Msty and configure a model. As an example we'll use Cohere as a remote provider:
+Once Msty is installed and running, you'll have access to its models through a local API server, which Msty starts automatically on port 10000 by default.
-- New Remote Model Provider: Cohere AI
-- API Key: (Provide your API key)
+Run Msty and configure a model. As an example, we'll use Ollama as a local provider.
+
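+You can optionally confirm that the local API server is reachable and see which models it offers. This sketch assumes Msty exposes the standard OpenAI-compatible `/v1/models` route on its default port:
+
+```bash
+# List the models Msty is serving locally
+curl http://localhost:10000/v1/models
+```
+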
+Then run `ai init` to configure your provider, using the following details:
+
+- Provider Type: OpenAI Compatible
+- API Key: `notused` (by default, Msty doesn't need a key, but Terminal AI requires one to be configured)
+- Base URL: `http://localhost:10000/v1/`
+- Model: Choose one of your available models (e.g. `gemma3:1b` or `llama3.2:latest`)
+
+Choose 'yes' for 'Set as current provider' and 'Test API Key & Configuration'. You will see output similar to the below:
+
+```
+✔ Set as current provider? Yes
+✔ Test API Key & Configuration? Yes
+✔ Checking internet connection...
+✔ Checking Base URL http://localhost:10000/v1/...
+✔ Checking API key...
+✔ Checking Model gemma3:1b...
+✔ Checking API key rate limit...
+```
+
+At this point you will be able to chat using your local Msty installation:
+
+```
+✔ chat: hi msty
+msty: Hello! How can I assist you today?
+```
+
+You can also manually add the Msty provider details to your [Configuration File](../configuration.md):
+
+```yaml
+provider: msty
+providers:
+ msty:
+ name: msty
+ type: openai_compatible
+ baseURL: http://localhost:10000/v1/
+ model: gemma3:1b
+ apiKey: notused
+```
+
+If you're using Msty with a remote provider (such as Cohere), make sure the remote model is configured in the Msty app before attempting to use it with Terminal AI.
diff --git a/package.json b/package.json
index 170a275..305041f 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,7 @@
"name": "@dwmkerr/terminal-ai",
"description": "Effortless AI in your terminal.",
"type": "commonjs",
- "version": "0.13.2",
+ "version": "0.15.0",
"main": "./dist/cli.js",
"bin": {
"ai": "dist/cli.js"
diff --git a/src/chat-pipeline/stages/ensure-api-key.ts b/src/chat-pipeline/stages/ensure-api-key.ts
index 3fc7ec1..40bad8c 100644
--- a/src/chat-pipeline/stages/ensure-api-key.ts
+++ b/src/chat-pipeline/stages/ensure-api-key.ts
@@ -20,5 +20,5 @@ export async function ensureApiKey(executionContext: ExecutionContext) {
// Initialise, this will mutate execution context to set the key, or die
// trying.
- return await init(executionContext, true);
+ return await init(executionContext);
}
diff --git a/src/cli.ts b/src/cli.ts
index 4055b5f..800379b 100755
--- a/src/cli.ts
+++ b/src/cli.ts
@@ -81,7 +81,7 @@ const cli = async (program: Command, executionContext: ExecutionContext) => {
.command("init")
.description("Set or update configuration")
.action(async () => {
- const nextCommand = await init(executionContext, true);
+ const nextCommand = await init(executionContext);
// The only possible next action is chat or quit.
if (nextCommand === Commands.Chat) {
return chat(
diff --git a/src/commands/init/init-first-run.ts b/src/commands/init/init-first-run.ts
index f87d46b..4990fa0 100644
--- a/src/commands/init/init-first-run.ts
+++ b/src/commands/init/init-first-run.ts
@@ -14,7 +14,6 @@ import { initSetProviderApiKey } from "./init-set-provider-api-key";
*/
export async function initFirstRun(
executionContext: ExecutionContext,
- askNextAction: boolean,
): Promise