diff --git a/.github/propmts/ai-developer-guide.prompt.yml b/.github/propmts/ai-developer-guide.prompt.yml
new file mode 100644
index 0000000..38372b2
--- /dev/null
+++ b/.github/propmts/ai-developer-guide.prompt.yml
@@ -0,0 +1,23 @@
+name: AI Developer Guide - Apply Best Practices
+description: Based on the AI developer guide, propose best practices to improve code, documentation, developer experience and more.
+model: gpt-4o-mini
+modelParameters:
+  temperature: 0.5
+messages:
+  - role: system
+    content: You are a tech lead and propose improvements to my project based on the AI developer guide.
+  - role: user
+    content: |
+      Read the developer guide at
+      https://github.com/dwmkerr/ai-developer-guide
+      You MUST follow the rules in this guide.
+      Propose improvements to my codebase.
+# testData:
+#   - input: |
+#       The quick brown fox jumped over the lazy dog.
+#       The dog was too tired to react.
+#     expected: Summary - A fox jumped over a lazy, unresponsive dog.
+# evaluators:
+#   - name: Output should start with 'Summary -'
+#     string:
+#       startsWith: 'Summary -'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5bdbc73..fff30dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
 # Changelog
 
+## [0.15.0](https://github.com/dwmkerr/terminal-ai/compare/v0.14.0...v0.15.0) (2025-06-12)
+
+
+### Features
+
+* **devx:** ai developer guide integrated to GitHub AI ([#104](https://github.com/dwmkerr/terminal-ai/issues/104)) ([b2e127a](https://github.com/dwmkerr/terminal-ai/commit/b2e127a1701be188be49ad64f5d9bc5800e958d6))
+
+
+### Bug Fixes
+
+* **devx:** correct prompt path ([c1b06e0](https://github.com/dwmkerr/terminal-ai/commit/c1b06e0197c002b2ec762bff3752512f40d589f6))
+
+## [0.14.0](https://github.com/dwmkerr/terminal-ai/compare/v0.13.2...v0.14.0) (2025-05-24)
+
+
+### Features
+
+* **config:** improve provider/model setup ([#99](https://github.com/dwmkerr/terminal-ai/issues/99)) ([8cd60bf](https://github.com/dwmkerr/terminal-ai/commit/8cd60bfe709a3c810ea3f8badef8e0138d6b7c08))
+* **docs:** langfuse integration ([#96](https://github.com/dwmkerr/terminal-ai/issues/96)) ([2471b63](https://github.com/dwmkerr/terminal-ai/commit/2471b63fde611d32ae9e0ec07382f62bbc620bd0))
+* **docs:** msty config guide ([#97](https://github.com/dwmkerr/terminal-ai/issues/97)) ([1e0c9de](https://github.com/dwmkerr/terminal-ai/commit/1e0c9de3c6a59979416b80b7cbee48ed7460aa47))
+* **providers:** add LiteLLM integration ([#103](https://github.com/dwmkerr/terminal-ai/issues/103)) ([8dc9f8f](https://github.com/dwmkerr/terminal-ai/commit/8dc9f8ff6aae4ab47d31305b4a5bff4d22faad62))
+
 ## [0.13.2](https://github.com/dwmkerr/terminal-ai/compare/v0.13.1...v0.13.2) (2025-05-01)
diff --git a/README.md b/README.md
index 44f9864..4164038 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@

🧠 terminal-ai

Effortless AI in the shell. Maintain your flow and be more effective.

-Supports OpenAI, Gemini, Claude, Ollama and many more
+Supports OpenAI, Gemini, Claude, Ollama, MSTY, LiteLLM and many more.

Demo Recording of Terminal AI

@@ -12,7 +12,8 @@
   Commands |
   Configuration |
   API Key |
-  Documentation
+  Integrations |
+  Documentation

 cicd
@@ -333,9 +334,16 @@ export AI_MODEL="gpt-3.5-turbo" # Optional.
 
 To configure multiple providers or advanced options, check the [Configuration Documentation](./docs/configuration.md).
 
+## Integrations
+
+Integration is available out-of-the-box for [Langfuse](https://langfuse.com/). See the [Integrations Guide](./docs/integrations.md) for details.
+
 ## Documentation
 
 - [Configuration](./docs/configuration.md)
 - [Developer Guide](./docs/developer-guide.md)
+- [Integrations Guide](./docs/integrations.md)
 - [Experimental Features](./docs/experimental-features.md)
 - [Providers: Setting Up Ollama](./docs/providers/ollama.md)
+- [Providers: Setting Up MSTY](./docs/providers/msty.md)
+- [Providers: Setting Up LiteLLM](./docs/providers/litellm.md)
diff --git a/docs/configuration.md b/docs/configuration.md
index a9da72f..da9c8f9 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -213,4 +213,24 @@ providers:
     baseURL: https://api.anthropic.com/v1
     model: claude-3-opus-20240229
     apiKey: '123'
+  msty:
+    name: msty
+    type: openai_compatible
+    baseURL: http://localhost:10000/v1/
+    model: gemma3:1b
+    apiKey: 'notused'
+  ollama:
+    name: ollama
+    type: openai_compatible
+    baseURL: http://localhost:11434/v1/
+    model: gemma3:1b
+    apiKey: 'notused'
+  litellm:
+    name: litellm
+    type: openai_compatible
+    baseURL: http://localhost:4000/
+    model: claude-3.7
+    apiKey: 'notused'
 ```
+
+Additional configuration options can be set for [Integrations](./integrations.md).
diff --git a/docs/images/integrations-langfuse.png b/docs/images/integrations-langfuse.png
new file mode 100644
index 0000000..8abc8cc
Binary files /dev/null and b/docs/images/integrations-langfuse.png differ
diff --git a/docs/images/litellm.png b/docs/images/litellm.png
new file mode 100644
index 0000000..34231a9
Binary files /dev/null and b/docs/images/litellm.png differ
diff --git a/docs/integrations.md b/docs/integrations.md
new file mode 100644
index 0000000..090a744
--- /dev/null
+++ b/docs/integrations.md
@@ -0,0 +1,57 @@
+# Integrations
+
+Terminal AI can integrate with various external services to enhance its capabilities. This document explains the available integrations and how to configure them.
+
+- [Langfuse](#langfuse)
+  - [Setup](#setup)
+  - [Configuration Options](#configuration-options)
+
+## Langfuse
+
+[Langfuse](https://langfuse.com) is an open-source observability platform for LLM applications. Integrating Terminal AI with Langfuse allows you to:
+
+- Track conversations and model interactions
+- Monitor latency, token usage and costs
+- Debug issues in production
+- Analyze model performance over time
+
+![Langfuse Integration Screenshot](./images/integrations-langfuse.png)
+
+### Setup
+
+To configure the Langfuse integration with Terminal AI:
+
+1. Create a Langfuse account at [langfuse.com](https://langfuse.com) or self-host it
+2. Create a new Langfuse project
+3. Obtain your Langfuse secret key and public key
+4. Add the Langfuse configuration to your `~/.ai/config.yaml` file:
+
+```yaml
+integrations:
+  langfuse:
+    secretKey: "your-langfuse-secret-key"
+    publicKey: "your-langfuse-public-key"
+    baseUrl: "https://cloud.langfuse.com"
+    traceName: "terminal-ai"
+```
+
+You can validate your Langfuse configuration by running:
+
+```bash
+ai check
+```
+
+### Configuration Options
+
+| Configuration Option | Default Value | Description |
+|----------------------|---------------|-------------|
+| `secretKey` | (empty) | Your Langfuse secret key (required) |
+| `publicKey` | (empty) | Your Langfuse public key (required) |
+| `baseUrl` | `https://cloud.langfuse.com` | The Langfuse API endpoint |
+| `traceName` | `terminal-ai` | The name to use for traces in Langfuse |
+
+If you're self-hosting Langfuse, change the `baseUrl` to your instance URL.
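Under the hood, an integration like this typically wraps each model call in a trace. A minimal sketch of how a single chat turn could be recorded with the `langfuse` npm SDK; the generation name, fields and environment variable names below are illustrative assumptions, not necessarily Terminal AI's actual wiring:

```typescript
// Emit one trace per conversation and one generation per model call,
// using the keys and endpoint from the configuration above.
import { Langfuse } from "langfuse";

const langfuse = new Langfuse({
  secretKey: process.env.LANGFUSE_SECRET_KEY!, // env var names are illustrative
  publicKey: process.env.LANGFUSE_PUBLIC_KEY!,
  baseUrl: "https://cloud.langfuse.com", // your own URL if self-hosting
});

const trace = langfuse.trace({ name: "terminal-ai" });
trace.generation({
  name: "chat-completion", // hypothetical name, for illustration only
  model: "gpt-4o-mini",
  input: [{ role: "user", content: "hello" }],
  output: { role: "assistant", content: "Hi! How can I help?" },
});

// Flush buffered events before the process exits.
await langfuse.shutdownAsync();
```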
diff --git a/docs/providers/litellm.md b/docs/providers/litellm.md
new file mode 100644
index 0000000..32b79f4
--- /dev/null
+++ b/docs/providers/litellm.md
@@ -0,0 +1,107 @@
+# LiteLLM
+
+[LiteLLM](https://www.litellm.ai/) can be used to proxy requests to many different inference providers, track LLM spend, manage keys and more:
+
+![LiteLLM Screenshot](../images/litellm.png)
+
+LiteLLM exposes providers using the [OpenAI API specification](https://docs.litellm.ai/docs/#call-100-llms-using-the-openai-inputoutput-format). This means it can be used to proxy Terminal AI requests from the OpenAI format to providers that are not OpenAI compatible.
+
+As an example, we will configure LiteLLM to proxy OpenAI SDK requests to Anthropic Claude. Note that as Claude has an [OpenAI compatible endpoint](https://docs.anthropic.com/en/api/openai-sdk) you could simply configure Terminal AI to use this directly (see the [Configuration Guide](../configuration.md)); however, this example shows how LiteLLM can translate requests to Claude's own API format.
+
+Create a [LiteLLM configuration file](https://docs.litellm.ai/docs/proxy/configs) with Claude 3.7 configured as a model:
+
+```bash
+cat << EOF > litellm_config.yaml
+model_list:
+  - model_name: claude-3.7
+    litellm_params:
+      model: claude-3-7-sonnet-20250219
+      api_key: "os.environ/ANTHROPIC_API_KEY"
+EOF
+```
+
+Export your Anthropic API key so that it can be passed to the container. You can also provide an API base URL if you would like to call a custom endpoint (such as an internal AI API gateway):
+
+```bash
+export ANTHROPIC_API_KEY="***"
+export ANTHROPIC_API_BASE="https://api.anthropic.com/v1"
+```
+
+Now run the proxy container:
+
+```bash
+docker run \
+  -v $(pwd)/litellm_config.yaml:/app/config.yaml \
+  -e ANTHROPIC_API_KEY \
+  -e ANTHROPIC_API_BASE \
+  -p 4000:4000 \
+  ghcr.io/berriai/litellm:main-latest \
+  --config /app/config.yaml --detailed_debug
+```
+
+Run a completion to confirm that your configuration is correct:
+
+```bash
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data '{
+    "model": "claude-3.7",
+    "messages": [
+      {
+        "role": "user",
+        "content": "what llm are you"
+      }
+    ]
+  }'
+# example output:
+# {
+#   "model": "claude-3-7-sonnet-20250219",
+#   "message": {
+#     "content": "I am Claude, an AI assistant created by Anthropic...",
+#     "role": "assistant"
+# ...
+```
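Because LiteLLM speaks the OpenAI wire format, the same check can be run from any OpenAI-compatible client. A minimal sketch using the official `openai` npm package, assuming the proxy is listening on `http://localhost:4000` as configured above:

```typescript
// Point the standard OpenAI client at the local LiteLLM proxy; the proxy
// translates the request to Anthropic's API and holds the real keys.
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: "notused", // any non-empty value; LiteLLM supplies provider keys
  baseURL: "http://localhost:4000",
});

const response = await client.chat.completions.create({
  model: "claude-3.7", // the model_name from litellm_config.yaml
  messages: [{ role: "user", content: "what llm are you" }],
});

console.log(response.choices[0].message.content);
```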
+Now run `ai init` to configure your provider, using the following details:
+
+- Provider Type: OpenAI Compatible
+- API Key: `notused` (LiteLLM has the required keys loaded into its proxy)
+- Base URL: `http://localhost:4000/`
+- Model: `claude-3.7`
+- Provider name: `litellm`
+
+Choose 'yes' for 'Set as current provider' and 'Test API Key & Configuration'. You will see output similar to the below:
+
+```
+✔ Set as current provider? Yes
+✔ Test API Key & Configuration? Yes
+✔ Checking internet connection...
+✔ Checking Base URL http://localhost:4000/...
+✔ Checking API key...
+✔ Checking Model claude-3.7...
+✔ Checking API key rate limit...
+```
+
+At this point you will be able to interface with inference providers via the LiteLLM proxy:
+
+```
+✔ chat: bash one liner for a rainbow
+claude:
+
+  for i in {1..7}; do echo -e "\033[3${i}mRainbow\033[0m"; done
+
+Run this to see a simple rainbow text effect in your terminal.
+```
+
+You can also manually add the LiteLLM provider details to your [Configuration File](../configuration.md):
+
+```yaml
+provider: litellm # set the current provider
+providers:
+  litellm:
+    name: litellm
+    type: openai_compatible
+    baseURL: http://localhost:4000/
+    model: claude-3.7
+    apiKey: notused
+```
diff --git a/docs/providers/msty.md b/docs/providers/msty.md
index cf27212..89201e0 100644
--- a/docs/providers/msty.md
+++ b/docs/providers/msty.md
@@ -4,7 +4,47 @@ Terminal AI works out of the box with Msty. To get started, first install:
 
 https://msty.app/
 
-Run Msty and configure a model. As an example we'll use Cohere as a remote provider:
+Once Msty is installed and running, you'll have access to various models through its local API server. Msty automatically starts an API server on port 10000 by default.
 
-- New Remote Model Provider: Cohere AI
-- API Key: (Provide your API key)
+Run Msty and configure a model. As an example we'll use Ollama as a local provider.
+
+Then run `ai init` to configure your provider, using the following details:
+
+- Provider Type: OpenAI Compatible
+- API Key: `notused` (by default, Msty doesn't need a key, but Terminal AI requires one to be configured)
+- Base URL: `http://localhost:10000/v1/`
+- Model: Choose one of your available models (e.g. `gemma3:1b` or `llama3.2:latest`)
+
+Choose 'yes' for 'Set as current provider' and 'Test API Key & Configuration'. You will see output similar to the below:
+
+```
+✔ Set as current provider? Yes
+✔ Test API Key & Configuration? Yes
+✔ Checking internet connection...
+✔ Checking Base URL http://localhost:10000/v1/...
+✔ Checking API key...
+✔ Checking Model gemma3:1b...
+✔ Checking API key rate limit...
+```
+
+At this point you will be able to chat using your local Msty installation:
+
+```
+✔ chat: hi msty
+msty: Hello! How can I assist you today?
+```
+
+You can also manually add the Msty provider details to your [Configuration File](../configuration.md):
+
+```yaml
+provider: msty
+providers:
+  msty:
+    name: msty
+    type: openai_compatible
+    baseURL: http://localhost:10000/v1/
+    model: gemma3:1b
+    apiKey: notused
+```
+
+If you're using Msty with a remote provider (such as Cohere), make sure the model is properly configured in your Msty app before attempting to use it with Terminal AI.
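If you are unsure which model names your Msty server exposes, you can query its OpenAI-compatible models endpoint before running `ai init`. A minimal sketch with the `openai` npm package, assuming Msty is serving on its default port of 10000:

```typescript
// List the models served by the local Msty OpenAI-compatible endpoint,
// so you know what to enter for "Model" during `ai init`.
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: "notused", // Msty does not check the key by default
  baseURL: "http://localhost:10000/v1/",
});

for await (const model of client.models.list()) {
  console.log(model.id); // e.g. "gemma3:1b", "llama3.2:latest"
}
```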
diff --git a/package.json b/package.json
index 170a275..305041f 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,7 @@
   "name": "@dwmkerr/terminal-ai",
   "description": "Effortless AI in your terminal.",
   "type": "commonjs",
-  "version": "0.13.2",
+  "version": "0.15.0",
   "main": "./dist/cli.js",
   "bin": {
     "ai": "dist/cli.js"
diff --git a/src/chat-pipeline/stages/ensure-api-key.ts b/src/chat-pipeline/stages/ensure-api-key.ts
index 3fc7ec1..40bad8c 100644
--- a/src/chat-pipeline/stages/ensure-api-key.ts
+++ b/src/chat-pipeline/stages/ensure-api-key.ts
@@ -20,5 +20,5 @@ export async function ensureApiKey(executionContext: ExecutionContext) {
 
   // Initialise, this will mutate execution context to set the key, or die
   // trying.
-  return await init(executionContext, true);
+  return await init(executionContext);
 }
diff --git a/src/cli.ts b/src/cli.ts
index 4055b5f..800379b 100755
--- a/src/cli.ts
+++ b/src/cli.ts
@@ -81,7 +81,7 @@ const cli = async (program: Command, executionContext: ExecutionContext) => {
     .command("init")
     .description("Set or update configuration")
     .action(async () => {
-      const nextCommand = await init(executionContext, true);
+      const nextCommand = await init(executionContext);
       // The only possible next action is chat or quit.
       if (nextCommand === Commands.Chat) {
         return chat(
diff --git a/src/commands/init/init-first-run.ts b/src/commands/init/init-first-run.ts
index f87d46b..4990fa0 100644
--- a/src/commands/init/init-first-run.ts
+++ b/src/commands/init/init-first-run.ts
@@ -14,7 +14,6 @@ import { initSetProviderApiKey } from "./init-set-provider-api-key";
  */
 export async function initFirstRun(
   executionContext: ExecutionContext,
-  askNextAction: boolean,
 ): Promise<Commands> {
   const interactive = executionContext.isTTYstdin;
 
@@ -33,8 +32,6 @@ To get a free key follow the guide at:`,
   );
   await initSetProviderApiKey(executionContext);
 
-  // We now continue with a regular init run (which'll offer the option to
-  // update the model, whatever). However, we don't ask the user if they want
-  // to change their API key.
-  return await initRegularRun(executionContext, false, askNextAction);
+  // We now continue with a regular init run.
+  return await initRegularRun(executionContext);
 }
diff --git a/src/commands/init/init-regular-run.ts b/src/commands/init/init-regular-run.ts
index 8982aa5..65963ed 100644
--- a/src/commands/init/init-regular-run.ts
+++ b/src/commands/init/init-regular-run.ts
@@ -6,81 +6,114 @@ import { check } from "../../commands/check/check";
 import { initUpdateProviders } from "./init-update-providers";
 import { selectProvider } from "../../ui/select-provider";
 import { updateConfigurationFile } from "../../configuration/update-configuration-file";
+import { selectModel } from "./select/select-model";
 
 export async function initRegularRun(
   executionContext: ExecutionContext,
-  enableUpdateProvider: boolean,
-  askNextAction: boolean,
 ): Promise<Commands> {
-  // If we have multiple providers, directly show the provider selection
-  const providers = Object.values(executionContext.config.providers);
-  if (providers.length > 1) {
-    // Use the enhanced selectProvider with params object, passing current provider name as default
-    const selectedProvider = await selectProvider({
-      message: "Current Provider:",
-      currentProvider: executionContext.provider,
-      availableProviders: providers,
-      default: executionContext.provider.name,
+  let mainOption = "";
+
+  // Create a loop to return to the main menu after actions
+  while (mainOption !== "exit" && mainOption !== "chat") {
+    // Start with a select menu for the main options
+    mainOption = await select({
+      message: inputPrompt("Terminal AI Configuration"),
+      choices: [
+        {
+          name: "1. Select current provider / model",
+          value: "select_provider",
+        },
+        {
+          name: "2. Configure or add provider",
+          value: "reconfigure_provider",
+        },
+        {
+          name: "3. Check configuration",
+          value: "check",
+        },
+        {
+          name: "4. Chat",
+          value: "chat",
+        },
+        {
+          name: "0. Exit",
+          value: "exit",
+        },
+      ],
     });
 
-    // Only update if the selected provider is different from current
-    if (
-      selectedProvider &&
-      selectedProvider.name !== executionContext.provider.name
-    ) {
-      // Update the current provider directly in the execution context
-      executionContext.provider = selectedProvider;
-      // Update the configuration file
-      updateConfigurationFile(executionContext.configFilePath, {
-        provider: selectedProvider.name,
-      });
+    if (mainOption === "exit") {
+      return Commands.Quit;
     }
-  }
 
-  // If we are going to let the user update their provider, do so now.
-  // The only reason we don't do this is if this function is coming
-  // directly after the first-run init.
-  if (enableUpdateProvider) {
-    // Offer advanced options.
-    const updateProvider = await confirm({
-      message: "Reconfigure (key/model/etc) or add new Provider?",
-      default: false,
-    });
-    if (updateProvider) {
-      await initUpdateProviders(executionContext);
+    if (mainOption === "chat") {
+      return Commands.Chat;
     }
-  }
 
-  // Offer to validate.
-  const validate = await confirm({
-    message: "Test API Key & Configuration?",
-    default: false,
-  });
-  if (validate) {
-    await check(executionContext);
-  }
+    if (mainOption === "check") {
+      await check(executionContext);
+      continue; // skip the 'check configuration' question
+    }
 
-  // Ask for the next action if we have chosen this option.
-  if (!askNextAction) {
-    return Commands.Unknown;
-  }
-  const answer = await select({
-    message: inputPrompt("What next?"),
-    default: "chat",
-    choices: [
-      {
-        name: "Chat",
-        value: "chat",
-      },
-      {
-        name: "Quit",
-        value: "quit",
-      },
-    ],
-  });
-  if (answer === "chat") {
-    return Commands.Chat;
+    if (mainOption === "select_provider") {
+      // Select provider/model functionality
+      const providers = Object.values(executionContext.config.providers);
+      if (providers.length > 0) {
+        const selectedProvider = await selectProvider({
+          message: "Current Provider:",
+          currentProvider: executionContext.provider,
+          availableProviders: providers,
+          default: executionContext.provider.name,
+        });
+
+        if (
+          selectedProvider &&
+          selectedProvider.name !== executionContext.provider.name
+        ) {
+          executionContext.provider = selectedProvider;
+          updateConfigurationFile(executionContext.configFilePath, {
+            provider: selectedProvider.name,
+          });
+        }
+
+        // Now allow selecting a model for the chosen provider
+        const provider = executionContext.provider;
+        const model = await selectModel(provider.model, provider.type);
+
+        if (model && model !== provider.model) {
+          provider.model = model;
+
+          // Save the model. It's either a named provider or the root provider.
+          if (provider.name !== "") {
+            updateConfigurationFile(executionContext.configFilePath, {
+              [`providers.${provider.name}.model`]: model,
+            });
+          } else {
+            updateConfigurationFile(executionContext.configFilePath, {
+              [`model`]: model,
+            });
+          }
+        }
+      } else {
+        console.log("No providers configured. Use option 2 to add a provider.");
+      }
+    } else if (mainOption === "reconfigure_provider") {
+      // Configure provider functionality
+      await initUpdateProviders(executionContext);
+    }
+
+    // Offer to validate configuration
+    if (mainOption !== "exit" && mainOption !== "chat") {
+      const validate = await confirm({
+        message: "Test API Key & Configuration?",
+        default: false,
+      });
+      if (validate) {
+        await check(executionContext);
+      }
+    }
   }
 
-  return Commands.Quit;
+  // This point is reached if chat was selected
+  return Commands.Chat;
 }
diff --git a/src/commands/init/init-regular-run.ts.backup b/src/commands/init/init-regular-run.ts.backup
new file mode 100644
index 0000000..8982aa5
--- /dev/null
+++ b/src/commands/init/init-regular-run.ts.backup
@@ -0,0 +1,86 @@
+import { confirm, select } from "@inquirer/prompts";
+import { inputPrompt } from "../../theme";
+import { ExecutionContext } from "../../execution-context/execution-context";
+import { Commands } from "../commands";
+import { check } from "../../commands/check/check";
+import { initUpdateProviders } from "./init-update-providers";
+import { selectProvider } from "../../ui/select-provider";
+import { updateConfigurationFile } from "../../configuration/update-configuration-file";
+
+export async function initRegularRun(
+  executionContext: ExecutionContext,
+  enableUpdateProvider: boolean,
+  askNextAction: boolean,
+): Promise<Commands> {
+  // If we have multiple providers, directly show the provider selection
+  const providers = Object.values(executionContext.config.providers);
+  if (providers.length > 1) {
+    // Use the enhanced selectProvider with params object, passing current provider name as default
+    const selectedProvider = await selectProvider({
+      message: "Current Provider:",
+      currentProvider: executionContext.provider,
+      availableProviders: providers,
+      default: executionContext.provider.name,
+    });
+
+    // Only update if the selected provider is different from current
+    if (
+      selectedProvider &&
+      selectedProvider.name !== executionContext.provider.name
+    ) {
+      // Update the current provider directly in the execution context
+      executionContext.provider = selectedProvider;
+      // Update the configuration file
+      updateConfigurationFile(executionContext.configFilePath, {
+        provider: selectedProvider.name,
+      });
+    }
+  }
+
+  // If we are going to let the user update their provider, do so now.
+  // The only reason we don't do this is if this function is coming
+  // directly after the first-run init.
+  if (enableUpdateProvider) {
+    // Offer advanced options.
+    const updateProvider = await confirm({
+      message: "Reconfigure (key/model/etc) or add new Provider?",
+      default: false,
+    });
+    if (updateProvider) {
+      await initUpdateProviders(executionContext);
+    }
+  }
+
+  // Offer to validate.
+  const validate = await confirm({
+    message: "Test API Key & Configuration?",
+    default: false,
+  });
+  if (validate) {
+    await check(executionContext);
+  }
+
+  // Ask for the next action if we have chosen this option.
+  if (!askNextAction) {
+    return Commands.Unknown;
+  }
+  const answer = await select({
+    message: inputPrompt("What next?"),
+    default: "chat",
+    choices: [
+      {
+        name: "Chat",
+        value: "chat",
+      },
+      {
+        name: "Quit",
+        value: "quit",
+      },
+    ],
+  });
+  if (answer === "chat") {
+    return Commands.Chat;
+  }
+
+  return Commands.Quit;
+}
diff --git a/src/commands/init/init.test.ts b/src/commands/init/init.test.ts
index b2aefee..8323648 100644
--- a/src/commands/init/init.test.ts
+++ b/src/commands/init/init.test.ts
@@ -7,7 +7,7 @@ describe("commands", () => {
     const executionContext = createTestExecutionContext(process, {
       isTTYstdin: false,
     });
-    await expect(() => init(executionContext, false)).rejects.toThrow(
+    await expect(() => init(executionContext)).rejects.toThrow(
       /must be run interactively/,
     );
   });
diff --git a/src/commands/init/init.ts b/src/commands/init/init.ts
index 511ee15..d90f227 100644
--- a/src/commands/init/init.ts
+++ b/src/commands/init/init.ts
@@ -8,7 +8,6 @@ import { initRegularRun } from "./init-regular-run";
 
 export async function init(
   executionContext: ExecutionContext,
-  askNextAction: boolean,
 ): Promise<Commands> {
   const interactive = executionContext.isTTYstdin;
 
@@ -26,6 +25,6 @@
   // run (e.g. fresh install, or config blatted to the point we're
   // fresh-install-like).
   return executionContext.isFirstRun
-    ? await initFirstRun(executionContext, askNextAction)
-    : await initRegularRun(executionContext, true, askNextAction);
+    ? await initFirstRun(executionContext)
+    : await initRegularRun(executionContext);
 }
diff --git a/src/commands/init/select/select-edit-or-add-provider.ts b/src/commands/init/select/select-edit-or-add-provider.ts
index ad666e5..bc359e7 100644
--- a/src/commands/init/select/select-edit-or-add-provider.ts
+++ b/src/commands/init/select/select-edit-or-add-provider.ts
@@ -21,19 +21,19 @@ export async function selectEditOrAddProvider(
   // Add the current provider, which might be the root.
   const currentChoice: ProviderChoice = {
     name: isRoot
-      ? "Update Provider"
-      : `Update ${currentProvider.name} (current)`,
+      ? "Configure Provider"
+      : `Configure ${currentProvider.name} (current)`,
     value: isRoot ? "update_root" : `update_${currentProvider.name}`,
-    description: `Update configuration for ${isRoot ? "current provider" : currentProvider.name}`,
+    description: `Configure settings for ${isRoot ? "current provider" : currentProvider.name}`,
   };
 
   // Then the other providers.
   const nextChoices = [
     ...allProviders
       .filter((p) => p !== currentProvider)
       .map((p) => ({
-        name: `Update ${p.name}`,
+        name: `Configure ${p.name}`,
         value: `update_${p.name}`,
-        description: `Update configuration for ${p.name}`,
+        description: `Configure settings for ${p.name}`,
       })),
   ];
   const addChoice: ProviderChoice = {
@@ -45,7 +45,7 @@
   const choices = [currentChoice, ...nextChoices, new Separator(), addChoice];
 
   const answer = await select({
-    message: "Update / Add Provider:",
+    message: "Configure / Add Provider:",
     choices,
   });