Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
192 changes: 192 additions & 0 deletions src/main/agent/deepseek-model.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
import {
ChatOpenAICompletions,
completionsApiContentBlockConverter,
convertStandardContentMessageToCompletionsMessage,
messageToOpenAIRole
} from '@langchain/openai'
import {
AIMessage,
BaseMessage,
ToolMessage,
convertToProviderContentBlock,
isDataContentBlock
} from '@langchain/core/messages'
import { convertLangChainToolCallToOpenAI } from '@langchain/core/output_parsers/openai_tools'
import type OpenAI from 'openai'

type ChatCompletionMessageParam = OpenAI.Chat.Completions.ChatCompletionMessageParam

/**
 * Convert LangChain messages into OpenAI Chat Completions message params,
 * preserving DeepSeek's non-standard `reasoning_content` field.
 *
 * Mirrors LangChain's stock completions conversion (role mapping, multimodal
 * content blocks, function/tool calls, audio follow-up message) with one
 * addition: any `additional_kwargs.reasoning_content` previously captured by
 * `DeepSeekChatOpenAI._convertCompletionsMessageToBaseMessage` is copied onto
 * the outgoing param.
 *
 * NOTE(review): DeepSeek's reasoning-model docs state that including
 * `reasoning_content` in *input* messages can cause a 400 for
 * `deepseek-reasoner` — confirm the target endpoint tolerates replaying it.
 *
 * @param messages - LangChain messages in conversation order.
 * @returns Completions-API message params; one message may expand to two
 *   entries when an assistant audio reference must follow it.
 */
function convertMessagesToCompletionsMessageParamsWithReasoning({
  messages
}: {
  messages: BaseMessage[]
}): ChatCompletionMessageParam[] {
  return messages.flatMap((message) => {
    // Messages already in the standard "v1" content format have a dedicated
    // library converter; use it verbatim.
    if (
      'response_metadata' in message &&
      message.response_metadata &&
      'output_version' in message.response_metadata &&
      message.response_metadata.output_version === 'v1'
    ) {
      return convertStandardContentMessageToCompletionsMessage({
        message: message as Parameters<typeof convertStandardContentMessageToCompletionsMessage>[0]['message']
      })
    }

    const role = messageToOpenAIRole(message as Parameters<typeof messageToOpenAIRole>[0])
    const rawContent = (message as AIMessage).content
    // Multimodal data blocks must be converted to the Completions wire
    // format; plain blocks and string content pass through untouched.
    const content = Array.isArray(rawContent)
      ? rawContent.map((block) => {
          if (isDataContentBlock(block)) {
            return convertToProviderContentBlock(block, completionsApiContentBlockConverter)
          }
          return block
        })
      : rawContent

    const completionParam: Record<string, unknown> = { role, content }

    if ('name' in message && message.name != null) completionParam.name = message.name

    // Legacy function-calling payloads are forwarded verbatim.
    if (
      'additional_kwargs' in message &&
      message.additional_kwargs &&
      'function_call' in message.additional_kwargs &&
      message.additional_kwargs.function_call != null
    ) {
      completionParam.function_call = message.additional_kwargs.function_call
    }

    // Prefer first-class tool calls on AI messages; otherwise fall back to
    // additional_kwargs, and carry tool_call_id for tool-result messages.
    if (AIMessage.isInstance(message) && message.tool_calls?.length) {
      completionParam.tool_calls = message.tool_calls.map(convertLangChainToolCallToOpenAI)
    } else {
      if (
        'additional_kwargs' in message &&
        message.additional_kwargs &&
        'tool_calls' in message.additional_kwargs &&
        message.additional_kwargs.tool_calls != null
      ) {
        completionParam.tool_calls = message.additional_kwargs.tool_calls
      }
      if (ToolMessage.isInstance(message) && message.tool_call_id != null) {
        completionParam.tool_call_id = message.tool_call_id
      }
    }

    // DeepSeek-specific: replay the reasoning_content captured on the way in.
    const reasoningContent =
      'additional_kwargs' in message ? (message.additional_kwargs?.reasoning_content as unknown) : undefined
    if (reasoningContent !== undefined) {
      completionParam.reasoning_content = reasoningContent
    }

    // Audio outputs are referenced by id in a trailing assistant message.
    if (
      'additional_kwargs' in message &&
      message.additional_kwargs &&
      message.additional_kwargs.audio &&
      typeof message.additional_kwargs.audio === 'object' &&
      'id' in message.additional_kwargs.audio
    ) {
      const audioMessage = {
        role: 'assistant' as const,
        audio: { id: String(message.additional_kwargs.audio.id) }
      }
      return [
        completionParam as unknown as ChatCompletionMessageParam,
        audioMessage as unknown as ChatCompletionMessageParam
      ]
    }

    return completionParam as unknown as ChatCompletionMessageParam
  })
}

/**
 * ChatOpenAI (Completions API) subclass for DeepSeek's OpenAI-compatible API.
 *
 * DeepSeek returns an extra `reasoning_content` field on assistant messages
 * (notably for `deepseek-reasoner`) that the stock LangChain converters drop.
 * This subclass captures it into `additional_kwargs.reasoning_content` on
 * responses and re-sends it on requests via
 * `convertMessagesToCompletionsMessageParamsWithReasoning`. Streaming is
 * rejected outright because this class only preserves `reasoning_content`
 * on the non-streaming path.
 */
export class DeepSeekChatOpenAI extends ChatOpenAICompletions {
  /**
   * Delegate to the parent conversion, then stash DeepSeek's
   * `reasoning_content` (absent from OpenAI's types, hence the cast)
   * into `additional_kwargs` so it survives round-trips.
   */
  protected _convertCompletionsMessageToBaseMessage(
    message: OpenAI.Chat.Completions.ChatCompletionMessage,
    rawResponse: OpenAI.Chat.Completions.ChatCompletion
  ) {
    const baseMessage = super._convertCompletionsMessageToBaseMessage(message, rawResponse)
    const reasoningContent = (message as { reasoning_content?: unknown }).reasoning_content
    if (AIMessage.isInstance(baseMessage) && reasoningContent != null) {
      baseMessage.additional_kwargs = {
        ...baseMessage.additional_kwargs,
        reasoning_content: reasoningContent
      }
    }
    return baseMessage
  }

  /**
   * Non-streaming generate that routes outgoing messages through the custom
   * reasoning-aware mapper instead of LangChain's default conversion.
   *
   * @param messages - Conversation history to send.
   * @param options - Call options (signal, per-request overrides).
   * @param _runManager - Unused; accepted only to match the parent signature.
   * @returns A ChatResult-shaped object with generations and token usage.
   * @throws If invocation params request streaming (unsupported here).
   */
  public async _generate(
    messages: Parameters<ChatOpenAICompletions['_generate']>[0],
    options: Parameters<ChatOpenAICompletions['_generate']>[1],
    _runManager: Parameters<ChatOpenAICompletions['_generate']>[2]
  ) {
    const usageMetadata: Record<string, number | undefined> = {}
    const params = this.invocationParams(options)

    // Fail loudly rather than silently losing reasoning_content chunks.
    if (params.stream) {
      throw new Error('DeepSeek streaming is disabled to preserve reasoning_content.')
    }

    const messagesMapped = convertMessagesToCompletionsMessageParamsWithReasoning({ messages })

    const data = await this.completionWithRetry(
      {
        ...params,
        stream: false,
        messages: messagesMapped
      },
      {
        signal: options?.signal,
        ...options?.options
      }
    )

    // NOTE(review): truthiness checks skip a legitimate 0 token count —
    // harmless for totals, but worth confirming intent.
    const usage = data?.usage
    if (usage?.completion_tokens) usageMetadata.output_tokens = usage.completion_tokens
    if (usage?.prompt_tokens) usageMetadata.input_tokens = usage.prompt_tokens
    if (usage?.total_tokens) usageMetadata.total_tokens = usage.total_tokens

    const generations: Array<{
      text: string
      message: AIMessage
      generationInfo?: Record<string, unknown>
    }> = []
    for (const part of data?.choices ?? []) {
      const text = part.message?.content ?? ''
      const generation: {
        text: string
        message: AIMessage
        generationInfo?: Record<string, unknown>
      } = {
        text,
        // Falls back to a bare assistant message when the choice has none.
        message: this._convertCompletionsMessageToBaseMessage(
          part.message ?? { role: 'assistant' },
          data
        ) as AIMessage
      }
      generation.generationInfo = {
        ...(part.finish_reason ? { finish_reason: part.finish_reason } : {}),
        ...(part.logprobs ? { logprobs: part.logprobs } : {})
      }
      if (AIMessage.isInstance(generation.message)) {
        generation.message.usage_metadata = usageMetadata as unknown as AIMessage['usage_metadata']
      }
      // Rebuild the message from its own enumerable fields, dropping
      // LangChain serialization props (`lc_*`).
      // NOTE(review): presumably this keeps checkpoints serializable —
      // verify AIMessage's constructor accepts this reconstructed shape
      // without losing tool_calls/additional_kwargs.
      generation.message = new AIMessage(
        Object.fromEntries(Object.entries(generation.message).filter(([key]) => !key.startsWith('lc_')))
      )
      generations.push(generation)
    }

    return {
      generations,
      llmOutput: {
        // Keys follow LangChain's legacy tokenUsage shape.
        tokenUsage: {
          promptTokens: usageMetadata.input_tokens,
          completionTokens: usageMetadata.output_tokens,
          totalTokens: usageMetadata.total_tokens
        }
      }
    }
  }
}
55 changes: 50 additions & 5 deletions src/main/agent/runtime.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
/* eslint-disable @typescript-eslint/no-unused-vars */
import { createDeepAgent } from "deepagents"
import { getDefaultModel } from "../ipc/models"
import { getApiKey, getThreadCheckpointPath } from "../storage"
import Store from "electron-store"
import { ChatAnthropic } from "@langchain/anthropic"
import { ChatOpenAI } from "@langchain/openai"
import { ChatGoogleGenerativeAI } from "@langchain/google-genai"
import { getDefaultModel } from "../ipc/models"
import { getApiKey, getOpenworkDir, getThreadCheckpointPath } from "../storage"
import { DeepSeekChatOpenAI } from "./deepseek-model"
import { SqlJsSaver } from "../checkpointer/sqljs-saver"
import { LocalSandbox } from "./local-sandbox"

Expand All @@ -15,6 +17,34 @@ import type * as _lcZodTypes from "@langchain/core/utils/types"

import { BASE_SYSTEM_PROMPT } from "./system-prompt"

/**
 * Workaround: some LangGraph error paths expect `GraphInterrupt` /
 * `NodeInterrupt` errors to expose an `interrupts` array. Install a
 * prototype-level fallback on `Error` that yields `[]` for those error
 * names and `undefined` for every other Error. Idempotent.
 *
 * Fix over the original: the accessor now has a setter. Without one, any
 * code that assigns `err.interrupts = [...]` (e.g. a real GraphInterrupt
 * constructor) would throw a TypeError in strict mode, because assignment
 * through a getter-only prototype property is rejected.
 */
function ensureGraphInterruptHasInterrupts(): void {
  const errorProto = Error.prototype as { interrupts?: unknown }
  // Already installed (by us, or by a real implementation) — do nothing.
  if (Object.prototype.hasOwnProperty.call(errorProto, "interrupts")) {
    return
  }

  Object.defineProperty(errorProto, "interrupts", {
    configurable: true,
    get() {
      if (this && (this.name === "GraphInterrupt" || this.name === "NodeInterrupt")) {
        return []
      }
      return undefined
    },
    // Allow instances to shadow the fallback with a real own value.
    set(value: unknown) {
      Object.defineProperty(this, "interrupts", {
        value,
        writable: true,
        configurable: true,
        enumerable: true
      })
    }
  })
}

ensureGraphInterruptHasInterrupts()

// Settings store read by the agent runtime (e.g. autoApproveExecute).
// NOTE(review): assumes this "settings" store (cwd = Openwork dir) is the
// same file the models IPC module writes `autoApproveExecute` to — verify
// both Store configs use the same name AND cwd, or reads here will miss
// values written there.
const settingsStore = new Store({
  name: "settings",
  cwd: getOpenworkDir()
})

/**
 * Whether the user has opted to auto-approve shell command execution.
 * Defaults to false when the setting has never been written.
 */
function getAutoApproveExecute(): boolean {
  const stored = settingsStore.get("autoApproveExecute", false)
  return !!stored
}

/**
* Generate the full system prompt for the agent.
*
Expand Down Expand Up @@ -61,7 +91,7 @@ export async function closeCheckpointer(threadId: string): Promise<void> {
// Get the appropriate model instance based on configuration
function getModelInstance(
modelId?: string
): ChatAnthropic | ChatOpenAI | ChatGoogleGenerativeAI | string {
): ChatAnthropic | ChatOpenAI | ChatGoogleGenerativeAI | DeepSeekChatOpenAI | string {
const model = modelId || getDefaultModel()
console.log("[Runtime] Using model:", model)

Expand Down Expand Up @@ -89,7 +119,19 @@ function getModelInstance(
}
return new ChatOpenAI({
model,
openAIApiKey: apiKey
apiKey
})
} else if (model.startsWith("deepseek")) {
const apiKey = getApiKey("deepseek")
console.log("[Runtime] DeepSeek API key present:", !!apiKey)
if (!apiKey) {
throw new Error("DeepSeek API key not configured")
}
return new DeepSeekChatOpenAI({
model,
apiKey,
configuration: { baseURL: "https://api.deepseek.com" },
streaming: false
})
} else if (model.startsWith("gemini")) {
const apiKey = getApiKey("google")
Expand Down Expand Up @@ -163,6 +205,9 @@ export async function createAgentRuntime(options: CreateAgentRuntimeOptions) {

The workspace root is: ${workspacePath}`

const autoApproveExecute = getAutoApproveExecute()
const interruptOn = autoApproveExecute ? undefined : { execute: true }

const agent = createDeepAgent({
model,
checkpointer,
Expand All @@ -171,7 +216,7 @@ The workspace root is: ${workspacePath}`
// Custom filesystem prompt for absolute paths (requires deepagents update)
filesystemSystemPrompt,
// Require human approval for all shell commands
interruptOn: { execute: true }
...(interruptOn ? { interruptOn } : {})
} as Parameters<typeof createDeepAgent>[0])

console.log("[Runtime] Deep agent created with LocalSandbox at:", workspacePath)
Expand Down
30 changes: 29 additions & 1 deletion src/main/ipc/models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ const store = new Store({
const PROVIDERS: Omit<Provider, "hasApiKey">[] = [
{ id: "anthropic", name: "Anthropic" },
{ id: "openai", name: "OpenAI" },
{ id: "google", name: "Google" }
{ id: "google", name: "Google" },
{ id: "deepseek", name: "DeepSeek" }
]

// Available models configuration (updated Jan 2026)
Expand Down Expand Up @@ -161,6 +162,23 @@ const AVAILABLE_MODELS: ModelConfig[] = [
description: "Cost-efficient variant with faster response times",
available: true
},
// DeepSeek models (OpenAI-compatible)
{
id: 'deepseek-chat',
name: 'DeepSeek Chat (V3)',
provider: 'deepseek',
model: 'deepseek-chat',
description: 'General-purpose chat model with strong coding performance',
available: true
},
{
id: 'deepseek-reasoner',
name: 'DeepSeek Reasoner (R1)',
provider: 'deepseek',
model: 'deepseek-reasoner',
description: 'Reasoning-focused model for complex tasks',
available: true
},
// Google Gemini models
{
id: "gemini-3-pro-preview",
Expand Down Expand Up @@ -224,6 +242,16 @@ export function registerModelHandlers(ipcMain: IpcMain): void {
store.set("defaultModel", modelId)
})

// Get auto-approve setting for shell commands
ipcMain.handle('settings:getAutoApprove', async () => {
return store.get('autoApproveExecute', false) as boolean
})

// Set auto-approve setting for shell commands
ipcMain.handle('settings:setAutoApprove', async (_event, value: boolean) => {
store.set('autoApproveExecute', Boolean(value))
})

// Set API key for a provider (stored in ~/.openwork/.env)
ipcMain.handle("models:setApiKey", async (_event, { provider, apiKey }: SetApiKeyParams) => {
setApiKey(provider, apiKey)
Expand Down
1 change: 1 addition & 0 deletions src/main/storage.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ const ENV_VAR_NAMES: Record<ProviderId, string> = {
anthropic: "ANTHROPIC_API_KEY",
openai: "OPENAI_API_KEY",
google: "GOOGLE_API_KEY",
deepseek: "DEEPSEEK_API_KEY",
ollama: "" // Ollama doesn't require an API key
}

Expand Down
2 changes: 1 addition & 1 deletion src/main/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ export interface Run {
}

// Provider configuration
export type ProviderId = "anthropic" | "openai" | "google" | "ollama"
export type ProviderId = "anthropic" | "openai" | "google" | "deepseek" | "ollama"

export interface Provider {
id: ProviderId
Expand Down
4 changes: 4 additions & 0 deletions src/preload/index.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ interface CustomAPI {
setApiKey: (provider: string, apiKey: string) => Promise<void>
getApiKey: (provider: string) => Promise<string | null>
}
settings: {
getAutoApproveExecute: () => Promise<boolean>
setAutoApproveExecute: (value: boolean) => Promise<void>
}
workspace: {
get: (threadId?: string) => Promise<string | null>
set: (threadId: string | undefined, path: string | null) => Promise<string | null>
Expand Down
8 changes: 8 additions & 0 deletions src/preload/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,14 @@ const api = {
return ipcRenderer.invoke("models:deleteApiKey", provider)
}
},
  settings: {
    // Auto-approve flag for shell command execution, persisted in the main
    // process settings store.
    // NOTE(review): renderer-facing names (getAutoApproveExecute) differ from
    // the IPC channel names (settings:getAutoApprove) — confirm the channels
    // match the registered handlers.
    getAutoApproveExecute: (): Promise<boolean> => {
      return ipcRenderer.invoke('settings:getAutoApprove')
    },
    setAutoApproveExecute: (value: boolean): Promise<void> => {
      return ipcRenderer.invoke('settings:setAutoApprove', value)
    }
  },
workspace: {
get: (threadId?: string): Promise<string | null> => {
return ipcRenderer.invoke("workspace:get", threadId)
Expand Down
3 changes: 2 additions & 1 deletion src/renderer/src/components/chat/ApiKeyDialog.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ interface ApiKeyDialogProps {
// Per-provider UI hints: the API-key input placeholder and the environment
// variable the key is stored under.
const PROVIDER_INFO: Record<string, { placeholder: string; envVar: string }> = {
  anthropic: { placeholder: "sk-ant-...", envVar: "ANTHROPIC_API_KEY" },
  openai: { placeholder: "sk-...", envVar: "OPENAI_API_KEY" },
  google: { placeholder: "AIza...", envVar: "GOOGLE_API_KEY" },
  deepseek: { placeholder: "sk-...", envVar: "DEEPSEEK_API_KEY" }
}

export function ApiKeyDialog({
Expand Down
Loading