24 changes: 13 additions & 11 deletions src/vs/workbench/contrib/cortexide/browser/autocompleteService.ts
@@ -791,10 +791,16 @@ export class AutocompleteService extends Disposable implements IAutocompleteServ

// Detect if using local provider for prefix/suffix optimization
const featureName: FeatureName = 'Autocomplete'
-		const modelSelection = this._settingsService.state.modelSelectionOfFeature[featureName]
-		const isLocal = modelSelection && modelSelection.providerName !== 'auto'
-			? isLocalProvider(modelSelection.providerName, this._settingsService.state.settingsOfProvider)
-			: false
+		const modelSelection = this._settingsService.resolveAutoModelSelection(
+			this._settingsService.state.modelSelectionOfFeature[featureName]
+		)
+
+		if (!modelSelection || modelSelection.providerName === 'auto') {
+			// No model available - skip autocomplete
+			return []
+		}
+
+		const isLocal = isLocalProvider(modelSelection.providerName, this._settingsService.state.settingsOfProvider)

const { shouldGenerate, predictionType, llmPrefix, llmSuffix, stopTokens } = getCompletionOptions(prefixAndSuffix, relevantContext, justAcceptedAutocompletion, isLocal)

@@ -822,15 +828,11 @@ export class AutocompleteService extends Disposable implements IAutocompleteServ
console.log('starting autocomplete...', predictionType)

const overridesOfModel = this._settingsService.state.overridesOfModel
-		// Skip "auto" - it's not a real provider
-		const modelSelectionOptions = modelSelection && !(modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto')
-			? this._settingsService.state.optionsOfModelSelection[featureName][modelSelection.providerName]?.[modelSelection.modelName]
-			: undefined
+		// Model selection is already resolved above, so we can safely access options
+		const modelSelectionOptions = this._settingsService.state.optionsOfModelSelection[featureName]?.[modelSelection.providerName]?.[modelSelection.modelName]

// Warm up local model in background (fire-and-forget, doesn't block)
-		if (modelSelection && modelSelection.providerName !== 'auto' && modelSelection.modelName !== 'auto') {
-			this._modelWarmupService.warmupModelIfNeeded(modelSelection.providerName, modelSelection.modelName, featureName)
-		}
+		this._modelWarmupService.warmupModelIfNeeded(modelSelection.providerName, modelSelection.modelName, featureName)

// set parameters of `newAutocompletion` appropriately
newAutocompletion.llmPromise = new Promise((resolve, reject) => {
430 changes: 353 additions & 77 deletions src/vs/workbench/contrib/cortexide/browser/chatThreadService.ts

Large diffs are not rendered by default.

20 changes: 13 additions & 7 deletions src/vs/workbench/contrib/cortexide/browser/quickEditActions.ts
@@ -164,10 +164,12 @@ registerAction2(class extends Action2 {

if (!instruction) return

-		// Check for model selection
-		const modelSelection = settingsService.state.modelSelectionOfFeature['Chat']
+		// Check for model selection and resolve "auto" if needed
+		const modelSelection = settingsService.resolveAutoModelSelection(
+			settingsService.state.modelSelectionOfFeature['Chat']
+		)
if (!modelSelection) {
-			notificationService.warn('Please select a model in CortexIDE Settings to use Inline Edit.')
+			notificationService.error('No model provider configured. Please configure a model provider in CortexIDE Settings.')
return
}

@@ -232,11 +234,15 @@ ${contextCode}

const userMessage = `Edit instruction: ${instruction}\n\nGenerate a SEARCH/REPLACE block for the selected code.`

+		// Ensure modelSelection is resolved and not null
+		if (!modelSelection || modelSelection.providerName === 'auto') {
+			notificationService.error('Failed to resolve model selection. Please configure a model provider in CortexIDE Settings.')
+			return
+		}

const chatOptions = settingsService.state.optionsOfModelSelection['Chat']
-		// Skip "auto" - it's not a real provider
-		const modelOptions = modelSelection && !(modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto')
-			? chatOptions[modelSelection.providerName]?.[modelSelection.modelName]
-			: undefined
+		// Model selection is already resolved above, so we can safely access options
+		const modelOptions = chatOptions[modelSelection.providerName]?.[modelSelection.modelName]
const overrides = settingsService.state.overridesOfModel

requestId = llmMessageService.sendLLMMessage({
@@ -2239,17 +2239,24 @@ const MCPToolWrapper = ({ toolMessage }: WrapperProps<string>) => {

if (toolMessage.type === 'success' || toolMessage.type === 'tool_request') {
const { result } = toolMessage
-		const resultStr = result ? mcpService.stringifyResult(result) : 'null'
-		componentParams.children = <ToolChildrenWrapper>
-			<SmallProseWrapper>
-				<ChatMarkdownRender
-					string={`\`\`\`json\n${resultStr}\n\`\`\``}
-					chatMessageLocation={undefined}
-					isApplyEnabled={false}
-					isLinkDetectionEnabled={true}
-				/>
-			</SmallProseWrapper>
-		</ToolChildrenWrapper>
+		if (result) {
+			const resultStr = mcpService.stringifyResult(result)
+			// Check if result is text (not JSON) - text events return plain text, others return JSON
+			// Type guard: check if result has 'event' property and it's 'text'
+			const isTextResult = typeof result === 'object' && result !== null && 'event' in result && (result as any).event === 'text'
+			// If it's text, display as markdown; otherwise display as JSON code block
+			const displayContent = isTextResult ? resultStr : `\`\`\`json\n${resultStr}\n\`\`\``
+			componentParams.children = <ToolChildrenWrapper>
+				<SmallProseWrapper>
+					<ChatMarkdownRender
+						string={displayContent}
+						chatMessageLocation={undefined}
+						isApplyEnabled={false}
+						isLinkDetectionEnabled={true}
+					/>
+				</SmallProseWrapper>
+			</ToolChildrenWrapper>
+		}
}
else if (toolMessage.type === 'tool_error') {
const { result } = toolMessage
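The `(result as any).event` comparison above works, but a user-defined type guard expresses the same check while preserving type information at the call site. A sketch under the assumption (not confirmed by this diff) that the MCP result union contains a member shaped like `{ event: 'text' }`:

```typescript
// Hypothetical shape - the real MCP result union may differ.
type TextResult = { event: 'text'; text: string }

// After a true return, TypeScript narrows `r` to TextResult,
// so no `as any` cast is needed where the fields are read.
const isTextResult = (r: unknown): r is TextResult =>
	typeof r === 'object' && r !== null && 'event' in r && (r as { event?: unknown }).event === 'text'
```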
@@ -3375,11 +3382,13 @@ const PlanComponent = React.memo(({ message, isCheckpointGhost, threadId, messag
{toolMsg.result}
</div>
)}
-	{isSuccess && toolMsg.result && typeof toolMsg.result === 'object' && (
+	{isSuccess && toolMsg.result && (
<details className="mt-1">
<summary className="text-void-fg-3 cursor-pointer text-xs hover:text-void-fg-2">View result</summary>
<pre className="mt-1 p-2 bg-void-bg-2 rounded text-xs overflow-auto max-h-32 border border-void-border-1">
-			{JSON.stringify(toolMsg.result, null, 2)}
+			{typeof toolMsg.result === 'string'
+				? toolMsg.result
+				: JSON.stringify(toolMsg.result, null, 2)}
</pre>
</details>
)}
@@ -4101,7 +4110,10 @@ export const SidebarChat = () => {
await addImagesRaw(files);
}, [addImagesRaw, settingsState, accessor]);

-	const isDisabled = (instructionsAreEmpty && imageAttachments.length === 0 && pdfAttachments.length === 0) || !!isFeatureNameDisabled('Chat', settingsState)
+	// Compute isDisabled - ensure it's reactive to settings changes
+	const isDisabled = useMemo(() => {
+		return (instructionsAreEmpty && imageAttachments.length === 0 && pdfAttachments.length === 0) || !!isFeatureNameDisabled('Chat', settingsState)
+	}, [instructionsAreEmpty, imageAttachments.length, pdfAttachments.length, settingsState])

const sidebarRef = useRef<HTMLDivElement>(null)
const scrollContainerRef = useRef<HTMLDivElement | null>(null)
@@ -4561,11 +4573,14 @@ export const SidebarChat = () => {
}, [setInstructionsAreEmpty])
const onKeyDown = useCallback((e: KeyboardEvent<HTMLTextAreaElement>) => {
if (e.key === 'Enter' && !e.shiftKey && !e.nativeEvent.isComposing) {
-			onSubmit()
+			// Check isDisabled again at the time of key press (not closure value)
+			if (!isDisabled && !isRunning) {
+				onSubmit()
+			}
} else if (e.key === 'Escape' && isRunning) {
onAbort()
}
-	}, [onSubmit, onAbort, isRunning])
+	}, [onSubmit, onAbort, isRunning, isDisabled])

// Context usage calculation + warning (partially memoized - draft tokens calculated on each render)
const [ctxWarned, setCtxWarned] = useState(false)
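Both hunks in this file address the same React pitfall: a `useCallback` or `useMemo` closure captures the values in scope when it was created, so any captured value omitted from the dependency array is read stale on later invocations. A minimal sketch of the hazard, with hypothetical names (`Example`, `onKeyStale`, `onKeyFresh`):

```typescript
import React, { useCallback, useState } from 'react'

export function Example() {
	const [isDisabled, setIsDisabled] = useState(false)

	// BUG: isDisabled is read inside the callback but missing from the deps
	// array, so React keeps the closure created on the first render, which
	// still sees the initial `false` even after setIsDisabled(true) runs.
	const onKeyStale = useCallback(() => {
		if (!isDisabled) { /* submit */ }
	}, [])

	// FIX: list every captured value; the callback is recreated when it changes.
	const onKeyFresh = useCallback(() => {
		if (!isDisabled) { /* submit */ }
	}, [isDisabled])

	return <button onKeyDown={onKeyFresh} onClick={() => setIsDisabled(d => !d)}>toggle</button>
}
```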
12 changes: 8 additions & 4 deletions src/vs/workbench/contrib/cortexide/browser/toolsService.ts
@@ -569,8 +569,10 @@ export class ToolsService implements IToolsService {

rewrite_file: async ({ uri, newContent }) => {
await cortexideModelService.initializeModel(uri)
-			if (this.commandBarService.getStreamState(uri) === 'streaming') {
-				throw new Error(`Another LLM is currently making changes to this file. Please stop streaming for now and ask the user to resume later.`)
+			const streamState = this.commandBarService.getStreamState(uri)
+			if (streamState === 'streaming') {
+				// Only block if actually streaming to the same file - allow if streaming to a different file
+				throw new Error(`Cannot edit file ${uri.fsPath}: Another operation is currently streaming changes to this file. Please wait for it to complete or cancel it first.`)
}
await editCodeService.callBeforeApplyOrEdit(uri)
editCodeService.instantlyRewriteFile({ uri, newContent })
@@ -585,8 +587,10 @@

edit_file: async ({ uri, searchReplaceBlocks }) => {
await cortexideModelService.initializeModel(uri)
-			if (this.commandBarService.getStreamState(uri) === 'streaming') {
-				throw new Error(`Another LLM is currently making changes to this file. Please stop streaming for now and ask the user to resume later.`)
+			const streamState = this.commandBarService.getStreamState(uri)
+			if (streamState === 'streaming') {
+				// Only block if actually streaming to the same file - allow if streaming to a different file
+				throw new Error(`Cannot edit file ${uri.fsPath}: Another operation is currently streaming changes to this file. Please wait for it to complete or cancel it first.`)
}
await editCodeService.callBeforeApplyOrEdit(uri)
editCodeService.instantlyApplySearchReplaceBlocks({ uri, searchReplaceBlocks })
19 changes: 15 additions & 4 deletions src/vs/workbench/contrib/cortexide/common/codeReviewService.ts
@@ -176,10 +176,21 @@ Provide your review annotations as a JSON array:`;

// Get model selection from settings (use Chat feature model selection)
const settings = this.settingsService.state;
-		const modelSelection = settings.modelSelectionOfFeature['Chat'] || { providerName: 'auto', modelName: 'auto' };
-		const modelOptions = modelSelection && !(modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto')
-			? settings.optionsOfModelSelection['Chat']?.[modelSelection.providerName]?.[modelSelection.modelName]
-			: undefined;
+		const modelSelection = this.settingsService.resolveAutoModelSelection(
+			settings.modelSelectionOfFeature['Chat'] || { providerName: 'auto', modelName: 'auto' }
+		);
+
+		if (!modelSelection) {
+			return {
+				uri,
+				annotations: [],
+				summary: 'No model provider configured. Please configure a model provider in CortexIDE Settings.',
+				success: false,
+				error: 'No models available',
+			};
+		}
+
+		const modelOptions = settings.optionsOfModelSelection['Chat']?.[modelSelection.providerName]?.[modelSelection.modelName];
const overrides = settings.overridesOfModel;

// Call LLM directly
@@ -80,6 +80,12 @@ export interface ICortexideSettingsService {
addMCPUserStateOfNames(userStateOfName: MCPUserStateOfName): Promise<void>;
removeMCPUserStateOfNames(serverNames: string[]): Promise<void>;
setMCPServerState(serverName: string, state: MCPUserState): Promise<void>;

+	/**
+	 * Resolve "auto" model selection to a real model, or return null if no models are available
+	 * This is a shared utility used across all features for consistent auto selection handling
+	 */
+	resolveAutoModelSelection(modelSelection: ModelSelection | null | undefined): ModelSelection | null;
}


@@ -666,6 +672,37 @@ class VoidSettingsService extends Disposable implements ICortexideSettingsServic
this._metricsService.capture('Update MCP Server State', { serverName, state });
}

+	/**
+	 * Resolve "auto" model selection to a real model, or return null if no models are available
+	 * This is a shared utility used across all features for consistent auto selection handling
+	 */
+	resolveAutoModelSelection(modelSelection: ModelSelection | null | undefined): ModelSelection | null {
+		// If selection is null/undefined or not "auto", return as-is
+		if (!modelSelection || !(modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto')) {
+			return modelSelection || null
+		}
+
+		// Try to find the first available configured model (prefer online models first, then local)
+		const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM']
+
+		for (const providerName of providerNames) {
+			const providerSettings = this.state.settingsOfProvider[providerName]
+			if (providerSettings && providerSettings._didFillInProviderSettings) {
+				const models = providerSettings.models || []
+				const firstModel = models.find(m => !m.isHidden)
+				if (firstModel) {
+					return {
+						providerName,
+						modelName: firstModel.modelName,
+					}
+				}
+			}
+		}
+
+		// No models available
+		return null
+	}

}
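The order of `providerNames` doubles as the preference order: the first configured provider with a visible model wins. Every feature touched by this PR (autocomplete, quick edit, code review) now funnels through this method; a minimal call-site sketch of that pattern, where the import path and the `resolveChatModel`/`onError` names are hypothetical:

```typescript
import type { ICortexideSettingsService } from './cortexideSettingsService' // path assumed

// Hypothetical helper showing the call-site pattern each feature now follows.
function resolveChatModel(settingsService: ICortexideSettingsService, onError: (msg: string) => void) {
	const modelSelection = settingsService.resolveAutoModelSelection(
		settingsService.state.modelSelectionOfFeature['Chat']
	)
	if (!modelSelection) {
		// null means no provider has completed setup - fail loudly rather than send to 'auto'
		onError('No model provider configured. Please configure a model provider in CortexIDE Settings.')
		return null
	}
	// Safe: providerName/modelName now refer to a real, configured model.
	const modelOptions = settingsService.state.optionsOfModelSelection['Chat']?.[modelSelection.providerName]?.[modelSelection.modelName]
	return { modelSelection, modelOptions }
}
```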


@@ -519,7 +519,9 @@ export const defaultGlobalSettings: GlobalSettings = {
syncSCMToChat: true,
enableFastApply: true,
chatMode: 'agent',
-	autoApprove: {},
+	autoApprove: {
+		'edits': true, // Auto-approve basic file edits by default (similar to Cursor's behavior)
+	},
showInlineSuggestions: true,
includeToolLintErrors: true,
isOnboardingComplete: false,
@@ -179,11 +179,26 @@ class EditRiskScoringService extends Disposable implements IEditRiskScoringServi
// Model not available, skip this check
}

-		// 7. Empty file creation (low risk)
+		// 7. File creation (low risk, especially for non-critical files)
if (context.operation === 'create_file_or_folder') {
-			riskScore = Math.max(0.1, riskScore); // Minimum risk for new files
-			if (riskScore === 0.1) {
-				riskFactors.push('New file creation (low risk)');
+			// Only add minimum risk if not already a critical file
+			if (!isCriticalFile) {
+				riskScore = Math.max(0.05, riskScore); // Very low risk for new non-critical files
+				if (riskScore === 0.05) {
+					riskFactors.push('New file creation (low risk)');
+				}
+			} else {
+				riskScore = Math.max(0.1, riskScore); // Slightly higher for critical files
+			}
}

+		// 8. Small edits are very low risk (basic operations like adding comments, small changes)
+		if (context.operation === 'edit_file' && context.originalContent && context.newContent) {
+			const sizeChangeRatio = Math.abs(context.newContent.length - context.originalContent.length) / Math.max(context.originalContent.length, 1);
+			if (sizeChangeRatio < 0.05 && !isCriticalFile) {
+				// Very small changes (< 5%) to non-critical files are very low risk
+				riskScore = Math.max(0.05, riskScore);
+				confidenceFactors.push('Very small change (< 5%)');
+			}
+		}
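A worked instance of the size-change heuristic in check 8. One subtlety: `Math.max(0.05, riskScore)` establishes a floor rather than a cap, so it raises a score that was below 0.05 but does not reduce a higher score accumulated by earlier checks:

```typescript
// A 1000-char file edited to 1040 chars: |1040 - 1000| / max(1000, 1) = 0.04
const originalContent = 'x'.repeat(1000)
const newContent = 'x'.repeat(1040)
const sizeChangeRatio = Math.abs(newContent.length - originalContent.length) / Math.max(originalContent.length, 1)
console.log(sizeChangeRatio) // 0.04 -> under the 0.05 threshold, so the edit qualifies

console.log(Math.max(0.05, 0.02)) // 0.05: a lower running score is raised to the floor
console.log(Math.max(0.05, 0.8))  // 0.8: a higher running score is left unchanged
```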

@@ -68,7 +68,7 @@ export const sendLLMMessage = async ({
const onFinalMessage: OnFinalMessage = (params) => {
const { fullText, fullReasoning, toolCall } = params
if (_didAbort) return
-		captureLLMEvent(`${loggingName} - Received Full Message`, { messageLength: fullText.length, reasoningLength: fullReasoning?.length, duration: new Date().getMilliseconds() - submit_time.getMilliseconds(), toolCallName: toolCall?.name })
+		captureLLMEvent(`${loggingName} - Received Full Message`, { messageLength: fullText.length, reasoningLength: fullReasoning?.length, duration: Date.now() - submit_time.getTime(), toolCallName: toolCall?.name })
onFinalMessage_(params)
}
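The old duration math was broken because `Date.prototype.getMilliseconds()` returns only the 0-999 millisecond component of its date, not an epoch offset, so subtracting two of them measures nothing and can go negative. A minimal demonstration:

```typescript
const submit_time = new Date('2024-01-01T00:00:00.250Z')
const done = new Date('2024-01-01T00:00:02.100Z') // 1850ms later

console.log(done.getMilliseconds() - submit_time.getMilliseconds()) // -150 (wrong)
console.log(done.getTime() - submit_time.getTime())                 // 1850 (correct elapsed ms)
```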

@@ -109,7 +109,7 @@
try {
// Skip "auto" - it's not a real provider
if (providerName === 'auto') {
-			onError({ message: `Error: Cannot use "auto" provider - must resolve to a real model first.`, fullError: null })
+			onError({ message: `Error: Cannot use "auto" provider - must resolve to a real model first. This usually means auto model selection failed. Please check your model provider settings or select a specific model.`, fullError: null })
return
}
const implementation = sendLLMMessageToProviderImplementation[providerName]