Commit e7383e0

feat: add command to select LLM model
1 parent 120ae73 commit e7383e0

4 files changed: +263 -3 lines changed

package.json (+5)

@@ -135,6 +135,11 @@
         "command": "cody-plus-plus.selectProvider",
         "title": "Select LLM Provider",
         "category": "Cody++"
+      },
+      {
+        "command": "cody-plus-plus.selectLlm",
+        "title": "Select LLM (switch model)",
+        "category": "Cody++"
       }
     ],
     "menus": {

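Side note (not part of this commit): the contributed command appears in the Command Palette as "Cody++: Select LLM (switch model)". As a rough illustration, it can also be triggered programmatically through the standard VS Code commands API once the extension has registered it; the wrapper function below is made up for the example.

```ts
import * as vscode from 'vscode'

// Illustrative only: trigger the model picker from other extension code or a test.
// executeCommand resolves with the boolean that the selectLLM handler returns.
async function switchCodyModel(): Promise<boolean | undefined> {
  return vscode.commands.executeCommand<boolean>('cody-plus-plus.selectLlm')
}
```
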
src/commands/__tests__/providerCommands.test.ts (+158 -1)

@@ -4,7 +4,7 @@ import * as vscode from 'vscode'
 import * as llmModule from '../../core/llm'
 import { CONFIG_KEYS, SUPPORTED_PROVIDERS } from '../../core/llm/constants'
 import * as workspaceConfigUtils from '../../utils/workspace-config'
-import { selectProvider } from '../providerCommands'
+import { selectLLM, selectProvider } from '../providerCommands'
 
 suite('Provider Commands Tests', () => {
   let sandbox: sinon.SinonSandbox
@@ -211,4 +211,161 @@ suite('Provider Commands Tests', () => {
       assert.ok(showInformationMessageStub.firstCall.args[0].includes('Successfully configured'))
     })
   })
+
+  suite('selectLLM', () => {
+    test('should return false when provider or API key is not configured', async () => {
+      // Simulate missing provider and API key
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(undefined)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns(undefined)
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, false)
+      assert.strictEqual(showWarningMessageStub.calledOnce, true)
+      assert.strictEqual(
+        showWarningMessageStub.firstCall.args[0],
+        'Provider and API key must be configured first. Use the "Select LLM Provider" command.'
+      )
+      assert.strictEqual(showQuickPickStub.called, false) // No further prompts
+    })
+
+    test('should return false when provider code is invalid', async () => {
+      // Simulate configured but invalid provider
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns('invalid-provider')
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, false)
+      assert.strictEqual(showErrorMessageStub.calledOnce, true)
+      assert.strictEqual(
+        showErrorMessageStub.firstCall.args[0],
+        "Configuration Error: Invalid provider code 'invalid-provider'."
+      )
+      assert.strictEqual(showQuickPickStub.called, false) // No further prompts
+    })
+
+    test('should return false when model selection is cancelled', async () => {
+      // Mock valid configuration
+      const mockProvider = SUPPORTED_PROVIDERS[0]
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(mockProvider.code)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+      configGet.withArgs(CONFIG_KEYS.MODEL).returns('current-model')
+
+      // Simulate user cancelling model selection
+      showQuickPickStub.resolves(undefined)
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, false)
+      assert.strictEqual(showQuickPickStub.calledOnce, true)
+      assert.strictEqual(showInformationMessageStub.calledOnce, true)
+      assert.strictEqual(showInformationMessageStub.firstCall.args[0], 'Model selection cancelled.')
+    })
+
+    test('should handle model fetch errors and fall back to input box', async () => {
+      // Mock valid configuration
+      const mockProvider = SUPPORTED_PROVIDERS[0]
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(mockProvider.code)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+      configGet.withArgs(CONFIG_KEYS.MODEL).returns('current-model')
+
+      // Simulate models fetch error
+      fetchModelsStub.rejects(new Error('Network error'))
+
+      // Mock model input (after fetch error)
+      showInputBoxStub.resolves('new-model')
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, true)
+      assert.strictEqual(showWarningMessageStub.calledOnce, true)
+      assert.ok(showWarningMessageStub.firstCall.args[0].includes('Could not fetch models'))
+      assert.strictEqual(showInputBoxStub.calledOnce, true)
+      assert.strictEqual(updateModelConfigStub.calledOnceWith('new-model'), true)
+    })
+
+    test('should return true without updating when selected model is the same as current', async () => {
+      // Mock valid configuration
+      const mockProvider = SUPPORTED_PROVIDERS[0]
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(mockProvider.code)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+      configGet.withArgs(CONFIG_KEYS.MODEL).returns('current-model')
+
+      // Simulate user selecting the same model
+      showQuickPickStub.resolves('current-model')
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, true)
+      assert.strictEqual(showQuickPickStub.calledOnce, true)
+      assert.strictEqual(showInformationMessageStub.calledOnce, true)
+      assert.strictEqual(
+        showInformationMessageStub.firstCall.args[0],
+        'Selected model is the same as the current one. No changes made.'
+      )
+      assert.strictEqual(updateModelConfigStub.called, false) // Config not updated
+    })
+
+    test('should successfully update model configuration for OpenAI-compatible provider', async () => {
+      // Find the OpenAI Compatible provider
+      const openAICompatibleProvider = SUPPORTED_PROVIDERS.find(
+        p => p.code === 'openai-compatible'
+      )!
+
+      // Mock valid configuration
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(openAICompatibleProvider.code)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+      configGet.withArgs(CONFIG_KEYS.OPENAI_BASE_URL).returns('https://custom-api.com')
+      configGet.withArgs(CONFIG_KEYS.MODEL).returns('current-model')
+
+      // Simulate user selecting a different model
+      showQuickPickStub.resolves('new-model')
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, true)
+      assert.strictEqual(showQuickPickStub.calledOnce, true)
+      assert.strictEqual(createProviderStub.calledOnce, true)
+      assert.deepStrictEqual(createProviderStub.firstCall.args, [
+        openAICompatibleProvider.code,
+        {
+          apiKey: 'test-api-key',
+          baseUrl: 'https://custom-api.com'
+        }
+      ])
+      assert.strictEqual(updateModelConfigStub.calledOnceWith('new-model'), true)
+      assert.strictEqual(showInformationMessageStub.calledOnce, true)
+      assert.strictEqual(
+        showInformationMessageStub.firstCall.args[0],
+        'Successfully updated LLM model to new-model'
+      )
+    })
+
+    test('should handle config update errors', async () => {
+      // Mock valid configuration
+      const mockProvider = SUPPORTED_PROVIDERS[0]
+      configGet.withArgs(CONFIG_KEYS.PROVIDER).returns(mockProvider.code)
+      configGet.withArgs(CONFIG_KEYS.API_KEY).returns('test-api-key')
+      configGet.withArgs(CONFIG_KEYS.MODEL).returns('current-model')
+
+      // Simulate user selecting a new model
+      showQuickPickStub.resolves('new-model')
+
+      // Mock configuration update error
+      const updateError = new Error('Configuration update failed')
+      updateModelConfigStub.rejects(updateError)
+
+      const result = await selectLLM()
+
+      assert.strictEqual(result, false)
+      assert.strictEqual(showQuickPickStub.calledOnce, true)
+      assert.strictEqual(updateModelConfigStub.calledOnceWith('new-model'), true)
+      assert.strictEqual(showErrorMessageStub.calledOnce, true)
+      assert.strictEqual(
+        showErrorMessageStub.firstCall.args[0],
+        'Failed to save model configuration: Configuration update failed'
+      )
+    })
+  })
 })
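
The new tests lean on stubs (configGet, the vscode.window prompt and message stubs, fetchModelsStub, createProviderStub, updateModelConfigStub) that are created in the suite's setup hook, which sits outside this hunk. A minimal sketch of what that wiring likely looks like, inferred from how the stubs are used above; the actual setup in the repository may differ.

```ts
setup(() => {
  sandbox = sinon.createSandbox()

  // Configuration reads: selectLLM calls config.get(...) for provider, API key, base URL and model.
  configGet = sandbox.stub()
  sandbox.stub(vscode.workspace, 'getConfiguration').returns({ get: configGet } as any)

  // UI prompts used by the command.
  showQuickPickStub = sandbox.stub(vscode.window, 'showQuickPick')
  showInputBoxStub = sandbox.stub(vscode.window, 'showInputBox')
  showInformationMessageStub = sandbox.stub(vscode.window, 'showInformationMessage')
  showWarningMessageStub = sandbox.stub(vscode.window, 'showWarningMessage')
  showErrorMessageStub = sandbox.stub(vscode.window, 'showErrorMessage')

  // LLM layer: createProvider returns an object whose fetchModels is stubbed.
  fetchModelsStub = sandbox.stub().resolves(['current-model', 'new-model'])
  createProviderStub = sandbox
    .stub(llmModule, 'createProvider')
    .returns({ fetchModels: fetchModelsStub } as any)

  // Config writer asserted on by the tests.
  updateModelConfigStub = sandbox.stub(workspaceConfigUtils, 'updateModelConfig').resolves()
})

teardown(() => sandbox.restore())
```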

src/commands/providerCommands.ts (+96 -1)

@@ -1,6 +1,11 @@
 import * as vscode from 'vscode'
 import { createProvider } from '../core/llm'
-import { CONFIG_KEYS, LLMProviderDetails, SUPPORTED_PROVIDERS } from '../core/llm/constants'
+import {
+  CONFIG_KEYS,
+  LLMProviderDetails,
+  SUPPORTED_PROVIDERS,
+  SUPPORTED_PROVIDER_CODES
+} from '../core/llm/constants'
 import {
   updateApiKeyConfig,
   updateBaseUrlConfig,
@@ -143,3 +148,93 @@ export const selectProvider = async (): Promise<boolean> => {
     return false // Indicate failure during update
   }
 }
+
+export const selectLLM = async (): Promise<boolean> => {
+  // Get current configuration
+  const config = vscode.workspace.getConfiguration('codyPlusPlus')
+  const currentProviderCode = config.get<string>(CONFIG_KEYS.PROVIDER) || 'openai'
+  const currentApiKey = config.get<string>(CONFIG_KEYS.API_KEY)
+  const currentBaseUrl = config.get<string>(CONFIG_KEYS.OPENAI_BASE_URL)
+  const currentModel = config.get<string>(CONFIG_KEYS.MODEL)
+
+  // Ensure provider and API key are set
+  if (!currentProviderCode || !currentApiKey) {
+    vscode.window.showWarningMessage(
+      'Provider and API key must be configured first. Use the "Select LLM Provider" command.'
+    )
+    return false
+  }
+
+  const providerDetails = SUPPORTED_PROVIDERS.find(p => p.code === currentProviderCode)
+  if (!providerDetails) {
+    vscode.window.showErrorMessage(
+      `Configuration Error: Invalid provider code '${currentProviderCode}'.`
+    )
+    return false
+  }
+
+  let models: string[] = []
+  try {
+    // Use current config to create provider for fetching models
+    const provider = createProvider(currentProviderCode as SUPPORTED_PROVIDER_CODES, {
+      apiKey: currentApiKey,
+      baseUrl:
+        currentProviderCode === 'openai-compatible' ? currentBaseUrl : providerDetails.baseURL
+    })
+    models = await provider.fetchModels()
+  } catch (error: any) {
+    console.error(`Failed to fetch models for ${providerDetails.name}:`, error)
+    vscode.window.showWarningMessage(
+      `Could not fetch models. Error: ${error.message}. Please enter the model name manually.`
+    )
+    // Allow manual entry even if fetch fails
+  }
+
+  let modelInput: string | undefined
+  if (models.length > 0) {
+    modelInput = await vscode.window.showQuickPick(models, {
+      placeHolder: `Select a model (current: ${currentModel || providerDetails.defaultModel})`,
+      title: `Choose ${providerDetails.name} Model`,
+      canPickMany: false,
+      ignoreFocusOut: true
+    })
+  } else {
+    modelInput = await vscode.window.showInputBox({
+      prompt: `Enter model name (leave empty for default: ${providerDetails.defaultModel})`,
+      placeHolder: providerDetails.defaultModel,
+      value: currentModel || '', // Use empty string if undefined
+      ignoreFocusOut: true
+    })
+  }
+
+  if (modelInput === undefined) {
+    vscode.window.showInformationMessage('Model selection cancelled.')
+    return false // Cancelled
+  }
+
+  // Use provider default if model input is empty string, otherwise use the input
+  const finalModel = modelInput || providerDetails.defaultModel
+
+  if (finalModel === currentModel) {
+    vscode.window.showInformationMessage(
+      'Selected model is the same as the current one. No changes made.'
+    )
+    return true // No change needed, considered success
+  }
+
+  // Update only the model configuration
+  try {
+    await updateModelConfig(finalModel)
+    console.log(
+      `Successfully updated LLM model to ${finalModel} for provider ${providerDetails.name}`
+    )
+    vscode.window.showInformationMessage(`Successfully updated LLM model to ${finalModel}`)
+    return true // Indicate success
+  } catch (error: any) {
+    console.error('Failed to update model configuration:', error)
+    vscode.window.showErrorMessage(
+      `Failed to save model configuration: ${error.message || 'Unknown error'}`
+    )
+    return false // Indicate failure during update
+  }
+}
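
selectLLM persists the choice via updateModelConfig, which is imported from ../utils/workspace-config and is not shown in this diff. A minimal sketch of the shape such a helper would plausibly have, assuming it writes the model key to the extension's global settings; the real implementation may differ.

```ts
import * as vscode from 'vscode'
import { CONFIG_KEYS } from '../core/llm/constants'

// Hypothetical sketch of the helper used above: persist the chosen model
// under the codyPlusPlus configuration section.
export async function updateModelConfig(model: string): Promise<void> {
  const config = vscode.workspace.getConfiguration('codyPlusPlus')
  await config.update(CONFIG_KEYS.MODEL, model, vscode.ConfigurationTarget.Global)
}
```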

src/extension.ts (+4 -1)

@@ -3,7 +3,7 @@ import * as vscode from 'vscode'
 // Import custom command handlers
 import { addCustomCommand, editCustomCommand } from './commands/addCustomCommand'
 import { addFile, addFilesSmart, addFolder, addSelection } from './commands/addToCody'
-import { selectProvider } from './commands/providerCommands'
+import { selectLLM, selectProvider } from './commands/providerCommands'
 // Import services and views
 import { CustomCommandService } from './services/customCommand.service'
 import { TelemetryService } from './services/telemetry.service'
@@ -110,6 +110,8 @@ export async function activate(context: vscode.ExtensionContext) {
     selectProvider
   )
 
+  const selectLlmDisposable = vscode.commands.registerCommand('cody-plus-plus.selectLlm', selectLLM)
+
   // Create and register the webview view for displaying custom commands in the sidebar
   const customCommandsWebviewProvider = new MainWebviewView(
     context.extensionUri,
@@ -132,6 +134,7 @@
     addSelectionRecursiveDisposable,
     addFilesSmartDisposable,
     selectProviderDisposable,
+    selectLlmDisposable,
     addCustomCommandDisposable,
     editCommandDisposable,
     deleteCommandDisposable
