
Commit d4585af

feat: add comprehensive testing guidelines and implement tests for LLM constants and providers
1 parent ed18f70 commit d4585af

File tree: 7 files changed (+659 -0 lines changed)


.sourcegraph/tests.rule.md

+77
# Testing Guidelines for Cody++

## File Patterns

- `src/**/__tests__/**/*.test.ts` - All test files

## Testing Framework

Cody++ uses:

- Mocha testing framework with `suite` and `test` functions
- Sinon for stubbing and mocking dependencies
- Assert from Node.js for assertions
- VS Code test API for extension testing

## Test Structure Guidelines

1. Location and Naming

- Place test files in a `__tests__` directory adjacent to the file being tested
- Name test files with the pattern `[filename].test.ts`
- Use descriptive test names following the pattern: `should <expected behavior> when <condition>`
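For example, the provider factory tests added in this commit follow that naming pattern:

```typescript
// From src/core/llm/__tests__/index.test.ts (shown later in this commit)
test('should create OpenAI provider when configured', () => {
  // ...assertions against the created provider...
})
```
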
2. Tests should follow the pattern:

- `setup()` - Set up test dependencies and stubs
- Test cases with descriptive names
- `teardown()` - Clean up stubs and resources

3. Use Sinon sandbox pattern:

```typescript
let sandbox: sinon.SinonSandbox

setup(() => {
  sandbox = sinon.createSandbox()
  // Setup stubs
})

teardown(() => {
  sandbox.restore()
})
```

4. For VS Code API testing:

- Use `vscode.workspace` stubs for configuration
- Mock filesystem operations
- Use proper event emitters for testing events
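A minimal sketch of the configuration-stub approach, mirroring what the provider factory tests in this commit do (the `'someSetting'` key and its value are purely illustrative):

```typescript
import * as sinon from 'sinon'
import * as vscode from 'vscode'

let sandbox: sinon.SinonSandbox
let configGetStub: sinon.SinonStub

setup(() => {
  sandbox = sinon.createSandbox()
  configGetStub = sandbox.stub()
  // Any code that reads workspace configuration now sees the stubbed values
  sandbox.stub(vscode.workspace, 'getConfiguration').returns({
    get: configGetStub
  } as any)
  configGetStub.withArgs('someSetting').returns('stubbed-value')
})

teardown(() => {
  sandbox.restore()
})
```
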
5. For service testing (like telemetry):

- Reset singletons between tests
- Mock external dependencies
- Verify correct calls are made
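A sketch of the singleton-reset and call-verification ideas; `TelemetryService` here is a hypothetical stand-in, not an actual class in this repo:

```typescript
import * as sinon from 'sinon'

// Hypothetical singleton, used only to illustrate the pattern
class TelemetryService {
  private static instance: TelemetryService | undefined

  static getInstance(): TelemetryService {
    if (!TelemetryService.instance) {
      TelemetryService.instance = new TelemetryService()
    }
    return TelemetryService.instance
  }

  static resetInstance(): void {
    TelemetryService.instance = undefined
  }

  // External call that tests stub out
  send(eventName: string): void {
    void eventName
  }

  trackFilesAdded(count: number): void {
    this.send(`files-added:${count}`)
  }
}

suite('TelemetryService (illustrative)', () => {
  let sandbox: sinon.SinonSandbox

  setup(() => {
    sandbox = sinon.createSandbox()
    TelemetryService.resetInstance() // fresh singleton for every test
  })

  teardown(() => {
    sandbox.restore()
  })

  test('should send event when files are added', () => {
    const service = TelemetryService.getInstance()
    const sendStub = sandbox.stub(service, 'send') // mock the external dependency

    service.trackFilesAdded(2)

    sinon.assert.calledOnce(sendStub)
    sinon.assert.calledWith(sendStub, 'files-added:2')
  })
})
```
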
## Extension Testing

For extension-level tests:

- Verify command registration
- Test activation events
- Use timeouts for async operations
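A hedged sketch of a command-registration check; the command ID below is a placeholder, not necessarily one actually contributed by Cody++:

```typescript
import * as assert from 'assert'
import * as vscode from 'vscode'

suite('Extension', () => {
  test('should register its commands when activated', async function () {
    // Allow extra time: activating the extension host can be slow
    this.timeout(10000)

    const commands = await vscode.commands.getCommands(true)
    // Substitute a command ID actually contributed in package.json
    assert.ok(commands.includes('codyPlusPlus.someCommand'))
  })
})
```
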
## Running Tests

After writing or modifying tests, always run the full test suite to ensure all tests pass:

```
pnpm test
```

This command will:

- Compile the TypeScript files
- Start the VS Code extension host
- Execute all test suites
- Report test results and any failures

src/core/llm/__tests__/constants.test.ts

+68
```typescript
import * as assert from 'assert'
import {
  API_ENDPOINTS,
  CONFIG_KEYS,
  CONTENT_TYPES,
  DEFAULT_MODELS,
  ERROR_MESSAGES,
  HEADERS,
  SOURCEGRAPH_SUPPORTED_LLM_PROVIDERS
} from '../constants'

suite('LLM Constants', () => {
  test('should export CONFIG_KEYS with correct values', () => {
    assert.deepStrictEqual(CONFIG_KEYS, {
      API_KEY: 'llmApiKey',
      MODEL: 'llmModel',
      OPENAI_BASE_URL: 'openaiBaseUrl'
    })
  })

  test('should export API_ENDPOINTS with correct values', () => {
    assert.ok(API_ENDPOINTS.SOURCEGRAPH)
    assert.strictEqual(API_ENDPOINTS.SOURCEGRAPH.BASE_URL, 'https://sourcegraph.com')
    assert.strictEqual(API_ENDPOINTS.SOURCEGRAPH.MODELS, '/.api/modelconfig/supported-models.json')
    assert.strictEqual(API_ENDPOINTS.SOURCEGRAPH.COMPLETIONS, '/.api/completions/stream')
    assert.strictEqual(API_ENDPOINTS.SOURCEGRAPH.GRAPHQL, '/.api/graphql')

    assert.ok(API_ENDPOINTS.OPENAI)
    assert.strictEqual(API_ENDPOINTS.OPENAI.DEFAULT_BASE_URL, 'https://api.openai.com/v1')
    assert.strictEqual(API_ENDPOINTS.OPENAI.MODELS, '/models')
    assert.strictEqual(API_ENDPOINTS.OPENAI.CHAT_COMPLETIONS, '/chat/completions')
  })

  test('should export DEFAULT_MODELS with correct values', () => {
    assert.deepStrictEqual(DEFAULT_MODELS, {
      SOURCEGRAPH: 'claude-3.5-sonnet',
      OPENAI: 'gpt-4o-mini'
    })
  })

  test('should export SOURCEGRAPH_SUPPORTED_LLM_PROVIDERS with correct values', () => {
    assert.deepStrictEqual(SOURCEGRAPH_SUPPORTED_LLM_PROVIDERS, ['anthropic', 'google', 'openai'])
  })

  test('should export ERROR_MESSAGES with correct values', () => {
    assert.deepStrictEqual(ERROR_MESSAGES, {
      NOT_AUTHENTICATED: 'Authentication required. Please sign in.',
      INVALID_TOKEN: 'Invalid authentication token.',
      NO_TOKEN: 'No token provided.',
      NETWORK_ERROR: 'Network request failed.',
      UNKNOWN_ERROR: 'An unknown error occurred.',
      INVALID_RESPONSE: 'Invalid response format from API.'
    })
  })

  test('should export HEADERS with correct values', () => {
    assert.deepStrictEqual(HEADERS, {
      CONTENT_TYPE: 'Content-Type',
      AUTHORIZATION: 'Authorization'
    })
  })

  test('should export CONTENT_TYPES with correct values', () => {
    assert.deepStrictEqual(CONTENT_TYPES, {
      JSON: 'application/json'
    })
  })
})
```

src/core/llm/__tests__/index.test.ts

+70
```typescript
import * as assert from 'assert'
import * as sinon from 'sinon'
import * as vscode from 'vscode'
import { LLMProvider } from '../../../constants/llm'
import { createProvider } from '../index'
import { OpenAIProvider } from '../providers/openai'
import { SourcegraphProvider } from '../providers/sourcegraph'

suite('LLM Provider Factory', () => {
  let sandbox: sinon.SinonSandbox
  let getConfigurationStub: sinon.SinonStub
  let configGetStub: sinon.SinonStub

  setup(() => {
    sandbox = sinon.createSandbox()

    // Mock VS Code workspace configuration
    configGetStub = sandbox.stub()

    getConfigurationStub = sandbox.stub(vscode.workspace, 'getConfiguration')
    getConfigurationStub.returns({
      get: configGetStub
    })
  })

  teardown(() => {
    sandbox.restore()
  })

  test('should create OpenAI provider when configured', () => {
    configGetStub.withArgs('llmProvider').returns(LLMProvider.OpenAI)

    const provider = createProvider()

    // Verify getConfiguration was called at least once with 'codyPlusPlus'
    sinon.assert.calledWith(getConfigurationStub, 'codyPlusPlus')
    // Verify the provider type get was called
    sinon.assert.calledWith(configGetStub, 'llmProvider')

    assert.ok(provider instanceof OpenAIProvider)
    assert.strictEqual(provider.providerIdentifier, LLMProvider.OpenAI)
  })

  test('should create Sourcegraph provider when configured', () => {
    configGetStub.withArgs('llmProvider').returns(LLMProvider.Sourcegraph)

    const provider = createProvider()

    // Verify getConfiguration was called at least once with 'codyPlusPlus'
    sinon.assert.calledWith(getConfigurationStub, 'codyPlusPlus')
    // Verify the provider type get was called
    sinon.assert.calledWith(configGetStub, 'llmProvider')

    assert.ok(provider instanceof SourcegraphProvider)
    assert.strictEqual(provider.providerIdentifier, LLMProvider.Sourcegraph)
  })

  test('should throw error when invalid provider configured', () => {
    configGetStub.withArgs('llmProvider').returns('invalid-provider')

    assert.throws(() => {
      createProvider()
    }, /Unsupported LLM provider: invalid-provider/)

    // Verify getConfiguration was called with 'codyPlusPlus'
    sinon.assert.calledWith(getConfigurationStub, 'codyPlusPlus')
    // Verify the provider type get was called
    sinon.assert.calledWith(configGetStub, 'llmProvider')
  })
})
```
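These tests pin down the factory's observable behavior: it reads the `codyPlusPlus` configuration, looks up `llmProvider`, returns the matching provider instance, and throws on anything else. A rough sketch consistent with them (not the actual `src/core/llm/index.ts`, and the provider constructors may well take arguments in the real code) could look like:

```typescript
import * as vscode from 'vscode'
import { LLMProvider } from '../../constants/llm'
import { OpenAIProvider } from './providers/openai'
import { SourcegraphProvider } from './providers/sourcegraph'
import { BaseLLMProvider } from './types'

// Sketch only: maps the configured provider name to a provider instance.
export function createProvider(): BaseLLMProvider {
  const config = vscode.workspace.getConfiguration('codyPlusPlus')
  const provider = config.get<string>('llmProvider')

  switch (provider) {
    case LLMProvider.OpenAI:
      return new OpenAIProvider()
    case LLMProvider.Sourcegraph:
      return new SourcegraphProvider()
    default:
      throw new Error(`Unsupported LLM provider: ${provider}`)
  }
}
```
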

src/core/llm/__tests__/types.test.ts

+68
```typescript
import * as assert from 'assert'
import { LLMProvider } from '../../../constants/llm'
import { BaseLLMProvider, CompletionConfig, CompletionRequest, CompletionResponse } from '../types'

suite('LLM Types', () => {
  test('should correctly type CompletionConfig', () => {
    const config: CompletionConfig = {
      model: 'test-model',
      maxTokens: 500,
      temperature: 0.7,
      responseFormat: {
        type: 'json',
        schema: { type: 'object' }
      }
    }

    assert.strictEqual(config.model, 'test-model')
    assert.strictEqual(config.maxTokens, 500)
    assert.strictEqual(config.temperature, 0.7)
    assert.strictEqual(config.responseFormat?.type, 'json')
  })

  test('should correctly type CompletionRequest', () => {
    const request: CompletionRequest = {
      messages: [
        { role: 'system', content: 'You are a helpful assistant' },
        { role: 'user', content: 'Hello' }
      ],
      config: {
        model: 'test-model',
        maxTokens: 100
      }
    }

    assert.strictEqual(request.messages.length, 2)
    assert.strictEqual(request.messages[0].role, 'system')
    assert.strictEqual(request.messages[0].content, 'You are a helpful assistant')
    assert.strictEqual(request.messages[1].role, 'user')
    assert.strictEqual(request.messages[1].content, 'Hello')
    assert.strictEqual(request.config?.model, 'test-model')
  })

  test('should correctly type CompletionResponse', () => {
    const response: CompletionResponse = {
      text: 'This is a response'
    }

    assert.strictEqual(response.text, 'This is a response')
  })

  test('should correctly implement BaseLLMProvider interface', async () => {
    // Create a mock implementation of BaseLLMProvider
    const mockProvider: BaseLLMProvider = {
      providerIdentifier: LLMProvider.OpenAI,
      complete: async (request: CompletionRequest): Promise<CompletionResponse> => {
        return { text: 'Mock response' }
      }
    }

    assert.strictEqual(mockProvider.providerIdentifier, LLMProvider.OpenAI)

    const response = await mockProvider.complete({
      messages: [{ role: 'user', content: 'Test' }]
    })

    assert.strictEqual(response.text, 'Mock response')
  })
})
```
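For reference, the shapes these tests exercise suggest declarations in `../types` roughly like the following. This is a reconstruction from the assertions above, not the file's actual contents; in particular, the `Message` name, the role union, and the exact optionality of fields are guesses:

```typescript
import { LLMProvider } from '../../constants/llm'

// Reconstructed shapes, inferred from the type tests above
export interface CompletionConfig {
  model?: string
  maxTokens?: number
  temperature?: number
  responseFormat?: {
    type: string
    schema?: object
  }
}

export interface Message {
  role: 'system' | 'user' | 'assistant'
  content: string
}

export interface CompletionRequest {
  messages: Message[]
  config?: CompletionConfig
}

export interface CompletionResponse {
  text: string
}

export interface BaseLLMProvider {
  providerIdentifier: LLMProvider
  complete(request: CompletionRequest): Promise<CompletionResponse>
}
```
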

src/core/llm/__tests__/utils.test.ts

+108
```typescript
import * as assert from 'assert'
import * as sinon from 'sinon'
import * as vscode from 'vscode'
import { FEW_SHOT_EXAMPLES, SYSTEM_PROMPT } from '../../../constants/llm'
import * as fileOperations from '../../filesystem/operations'
import { createCompletionRequestMessages, parseLLMResponse } from '../utils'

// Define FileMetadata interface to match what's expected by filesystem operations
interface FileMetadata {
  name: string
  path: string
  isDirectory: boolean
  type: string
}

suite('LLM Utils', () => {
  let sandbox: sinon.SinonSandbox

  setup(() => {
    sandbox = sinon.createSandbox()
  })

  teardown(() => {
    sandbox.restore()
  })

  suite('createCompletionRequestMessages', () => {
    test('should create messages with correct format and content', async () => {
      const mockRootUri = { fsPath: '/test/workspace' } as vscode.Uri

      // Mock the filesystem operations without specific type dependencies
      const mockFileTree = [
        {
          name: 'file1.ts',
          path: '/test/workspace/file1.ts',
          isDirectory: false,
          type: 'file'
        },
        {
          name: 'folder1',
          path: '/test/workspace/folder1',
          isDirectory: true,
          type: 'directory'
        }
      ]

      const mockFormattedTree = '├── file1.ts\n└── folder1/'

      // Stub the filesystem operations with any type to avoid type conflicts
      const getWorkspaceFileTreeStub = sandbox
        .stub(fileOperations, 'getWorkspaceFileTree')
        .resolves(mockFileTree as any)
      const formatFileTreeStub = sandbox
        .stub(fileOperations, 'formatFileTree')
        .returns(mockFormattedTree)

      const userPrompt = 'Find all typescript files'
      const result = await createCompletionRequestMessages(userPrompt, mockRootUri)

      // Verify the filesystem operations were called correctly
      sinon.assert.calledOnce(getWorkspaceFileTreeStub)
      sinon.assert.calledWith(getWorkspaceFileTreeStub, mockRootUri)
      sinon.assert.calledOnce(formatFileTreeStub)
      sinon.assert.calledWith(formatFileTreeStub, mockRootUri.fsPath, mockFileTree as any)

      // Check the result messages
      assert.strictEqual(result.length, FEW_SHOT_EXAMPLES.length + 2) // system + examples + user
      assert.strictEqual(result[0].role, 'system')
      assert.strictEqual(result[0].content, SYSTEM_PROMPT)

      // The last message should be the user message
      const userMessage = result[result.length - 1]
      assert.strictEqual(userMessage.role, 'user')
      assert.ok(userMessage.content.includes('<file-tree>'))
      assert.ok(userMessage.content.includes(mockRootUri.fsPath))
      assert.ok(userMessage.content.includes(mockFormattedTree))
      assert.ok(userMessage.content.includes(`User request: ${userPrompt}`))
    })
  })

  suite('parseLLMResponse', () => {
    test('should correctly parse valid JSON array response', () => {
      const response = '["file1.ts", "src/file2.ts"]'
      const result = parseLLMResponse(response)

      assert.deepStrictEqual(result, ['file1.ts', 'src/file2.ts'])
    })

    test('should correctly parse response with files property', () => {
      const response = '{"files": ["file1.ts", "src/file2.ts"]}'
      const result = parseLLMResponse(response)

      assert.deepStrictEqual(result, ['file1.ts', 'src/file2.ts'])
    })

    test('should return empty array for empty response', () => {
      const result = parseLLMResponse('')
      assert.deepStrictEqual(result, [])
    })

    test('should return empty array for response not matching expected format', () => {
      const response = '{"notFiles": ["file1.ts"]}'
      const result = parseLLMResponse(response)

      assert.deepStrictEqual(result, [])
    })
  })
})
```
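The `parseLLMResponse` cases above largely determine a minimal implementation. One version consistent with these tests (not necessarily the real `src/core/llm/utils.ts`, which may also handle extra cases such as fenced JSON) would be:

```typescript
// Sketch only: accepts either a bare JSON array or an object with a `files` array.
export function parseLLMResponse(response: string): string[] {
  if (!response) {
    return []
  }
  try {
    const parsed = JSON.parse(response)
    if (Array.isArray(parsed)) {
      return parsed
    }
    if (parsed && Array.isArray(parsed.files)) {
      return parsed.files
    }
  } catch {
    // Malformed JSON falls through to the empty result
  }
  return []
}
```
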
