/**
 * Tests for Gemini Provider
 */

import { describe, it, expect, beforeEach } from 'bun:test';
import { GeminiProvider, AIProviderError, AIErrorType } from '../src/index.js';

describe('GeminiProvider', () => {
  let provider: GeminiProvider;

  beforeEach(() => {
    provider = new GeminiProvider({
      apiKey: 'test-api-key',
      defaultModel: 'gemini-1.5-flash'
    });
  });

  describe('constructor', () => {
    it('should create provider with valid config', () => {
      expect(provider).toBeInstanceOf(GeminiProvider);
      expect(provider.isInitialized()).toBe(false);
    });

    it('should throw error for missing API key', () => {
      expect(() => {
        new GeminiProvider({ apiKey: '' });
      }).toThrow(AIProviderError);
    });

    it('should set default model', () => {
      const customProvider = new GeminiProvider({
        apiKey: 'test-key',
        defaultModel: 'gemini-1.5-pro'
      });
      expect(customProvider).toBeInstanceOf(GeminiProvider);
    });

    it('should handle safety settings and generation config', () => {
      const customProvider = new GeminiProvider({
        apiKey: 'test-key',
        safetySettings: [],
        generationConfig: {
          temperature: 0.8,
          topP: 0.9,
          topK: 40
        }
      });
      expect(customProvider).toBeInstanceOf(GeminiProvider);
    });
  });

  describe('getInfo', () => {
    it('should return provider information', () => {
      const info = provider.getInfo();

      expect(info.name).toBe('Gemini');
      expect(info.version).toBe('1.0.0');
      expect(info.supportsStreaming).toBe(true);
      expect(info.models).toContain('gemini-1.5-flash');
      expect(info.models).toContain('gemini-1.5-pro');
      expect(info.models).toContain('gemini-1.0-pro');
      expect(info.maxContextLength).toBe(1000000);
      expect(info.capabilities).toHaveProperty('vision', true);
      expect(info.capabilities).toHaveProperty('functionCalling', true);
      expect(info.capabilities).toHaveProperty('systemMessages', true);
      expect(info.capabilities).toHaveProperty('multimodal', true);
      expect(info.capabilities).toHaveProperty('largeContext', true);
    });
  });

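  // These tests stub the provider's private `initialized`, `client`, and `model`
  // fields (presumably populated by initialize()) so complete() can run its
  // parameter validation without a real Gemini API call.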
  describe('validation', () => {
    it('should validate temperature range', async () => {
      // Mock initialization to avoid API call
      (provider as any).initialized = true;
      (provider as any).client = {};
      (provider as any).model = {};

      await expect(
        provider.complete({
          messages: [{ role: 'user', content: 'test' }],
          temperature: 1.5
        })
      ).rejects.toThrow('Temperature must be between 0.0 and 1.0');
    });

    it('should validate top_p range', async () => {
      (provider as any).initialized = true;
      (provider as any).client = {};
      (provider as any).model = {};

      await expect(
        provider.complete({
          messages: [{ role: 'user', content: 'test' }],
          topP: 1.5
        })
      ).rejects.toThrow('Top-p must be between 0.0 and 1.0');
    });

    it('should validate message format', async () => {
      (provider as any).initialized = true;
      (provider as any).client = {};
      (provider as any).model = {};

      await expect(
        provider.complete({
          messages: [{ role: 'invalid' as any, content: 'test' }]
        })
      ).rejects.toThrow('Each message must have a valid role');
    });

    it('should validate empty content', async () => {
      (provider as any).initialized = true;
      (provider as any).client = {};
      (provider as any).model = {};

      await expect(
        provider.complete({
          messages: [{ role: 'user', content: '' }]
        })
      ).rejects.toThrow('Each message must have non-empty string content');
    });

    it('should require initialization before use', async () => {
      await expect(
        provider.complete({
          messages: [{ role: 'user', content: 'test' }]
        })
      ).rejects.toThrow('Provider must be initialized before use');
    });
  });

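  // Each case feeds a plain Error into the private handleGeminiError helper and
  // checks that keywords in the raw message ('API key', 'quota', 'timeout', ...)
  // are mapped to the matching AIErrorType.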
  describe('error handling', () => {
    it('should handle authentication errors', () => {
      const error = new Error('API key invalid');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.AUTHENTICATION);
      expect(providerError.message).toContain('Authentication failed');
    });

    it('should handle rate limit errors', () => {
      const error = new Error('quota exceeded');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.RATE_LIMIT);
      expect(providerError.message).toContain('Rate limit exceeded');
    });

    it('should handle model not found errors', () => {
      const error = new Error('model not found');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.MODEL_NOT_FOUND);
      expect(providerError.message).toContain('Model not found');
    });

    it('should handle invalid request errors', () => {
      const error = new Error('invalid request parameters');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.INVALID_REQUEST);
    });

    it('should handle network errors', () => {
      const error = new Error('network connection failed');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.NETWORK);
    });

    it('should handle timeout errors', () => {
      const error = new Error('request timeout');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.TIMEOUT);
    });

    it('should handle unknown errors', () => {
      const error = new Error('Unknown error');

      const providerError = (provider as any).handleGeminiError(error);

      expect(providerError).toBeInstanceOf(AIProviderError);
      expect(providerError.type).toBe(AIErrorType.UNKNOWN);
    });
  });

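  // convertMessages is expected to produce Gemini's request shape: system
  // messages merged into a single systemInstruction string, user turns kept as
  // role 'user', and assistant turns mapped to Gemini's 'model' role.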
  describe('message conversion', () => {
    it('should convert messages to Gemini format', () => {
      const messages = [
        { role: 'system' as const, content: 'You are helpful' },
        { role: 'user' as const, content: 'Hello' },
        { role: 'assistant' as const, content: 'Hi there' }
      ];

      const result = (provider as any).convertMessages(messages);

      expect(result.systemInstruction).toBe('You are helpful');
      expect(result.contents).toHaveLength(2);
      expect(result.contents[0]).toEqual({
        role: 'user',
        parts: [{ text: 'Hello' }]
      });
      expect(result.contents[1]).toEqual({
        role: 'model',
        parts: [{ text: 'Hi there' }]
      });
    });

    it('should handle multiple system messages', () => {
      const messages = [
        { role: 'system' as const, content: 'You are helpful' },
        { role: 'user' as const, content: 'Hello' },
        { role: 'system' as const, content: 'Be concise' }
      ];

      const result = (provider as any).convertMessages(messages);

      expect(result.systemInstruction).toBe('You are helpful\n\nBe concise');
      expect(result.contents).toHaveLength(1);
      expect(result.contents[0].role).toBe('user');
    });

    it('should handle messages without system prompts', () => {
      const messages = [
        { role: 'user' as const, content: 'Hello' },
        { role: 'assistant' as const, content: 'Hi there' }
      ];

      const result = (provider as any).convertMessages(messages);

      expect(result.systemInstruction).toBeUndefined();
      expect(result.contents).toHaveLength(2);
    });

    it('should convert assistant role to model role', () => {
      const messages = [
        { role: 'assistant' as const, content: 'I am an assistant' }
      ];

      const result = (provider as any).convertMessages(messages);

      expect(result.contents[0].role).toBe('model');
      expect(result.contents[0].parts[0].text).toBe('I am an assistant');
    });
  });

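  // buildGenerationConfig should map completion params onto Gemini's generation
  // config (maxTokens -> maxOutputTokens) and fall back to the provider defaults
  // (temperature 0.7, 1000 output tokens) when none are supplied.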
  describe('generation config', () => {
    it('should build generation config from completion params', () => {
      const params = {
        messages: [{ role: 'user' as const, content: 'test' }],
        temperature: 0.8,
        topP: 0.9,
        maxTokens: 500,
        stopSequences: ['STOP', 'END']
      };

      const result = (provider as any).buildGenerationConfig(params);

      expect(result.temperature).toBe(0.8);
      expect(result.topP).toBe(0.9);
      expect(result.maxOutputTokens).toBe(500);
      expect(result.stopSequences).toEqual(['STOP', 'END']);
    });

    it('should use default temperature when not provided', () => {
      const params = {
        messages: [{ role: 'user' as const, content: 'test' }]
      };

      const result = (provider as any).buildGenerationConfig(params);

      expect(result.temperature).toBe(0.7);
      expect(result.maxOutputTokens).toBe(1000);
    });
  });

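  // formatCompletionResponse should concatenate the text parts of the first
  // candidate, drop non-text parts (e.g. functionCall), and surface the token
  // counts from usageMetadata.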
  describe('response formatting', () => {
    it('should format completion response correctly', () => {
      const mockResponse = {
        candidates: [{
          content: {
            parts: [{ text: 'Hello there!' }]
          },
          finishReason: 'STOP',
          safetyRatings: [],
          citationMetadata: null
        }],
        usageMetadata: {
          promptTokenCount: 10,
          candidatesTokenCount: 20,
          totalTokenCount: 30
        }
      };

      const result = (provider as any).formatCompletionResponse(mockResponse, 'gemini-1.5-flash');

      expect(result.content).toBe('Hello there!');
      expect(result.model).toBe('gemini-1.5-flash');
      expect(result.usage.promptTokens).toBe(10);
      expect(result.usage.completionTokens).toBe(20);
      expect(result.usage.totalTokens).toBe(30);
      expect(result.metadata.finishReason).toBe('STOP');
    });

    it('should handle multiple text parts', () => {
      const mockResponse = {
        candidates: [{
          content: {
            parts: [
              { text: 'Hello ' },
              { text: 'there!' },
              { functionCall: { name: 'test' } } // Non-text part should be filtered
            ]
          },
          finishReason: 'STOP'
        }],
        usageMetadata: {
          promptTokenCount: 5,
          candidatesTokenCount: 10,
          totalTokenCount: 15
        }
      };

      const result = (provider as any).formatCompletionResponse(mockResponse, 'gemini-1.5-flash');

      expect(result.content).toBe('Hello there!');
    });

    it('should throw error for empty response', () => {
      const mockResponse = {
        candidates: [],
        usageMetadata: {
          promptTokenCount: 5,
          candidatesTokenCount: 0,
          totalTokenCount: 5
        }
      };

      expect(() => {
        (provider as any).formatCompletionResponse(mockResponse, 'gemini-1.5-flash');
      }).toThrow('No content in Gemini response');
    });
  });
});