feat: add OpenAI provider integration and examples

This commit is contained in:
2025-05-28 12:04:10 +02:00
parent 42902445fb
commit aa2fd98cc1
9 changed files with 1061 additions and 17 deletions

261
tests/openai.test.ts Normal file
View File

@@ -0,0 +1,261 @@
/**
* Tests for OpenAI Provider
*/
import { describe, it, expect, beforeEach } from 'bun:test';
import { OpenAIProvider, AIProviderError, AIErrorType } from '../src/index.js';
describe('OpenAIProvider', () => {
let provider: OpenAIProvider;
beforeEach(() => {
provider = new OpenAIProvider({
apiKey: 'test-api-key',
defaultModel: 'gpt-3.5-turbo'
});
});
describe('constructor', () => {
it('should create provider with valid config', () => {
expect(provider).toBeInstanceOf(OpenAIProvider);
expect(provider.isInitialized()).toBe(false);
});
it('should throw error for missing API key', () => {
expect(() => {
new OpenAIProvider({ apiKey: '' });
}).toThrow(AIProviderError);
});
it('should set default model', () => {
const customProvider = new OpenAIProvider({
apiKey: 'test-key',
defaultModel: 'gpt-4'
});
expect(customProvider).toBeInstanceOf(OpenAIProvider);
});
it('should handle organization and project options', () => {
const customProvider = new OpenAIProvider({
apiKey: 'test-key',
organization: 'org-123',
project: 'proj-456'
});
expect(customProvider).toBeInstanceOf(OpenAIProvider);
});
});
describe('getInfo', () => {
it('should return provider information', () => {
const info = provider.getInfo();
expect(info.name).toBe('OpenAI');
expect(info.version).toBe('1.0.0');
expect(info.supportsStreaming).toBe(true);
expect(info.models).toContain('gpt-4');
expect(info.models).toContain('gpt-3.5-turbo');
expect(info.maxContextLength).toBe(128000);
expect(info.capabilities).toHaveProperty('vision', true);
expect(info.capabilities).toHaveProperty('functionCalling', true);
expect(info.capabilities).toHaveProperty('jsonMode', true);
expect(info.capabilities).toHaveProperty('systemMessages', true);
});
});
describe('validation', () => {
it('should validate temperature range', async () => {
// Mock initialization to avoid API call
(provider as any).initialized = true;
(provider as any).client = {};
await expect(
provider.complete({
messages: [{ role: 'user', content: 'test' }],
temperature: 1.5
})
).rejects.toThrow('Temperature must be between 0.0 and 1.0');
});
it('should validate top_p range', async () => {
(provider as any).initialized = true;
(provider as any).client = {};
await expect(
provider.complete({
messages: [{ role: 'user', content: 'test' }],
topP: 1.5
})
).rejects.toThrow('Top-p must be between 0.0 and 1.0');
});
it('should validate message format', async () => {
(provider as any).initialized = true;
(provider as any).client = {};
await expect(
provider.complete({
messages: [{ role: 'invalid' as any, content: 'test' }]
})
).rejects.toThrow('Each message must have a valid role');
});
it('should validate empty content', async () => {
(provider as any).initialized = true;
(provider as any).client = {};
await expect(
provider.complete({
messages: [{ role: 'user', content: '' }]
})
).rejects.toThrow('Each message must have non-empty string content');
});
it('should require initialization before use', async () => {
await expect(
provider.complete({
messages: [{ role: 'user', content: 'test' }]
})
).rejects.toThrow('Provider must be initialized before use');
});
});
describe('error handling', () => {
it('should handle authentication errors', () => {
const error = new Error('Unauthorized');
(error as any).status = 401;
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.AUTHENTICATION);
expect(providerError.message).toContain('Authentication failed');
});
it('should handle rate limit errors', () => {
const error = new Error('Rate limited');
(error as any).status = 429;
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.RATE_LIMIT);
expect(providerError.message).toContain('Rate limit exceeded');
});
it('should handle model not found errors', () => {
const error = new Error('Model not found');
(error as any).status = 404;
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.MODEL_NOT_FOUND);
expect(providerError.message).toContain('Model not found');
});
it('should handle invalid request errors', () => {
const error = new Error('Bad request');
(error as any).status = 400;
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.INVALID_REQUEST);
});
it('should handle server errors', () => {
const error = new Error('Internal server error');
(error as any).status = 500;
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.NETWORK);
});
it('should handle unknown errors', () => {
const error = new Error('Unknown error');
const providerError = (provider as any).handleOpenAIError(error);
expect(providerError).toBeInstanceOf(AIProviderError);
expect(providerError.type).toBe(AIErrorType.UNKNOWN);
});
});
describe('message conversion', () => {
it('should convert messages to OpenAI format', () => {
const messages = [
{ role: 'system' as const, content: 'You are helpful' },
{ role: 'user' as const, content: 'Hello' },
{ role: 'assistant' as const, content: 'Hi there' }
];
const result = (provider as any).convertMessages(messages);
expect(result).toHaveLength(3);
expect(result[0]).toEqual({ role: 'system', content: 'You are helpful' });
expect(result[1]).toEqual({ role: 'user', content: 'Hello' });
expect(result[2]).toEqual({ role: 'assistant', content: 'Hi there' });
});
it('should handle messages with metadata', () => {
const messages = [
{
role: 'user' as const,
content: 'Hello',
metadata: { timestamp: '2024-01-01' }
}
];
const result = (provider as any).convertMessages(messages);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({ role: 'user', content: 'Hello' });
// Metadata should not be included in OpenAI format
});
});
describe('response formatting', () => {
it('should format completion response correctly', () => {
const mockResponse = {
id: 'chatcmpl-123',
model: 'gpt-3.5-turbo',
choices: [{
message: { content: 'Hello there!' },
finish_reason: 'stop'
}],
usage: {
prompt_tokens: 10,
completion_tokens: 20,
total_tokens: 30
},
system_fingerprint: 'fp_123'
};
const result = (provider as any).formatCompletionResponse(mockResponse);
expect(result.content).toBe('Hello there!');
expect(result.model).toBe('gpt-3.5-turbo');
expect(result.id).toBe('chatcmpl-123');
expect(result.usage.promptTokens).toBe(10);
expect(result.usage.completionTokens).toBe(20);
expect(result.usage.totalTokens).toBe(30);
expect(result.metadata.finishReason).toBe('stop');
expect(result.metadata.systemFingerprint).toBe('fp_123');
});
it('should throw error for empty response', () => {
const mockResponse = {
id: 'chatcmpl-123',
model: 'gpt-3.5-turbo',
choices: [],
usage: { prompt_tokens: 10, completion_tokens: 0, total_tokens: 10 }
};
expect(() => {
(provider as any).formatCompletionResponse(mockResponse);
}).toThrow('No content in OpenAI response');
});
});
});