feat: add OpenAI provider integration and examples

This commit is contained in:
2025-05-28 12:04:10 +02:00
parent 42902445fb
commit aa2fd98cc1
9 changed files with 1061 additions and 17 deletions

View File

@@ -28,11 +28,13 @@ export { BaseAIProvider } from './providers/base.js';
// Concrete provider implementations
export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
export { OpenAIProvider, type OpenAIConfig } from './providers/openai.js';
// Utility functions and factory
export {
createProvider,
createClaudeProvider,
createOpenAIProvider,
ProviderRegistry,
type ProviderType,
type ProviderConfigMap
@@ -49,4 +51,4 @@ export const VERSION = '1.0.0';
/**
* List of supported providers
*/
export const SUPPORTED_PROVIDERS = ['claude'] as const;
export const SUPPORTED_PROVIDERS = ['claude', 'openai'] as const;

View File

@@ -4,4 +4,5 @@
*/
export { BaseAIProvider } from './base.js';
export { ClaudeProvider, type ClaudeConfig } from './claude.js';
export { OpenAIProvider, type OpenAIConfig } from './openai.js';

316
src/providers/openai.ts Normal file
View File

@@ -0,0 +1,316 @@
/**
* OpenAI Provider implementation using OpenAI's API
* Provides integration with GPT models through a standardized interface
*/
import OpenAI from 'openai';
import type {
AIProviderConfig,
CompletionParams,
CompletionResponse,
CompletionChunk,
ProviderInfo,
AIMessage
} from '../types/index.js';
import { BaseAIProvider } from './base.js';
import { AIProviderError, AIErrorType } from '../types/index.js';
/**
 * Configuration specific to the OpenAI provider.
 * Extends the shared AIProviderConfig with OpenAI-only options that are
 * forwarded verbatim to the OpenAI SDK client.
 */
export interface OpenAIConfig extends AIProviderConfig {
/** Default model to use if not specified in requests (default: gpt-4) */
defaultModel?: string;
/** OpenAI organization ID, passed to the SDK client (optional) */
organization?: string;
/** OpenAI project ID, passed to the SDK client (optional) */
project?: string;
}
/**
 * OpenAI provider implementation.
 *
 * Wraps the official `openai` SDK behind the standardized BaseAIProvider
 * interface: client initialization, blocking and streaming chat
 * completions, provider metadata, and normalized error reporting.
 */
export class OpenAIProvider extends BaseAIProvider {
  /** SDK client; created in doInitialize(), null until then. */
  private client: OpenAI | null = null;
  /** Model used when a request does not specify one. */
  private readonly defaultModel: string;
  /** Optional OpenAI organization ID forwarded to the SDK. */
  private readonly organization?: string;
  /** Optional OpenAI project ID forwarded to the SDK. */
  private readonly project?: string;

  constructor(config: OpenAIConfig) {
    super(config);
    this.defaultModel = config.defaultModel || 'gpt-4';
    this.organization = config.organization;
    this.project = config.project;
  }

  /**
   * Initialize the provider: construct the OpenAI client and validate
   * the credentials with a minimal request.
   *
   * @throws AIProviderError (AUTHENTICATION) when client construction or
   *         credential validation fails.
   */
  protected async doInitialize(): Promise<void> {
    try {
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl,
        timeout: this.config.timeout,
        maxRetries: this.config.maxRetries,
        organization: this.organization,
        project: this.project
      });
      // Test the connection by making a simple request.
      await this.validateConnection();
    } catch (error) {
      throw new AIProviderError(
        `Failed to initialize OpenAI provider: ${(error as Error).message}`,
        AIErrorType.AUTHENTICATION,
        undefined,
        error as Error
      );
    }
  }

  /**
   * Generate a single (non-streaming) completion.
   *
   * @param params - Generic completion parameters; model defaults to
   *                 this.defaultModel, maxTokens to 1000, temperature to 0.7.
   * @returns The response normalized to CompletionResponse.
   * @throws AIProviderError mapped from the underlying OpenAI error.
   */
  protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const response = await this.client.chat.completions.create({
        model: params.model || this.defaultModel,
        messages: this.convertMessages(params.messages),
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop: params.stopSequences,
        stream: false
      });
      return this.formatCompletionResponse(response);
    } catch (error) {
      throw this.handleOpenAIError(error);
    }
  }

  /**
   * Generate a streaming completion.
   *
   * Content chunks are yielded as they arrive; a single terminal chunk
   * with isComplete: true and the real token usage is yielded after the
   * stream is exhausted.
   *
   * FIX: the previous implementation declared token counters but never
   * assigned them, so streamed usage was always reported as zero. We now
   * request `stream_options: { include_usage: true }` so the API appends
   * a final usage-bearing chunk, and report those figures.
   *
   * @throws AIProviderError mapped from the underlying OpenAI error.
   */
  protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const stream = await this.client.chat.completions.create({
        model: params.model || this.defaultModel,
        messages: this.convertMessages(params.messages),
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop: params.stopSequences,
        stream: true,
        // Ask the API to append a final chunk carrying token usage;
        // usage is otherwise unavailable on streamed responses.
        stream_options: { include_usage: true }
      });
      let messageId = '';
      let promptTokens = 0;
      let completionTokens = 0;
      for await (const chunk of stream) {
        if (chunk.id && !messageId) {
          messageId = chunk.id;
        }
        // The usage chunk (sent last, with an empty choices array) carries
        // the authoritative token counts.
        if (chunk.usage) {
          promptTokens = chunk.usage.prompt_tokens;
          completionTokens = chunk.usage.completion_tokens;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield {
            content: delta.content,
            isComplete: false,
            id: messageId || chunk.id
          };
        }
      }
      // Terminal chunk: emitted once the stream ends so the usage figures
      // from the trailing usage chunk are included.
      yield {
        content: '',
        isComplete: true,
        id: messageId,
        usage: {
          promptTokens,
          completionTokens,
          totalTokens: promptTokens + completionTokens
        }
      };
    } catch (error) {
      throw this.handleOpenAIError(error);
    }
  }

  /**
   * Static metadata about this provider: supported models, context
   * window, and feature capabilities.
   */
  public getInfo(): ProviderInfo {
    return {
      name: 'OpenAI',
      version: '1.0.0',
      models: [
        'gpt-4',
        'gpt-4-turbo',
        'gpt-4-turbo-preview',
        'gpt-4-0125-preview',
        'gpt-4-1106-preview',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0125',
        'gpt-3.5-turbo-1106'
      ],
      maxContextLength: 128000, // GPT-4 Turbo context length
      supportsStreaming: true,
      capabilities: {
        vision: true,
        functionCalling: true,
        jsonMode: true,
        systemMessages: true
      }
    };
  }

  /**
   * Validate credentials by issuing a minimal one-token request.
   * Auth failures (401/403) are fatal; other errors are tolerated so
   * initialization can proceed past transient issues.
   */
  private async validateConnection(): Promise<void> {
    if (!this.client) {
      throw new Error('Client not initialized');
    }
    try {
      // Make a minimal request to validate credentials.
      await this.client.chat.completions.create({
        model: this.defaultModel,
        messages: [{ role: 'user', content: 'Hi' }],
        max_tokens: 1
      });
    } catch (error) {
      const status = (error as { status?: number }).status;
      if (status === 401 || status === 403) {
        throw new AIProviderError(
          'Invalid API key. Please check your OpenAI API key.',
          AIErrorType.AUTHENTICATION,
          status
        );
      }
      // Other errors during validation (rate limits, network blips) are
      // ignored here as they might be temporary issues.
    }
  }

  /**
   * Convert the generic AIMessage[] format to OpenAI's message params.
   * OpenAI accepts system messages directly in the messages array.
   */
  private convertMessages(messages: AIMessage[]): OpenAI.Chat.Completions.ChatCompletionMessageParam[] {
    return messages.map(message => ({
      role: message.role as 'system' | 'user' | 'assistant',
      content: message.content
    }));
  }

  /**
   * Normalize OpenAI's ChatCompletion into our CompletionResponse.
   *
   * @throws AIProviderError (UNKNOWN) when the response has no content.
   */
  private formatCompletionResponse(response: OpenAI.Chat.Completions.ChatCompletion): CompletionResponse {
    const choice = response.choices[0];
    if (!choice || !choice.message.content) {
      throw new AIProviderError(
        'No content in OpenAI response',
        AIErrorType.UNKNOWN
      );
    }
    return {
      content: choice.message.content,
      model: response.model,
      usage: {
        promptTokens: response.usage?.prompt_tokens || 0,
        completionTokens: response.usage?.completion_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      },
      id: response.id,
      metadata: {
        finishReason: choice.finish_reason,
        systemFingerprint: response.system_fingerprint
      }
    };
  }

  /**
   * Map an arbitrary thrown value from the OpenAI SDK onto our
   * AIProviderError taxonomy, keyed on the HTTP status when available.
   * (Takes `unknown` rather than `any` so callers pass raw catch values.)
   */
  private handleOpenAIError(error: unknown): AIProviderError {
    if (error instanceof AIProviderError) {
      return error;
    }
    const err = error as { status?: number; statusCode?: number; message?: string };
    const status = err.status ?? err.statusCode;
    const message = err.message || 'Unknown OpenAI API error';
    switch (status) {
      case 400:
        return new AIProviderError(
          `Invalid request: ${message}`,
          AIErrorType.INVALID_REQUEST,
          status,
          error as Error
        );
      case 401:
        return new AIProviderError(
          'Authentication failed. Please check your OpenAI API key.',
          AIErrorType.AUTHENTICATION,
          status,
          error as Error
        );
      case 403:
        return new AIProviderError(
          'Access forbidden. Please check your API key permissions.',
          AIErrorType.AUTHENTICATION,
          status,
          error as Error
        );
      case 404:
        return new AIProviderError(
          'Model not found. Please check the model name.',
          AIErrorType.MODEL_NOT_FOUND,
          status,
          error as Error
        );
      case 429:
        return new AIProviderError(
          'Rate limit exceeded. Please slow down your requests.',
          AIErrorType.RATE_LIMIT,
          status,
          error as Error
        );
      case 500:
      case 502:
      case 503:
      case 504:
        return new AIProviderError(
          'OpenAI service temporarily unavailable. Please try again later.',
          AIErrorType.NETWORK,
          status,
          error as Error
        );
      default:
        return new AIProviderError(
          `OpenAI API error: ${message}`,
          AIErrorType.UNKNOWN,
          status,
          error as Error
        );
    }
  }
}

View File

@@ -5,18 +5,20 @@
import type { AIProviderConfig } from '../types/index.js';
import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
import { OpenAIProvider, type OpenAIConfig } from '../providers/openai.js';
import { BaseAIProvider } from '../providers/base.js';
/**
* Supported AI provider types
*/
export type ProviderType = 'claude';
export type ProviderType = 'claude' | 'openai';
/**
 * Maps each supported provider type to its concrete configuration shape,
 * giving the factory type-safe per-provider config selection.
 */
export interface ProviderConfigMap {
claude: ClaudeConfig;
openai: OpenAIConfig;
}
/**
@@ -32,6 +34,8 @@ export function createProvider<T extends ProviderType>(
switch (type) {
case 'claude':
return new ClaudeProvider(config as ClaudeConfig);
case 'openai':
return new OpenAIProvider(config as OpenAIConfig);
default:
throw new Error(`Unsupported provider type: ${type}`);
}
@@ -53,6 +57,22 @@ export function createClaudeProvider(
});
}
/**
 * Convenience factory for an OpenAI provider.
 *
 * @param apiKey - OpenAI API key
 * @param options - Any additional OpenAIConfig fields (apiKey excluded)
 * @returns A configured, not-yet-initialized OpenAIProvider instance
 */
export function createOpenAIProvider(
  apiKey: string,
  options: Partial<Omit<OpenAIConfig, 'apiKey'>> = {}
): OpenAIProvider {
  // Merge the key with the caller-supplied options into a full config.
  const config: OpenAIConfig = { apiKey, ...options };
  return new OpenAIProvider(config);
}
/**
* Provider registry for dynamic provider creation
*/
@@ -101,4 +121,5 @@ export class ProviderRegistry {
}
// Pre-register built-in providers
ProviderRegistry.register('claude', ClaudeProvider);
ProviderRegistry.register('openai', OpenAIProvider);