feat: add initial implementation of Simple AI Provider package

This commit is contained in:
2025-05-28 11:54:24 +02:00
commit 42902445fb
15 changed files with 1616 additions and 0 deletions

52
src/index.ts Normal file
View File

@@ -0,0 +1,52 @@
/**
 * Simple AI Provider - Main Entry Point
 *
 * A professional, extensible package for integrating multiple AI providers
 * into your applications with a unified interface.
 *
 * @author Jan-Marlon Leibl
 * @version 1.0.0
 */
// Core types and interfaces shared by every provider implementation
export type {
AIProviderConfig,
AIMessage,
MessageRole,
CompletionParams,
CompletionResponse,
CompletionChunk,
TokenUsage,
ProviderInfo
} from './types/index.js';
// Error handling (runtime exports: error class and error-type enum)
export { AIProviderError, AIErrorType } from './types/index.js';
// Base provider class that all concrete providers extend
export { BaseAIProvider } from './providers/base.js';
// Concrete provider implementations
export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
// Utility functions and factory for creating providers by name/type
export {
createProvider,
createClaudeProvider,
ProviderRegistry,
type ProviderType,
type ProviderConfigMap
} from './utils/factory.js';
// Re-export everything from providers for convenience
export * from './providers/index.js';
/**
 * Package version (keep in sync with package.json)
 */
export const VERSION = '1.0.0';
/**
 * List of supported providers (extend when new providers are added)
 */
export const SUPPORTED_PROVIDERS = ['claude'] as const;

264
src/providers/base.ts Normal file
View File

@@ -0,0 +1,264 @@
/**
* Abstract base class for all AI providers
* Provides common functionality and enforces a consistent interface
*/
import type {
AIProviderConfig,
CompletionParams,
CompletionResponse,
CompletionChunk,
ProviderInfo
} from '../types/index.js';
import { AIProviderError, AIErrorType } from '../types/index.js';
/**
 * Abstract base class that all AI providers must extend.
 *
 * Centralizes configuration validation, initialization gating, request
 * parameter validation and error normalization so that concrete providers
 * only implement the provider-specific `doInitialize`, `doComplete` and
 * `doStream` hooks plus `getInfo`.
 */
export abstract class BaseAIProvider {
  /** Validated configuration (defaults applied by validateConfig). */
  protected config: AIProviderConfig;
  /** True once initialize() has completed successfully. */
  protected initialized: boolean = false;

  constructor(config: AIProviderConfig) {
    this.config = this.validateConfig(config);
  }

  /**
   * Validates the provider configuration and applies defaults.
   * @param config - Configuration object to validate
   * @returns Validated configuration with timeout/maxRetries defaults applied
   * @throws AIProviderError if configuration is invalid
   */
  protected validateConfig(config: AIProviderConfig): AIProviderConfig {
    if (!config.apiKey || typeof config.apiKey !== 'string') {
      throw new AIProviderError(
        'API key is required and must be a string',
        AIErrorType.INVALID_REQUEST
      );
    }
    // Apply defaults for optional settings
    return {
      ...config,
      timeout: config.timeout ?? 30000,
      maxRetries: config.maxRetries ?? 3
    };
  }

  /**
   * Initialize the provider (setup connections, validate credentials, etc.).
   * Must be called before using the provider.
   * @throws AIProviderError if provider-specific initialization fails
   */
  public async initialize(): Promise<void> {
    try {
      await this.doInitialize();
      this.initialized = true;
    } catch (error) {
      throw this.handleError(error as Error);
    }
  }

  /**
   * Check if the provider is initialized.
   */
  public isInitialized(): boolean {
    return this.initialized;
  }

  /**
   * Generate a completion based on the provided parameters.
   * @param params - Parameters for the completion request
   * @returns Promise resolving to completion response
   * @throws AIProviderError if not initialized, params are invalid, or the request fails
   */
  public async complete(params: CompletionParams): Promise<CompletionResponse> {
    this.ensureInitialized();
    this.validateCompletionParams(params);
    try {
      return await this.doComplete(params);
    } catch (error) {
      throw this.handleError(error as Error);
    }
  }

  /**
   * Generate a streaming completion.
   * @param params - Parameters for the completion request
   * @returns AsyncIterable of completion chunks
   * @throws AIProviderError if not initialized, params are invalid, or streaming fails
   */
  public async *stream(params: CompletionParams): AsyncIterable<CompletionChunk> {
    this.ensureInitialized();
    this.validateCompletionParams(params);
    try {
      // yield* forwards chunks; errors thrown during iteration are caught here
      yield* this.doStream(params);
    } catch (error) {
      throw this.handleError(error as Error);
    }
  }

  /**
   * Get information about this provider.
   * @returns Provider information and capabilities
   */
  public abstract getInfo(): ProviderInfo;

  /** Provider-specific initialization logic (override in implementations). */
  protected abstract doInitialize(): Promise<void>;

  /** Provider-specific completion logic (override in implementations). */
  protected abstract doComplete(params: CompletionParams): Promise<CompletionResponse>;

  /** Provider-specific streaming logic (override in implementations). */
  protected abstract doStream(params: CompletionParams): AsyncIterable<CompletionChunk>;

  /**
   * Ensures the provider is initialized before use.
   * @throws AIProviderError if not initialized
   */
  protected ensureInitialized(): void {
    if (!this.initialized) {
      throw new AIProviderError(
        'Provider must be initialized before use. Call initialize() first.',
        AIErrorType.INVALID_REQUEST
      );
    }
  }

  /**
   * Validates completion parameters.
   * @param params - Parameters to validate
   * @throws AIProviderError if parameters are invalid
   */
  protected validateCompletionParams(params: CompletionParams): void {
    if (!params.messages || !Array.isArray(params.messages) || params.messages.length === 0) {
      throw new AIProviderError(
        'Messages array is required and must not be empty',
        AIErrorType.INVALID_REQUEST
      );
    }
    for (const message of params.messages) {
      if (!message.role || !['system', 'user', 'assistant'].includes(message.role)) {
        throw new AIProviderError(
          'Each message must have a valid role (system, user, or assistant)',
          AIErrorType.INVALID_REQUEST
        );
      }
      if (!message.content || typeof message.content !== 'string') {
        throw new AIProviderError(
          'Each message must have non-empty string content',
          AIErrorType.INVALID_REQUEST
        );
      }
    }
    if (params.temperature !== undefined && (params.temperature < 0 || params.temperature > 1)) {
      throw new AIProviderError(
        'Temperature must be between 0.0 and 1.0',
        AIErrorType.INVALID_REQUEST
      );
    }
    if (params.topP !== undefined && (params.topP < 0 || params.topP > 1)) {
      throw new AIProviderError(
        'Top-p must be between 0.0 and 1.0',
        AIErrorType.INVALID_REQUEST
      );
    }
    // BUGFIX: the error message has always promised a positive *integer*,
    // but previously only `< 1` was checked, so e.g. 2.5 slipped through.
    if (
      params.maxTokens !== undefined &&
      (!Number.isInteger(params.maxTokens) || params.maxTokens < 1)
    ) {
      throw new AIProviderError(
        'Max tokens must be a positive integer',
        AIErrorType.INVALID_REQUEST
      );
    }
  }

  /**
   * Handles and transforms errors into AIProviderError instances.
   * @param error - The original error
   * @returns AIProviderError with appropriate type and context
   */
  protected handleError(error: Error): AIProviderError {
    if (error instanceof AIProviderError) {
      return error;
    }
    // Map common HTTP status codes to specific error types
    if ('status' in error) {
      const status = (error as any).status;
      switch (status) {
        case 401:
        case 403:
          return new AIProviderError(
            'Authentication failed. Please check your API key.',
            AIErrorType.AUTHENTICATION,
            status,
            error
          );
        case 429:
          return new AIProviderError(
            'Rate limit exceeded. Please try again later.',
            AIErrorType.RATE_LIMIT,
            status,
            error
          );
        case 404:
          return new AIProviderError(
            'Model or endpoint not found.',
            AIErrorType.MODEL_NOT_FOUND,
            status,
            error
          );
        case 400:
          return new AIProviderError(
            'Invalid request parameters.',
            AIErrorType.INVALID_REQUEST,
            status,
            error
          );
      }
    }
    // Timeout detection is message-based (best effort across HTTP clients)
    if (error.message.includes('timeout') || error.message.includes('ETIMEDOUT')) {
      return new AIProviderError(
        'Request timed out. Please try again.',
        AIErrorType.TIMEOUT,
        undefined,
        error
      );
    }
    // Network detection is message-based (best effort across HTTP clients)
    if (error.message.includes('network') || error.message.includes('ENOTFOUND')) {
      return new AIProviderError(
        'Network error occurred. Please check your connection.',
        AIErrorType.NETWORK,
        undefined,
        error
      );
    }
    // Default to unknown error, preserving the original for debugging
    return new AIProviderError(
      `Unknown error: ${error.message}`,
      AIErrorType.UNKNOWN,
      undefined,
      error
    );
  }
}

323
src/providers/claude.ts Normal file
View File

@@ -0,0 +1,323 @@
/**
* Claude AI Provider implementation using Anthropic's API
* Provides integration with Claude models through a standardized interface
*/
import Anthropic from '@anthropic-ai/sdk';
import type {
AIProviderConfig,
CompletionParams,
CompletionResponse,
CompletionChunk,
ProviderInfo,
AIMessage
} from '../types/index.js';
import { BaseAIProvider } from './base.js';
import { AIProviderError, AIErrorType } from '../types/index.js';
/**
 * Configuration specific to the Claude provider.
 * Extends the generic provider config with Claude-only settings.
 */
export interface ClaudeConfig extends AIProviderConfig {
/** Default model to use if not specified in requests (default: claude-3-5-sonnet-20241022) */
defaultModel?: string;
/** Anthropic API version header value (default: 2023-06-01) */
version?: string;
}
/**
 * Claude AI provider implementation.
 *
 * Integrates Anthropic's Claude models through the standardized
 * BaseAIProvider interface, including streaming support.
 */
export class ClaudeProvider extends BaseAIProvider {
  private client: Anthropic | null = null;
  private readonly defaultModel: string;
  private readonly version: string;

  constructor(config: ClaudeConfig) {
    super(config);
    this.defaultModel = config.defaultModel || 'claude-3-5-sonnet-20241022';
    this.version = config.version || '2023-06-01';
  }

  /**
   * Initialize the Claude provider by setting up the Anthropic client
   * and validating credentials with a minimal test request.
   * @throws AIProviderError if client setup or credential validation fails
   */
  protected async doInitialize(): Promise<void> {
    try {
      this.client = new Anthropic({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl,
        timeout: this.config.timeout,
        maxRetries: this.config.maxRetries,
        defaultHeaders: {
          'anthropic-version': this.version
        }
      });
      // NOTE: this makes a (billed, 1-token) API call to verify credentials
      await this.validateConnection();
    } catch (error) {
      throw new AIProviderError(
        `Failed to initialize Claude provider: ${(error as Error).message}`,
        AIErrorType.AUTHENTICATION,
        undefined,
        error as Error
      );
    }
  }

  /**
   * Generate a completion using Claude.
   * @param params - Validated completion parameters (see BaseAIProvider)
   * @returns Standardized completion response
   */
  protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const { system, messages } = this.convertMessages(params.messages);
      const response = await this.client.messages.create({
        model: params.model || this.defaultModel,
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop_sequences: params.stopSequences,
        system: system || undefined,
        messages: messages.map(msg => ({
          role: msg.role as 'user' | 'assistant',
          content: msg.content
        }))
      });
      return this.formatCompletionResponse(response);
    } catch (error) {
      throw this.handleAnthropicError(error as Error);
    }
  }

  /**
   * Generate a streaming completion using Claude.
   *
   * Yields incremental text chunks, then a final chunk (isComplete: true)
   * carrying token usage.
   */
  protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const { system, messages } = this.convertMessages(params.messages);
      const stream = await this.client.messages.create({
        model: params.model || this.defaultModel,
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop_sequences: params.stopSequences,
        system: system || undefined,
        messages: messages.map(msg => ({
          role: msg.role as 'user' | 'assistant',
          content: msg.content
        })),
        stream: true
      });
      let messageId = '';
      let promptTokens = 0;
      for await (const chunk of stream) {
        if (chunk.type === 'message_start') {
          messageId = chunk.message.id;
          // BUGFIX: input token count is reported on message_start, not on
          // message_delta, so capture it here for the final usage chunk.
          promptTokens = chunk.message.usage?.input_tokens ?? 0;
        } else if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
          yield {
            content: chunk.delta.text,
            isComplete: false,
            id: messageId
          };
        } else if (chunk.type === 'message_delta' && chunk.usage) {
          // Final chunk: message_delta carries the output token count
          const completionTokens = chunk.usage.output_tokens || 0;
          yield {
            content: '',
            isComplete: true,
            id: messageId,
            usage: {
              promptTokens,
              completionTokens,
              totalTokens: promptTokens + completionTokens
            }
          };
        }
      }
    } catch (error) {
      throw this.handleAnthropicError(error as Error);
    }
  }

  /**
   * Get information about the Claude provider.
   */
  public getInfo(): ProviderInfo {
    return {
      name: 'Claude',
      version: '1.0.0',
      models: [
        'claude-3-5-sonnet-20241022',
        'claude-3-5-haiku-20241022',
        'claude-3-opus-20240229',
        'claude-3-sonnet-20240229',
        'claude-3-haiku-20240307'
      ],
      maxContextLength: 200000, // Claude 3.5 Sonnet context length
      supportsStreaming: true,
      capabilities: {
        vision: true,
        functionCalling: true,
        systemMessages: true
      }
    };
  }

  /**
   * Validate the connection by making a minimal (1-token) request.
   * Auth failures are rethrown; other errors are tolerated here because
   * they may be transient and should not block initialization.
   */
  private async validateConnection(): Promise<void> {
    if (!this.client) {
      throw new Error('Client not initialized');
    }
    try {
      await this.client.messages.create({
        model: this.defaultModel,
        max_tokens: 1,
        messages: [{ role: 'user', content: 'Hi' }]
      });
    } catch (error: any) {
      if (error.status === 401 || error.status === 403) {
        throw new AIProviderError(
          'Invalid API key. Please check your Anthropic API key.',
          AIErrorType.AUTHENTICATION,
          error.status
        );
      }
      // Non-auth errors during validation are ignored; they may be temporary
    }
  }

  /**
   * Convert our generic message format to Claude's format.
   * Claude requires system messages to be separate from the conversation;
   * multiple system messages are joined with blank lines.
   */
  private convertMessages(messages: AIMessage[]): { system: string | null; messages: AIMessage[] } {
    let system: string | null = null;
    const conversationMessages: AIMessage[] = [];
    for (const message of messages) {
      if (message.role === 'system') {
        system = system ? system + '\n\n' + message.content : message.content;
      } else {
        conversationMessages.push(message);
      }
    }
    return { system, messages: conversationMessages };
  }

  /**
   * Format Anthropic's response to our standard format.
   * Concatenates all text blocks; non-text blocks are ignored.
   */
  private formatCompletionResponse(response: any): CompletionResponse {
    const content = response.content
      .filter((block: any) => block.type === 'text')
      .map((block: any) => block.text)
      .join('');
    return {
      content,
      model: response.model,
      usage: {
        promptTokens: response.usage.input_tokens,
        completionTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens
      },
      id: response.id,
      metadata: {
        stopReason: response.stop_reason,
        stopSequence: response.stop_sequence
      }
    };
  }

  /**
   * Handle Anthropic-specific errors and convert them to our standard format.
   * @param error - Error from the Anthropic SDK (may carry status/statusCode)
   */
  private handleAnthropicError(error: any): AIProviderError {
    if (error instanceof AIProviderError) {
      return error;
    }
    const status = error.status || error.statusCode;
    const message = error.message || 'Unknown Anthropic API error';
    switch (status) {
      case 400:
        return new AIProviderError(
          `Invalid request: ${message}`,
          AIErrorType.INVALID_REQUEST,
          status,
          error
        );
      case 401:
        return new AIProviderError(
          'Authentication failed. Please check your Anthropic API key.',
          AIErrorType.AUTHENTICATION,
          status,
          error
        );
      case 403:
        return new AIProviderError(
          'Access forbidden. Please check your API key permissions.',
          AIErrorType.AUTHENTICATION,
          status,
          error
        );
      case 404:
        return new AIProviderError(
          'Model not found. Please check the model name.',
          AIErrorType.MODEL_NOT_FOUND,
          status,
          error
        );
      case 429:
        return new AIProviderError(
          'Rate limit exceeded. Please slow down your requests.',
          AIErrorType.RATE_LIMIT,
          status,
          error
        );
      case 500:
      case 502:
      case 503:
      case 504:
        return new AIProviderError(
          'Anthropic service temporarily unavailable. Please try again later.',
          AIErrorType.NETWORK,
          status,
          error
        );
      default:
        return new AIProviderError(
          `Anthropic API error: ${message}`,
          AIErrorType.UNKNOWN,
          status,
          error
        );
    }
  }
}

7
src/providers/index.ts Normal file
View File

@@ -0,0 +1,7 @@
/**
 * Provider exports
 * Centralizes all AI provider implementations for easy importing.
 * Add new provider exports here as they are implemented.
 */
export { BaseAIProvider } from './base.js';
export { ClaudeProvider, type ClaudeConfig } from './claude.js';

147
src/types/index.ts Normal file
View File

@@ -0,0 +1,147 @@
/**
 * Core types and interfaces for the Simple AI Provider package
 * Defines the contract that all AI providers must implement
 */
/**
 * Configuration options for AI providers
 */
export interface AIProviderConfig {
/** API key for the AI service */
apiKey: string;
/** Optional base URL for custom endpoints */
baseUrl?: string;
/** Request timeout in milliseconds (default: 30000) */
timeout?: number;
/** Maximum number of retry attempts (default: 3) */
maxRetries?: number;
/** Additional provider-specific options. NOTE(review): this index signature disables excess-property checking on configs — consider tightening per provider. */
[key: string]: any;
}
/**
 * Message role types supported by AI providers
 */
export type MessageRole = 'system' | 'user' | 'assistant';
/**
 * Individual message in a conversation
 */
export interface AIMessage {
/** Role of the message sender */
role: MessageRole;
/** Content of the message (must be a non-empty string) */
content: string;
/** Optional metadata for the message */
metadata?: Record<string, any>;
}
/**
 * Parameters for AI completion requests
 */
export interface CompletionParams {
/** Array of messages forming the conversation (must not be empty) */
messages: AIMessage[];
/** Model to use for completion (falls back to the provider's default) */
model?: string;
/** Maximum tokens to generate (positive integer, default: 1000) */
maxTokens?: number;
/** Temperature for randomness (0.0 to 1.0, default: 0.7) */
temperature?: number;
/** Top-p for nucleus sampling (0.0 to 1.0, default: 1.0) */
topP?: number;
/** Stop sequences to end generation */
stopSequences?: string[];
/** Whether to stream the response */
stream?: boolean;
/** Additional provider-specific parameters */
[key: string]: any;
}
/**
 * Usage statistics for a completion
 */
export interface TokenUsage {
/** Number of tokens in the prompt */
promptTokens: number;
/** Number of tokens in the completion */
completionTokens: number;
/** Total number of tokens used (prompt + completion) */
totalTokens: number;
}
/**
 * Response from an AI completion request
 */
export interface CompletionResponse {
/** Generated content */
content: string;
/** Model used for generation */
model: string;
/** Token usage statistics */
usage: TokenUsage;
/** Unique identifier for the request */
id: string;
/** Provider-specific metadata (e.g. stop reason) */
metadata?: Record<string, any>;
}
/**
 * Streaming chunk from an AI completion
 */
export interface CompletionChunk {
/** Content delta for this chunk (empty string on the final chunk) */
content: string;
/** Whether this is the final chunk */
isComplete: boolean;
/** Unique identifier for the request */
id: string;
/** Token usage (only available on final chunk) */
usage?: TokenUsage;
}
/**
 * Categories of failure that can occur during AI operations.
 */
export enum AIErrorType {
  AUTHENTICATION = 'authentication',
  RATE_LIMIT = 'rate_limit',
  INVALID_REQUEST = 'invalid_request',
  MODEL_NOT_FOUND = 'model_not_found',
  NETWORK = 'network',
  TIMEOUT = 'timeout',
  UNKNOWN = 'unknown'
}

/**
 * Error raised by AI providers.
 *
 * Wraps an underlying failure with a category, an optional HTTP status
 * code, and the original error for debugging.
 */
export class AIProviderError extends Error {
  /** Category of the failure. */
  public type: AIErrorType;
  /** HTTP status code from the underlying API, when available. */
  public statusCode?: number;
  /** The error that caused this one, preserved for debugging. */
  public originalError?: Error;

  constructor(message: string, type: AIErrorType, statusCode?: number, originalError?: Error) {
    super(message);
    this.type = type;
    this.statusCode = statusCode;
    this.originalError = originalError;
    this.name = 'AIProviderError';
  }
}
/**
 * Provider information and capabilities
 */
export interface ProviderInfo {
/** Name of the provider */
name: string;
/** Version of the provider implementation */
version: string;
/** List of available models */
models: string[];
/** Maximum context length supported (in tokens) */
maxContextLength: number;
/** Whether streaming is supported */
supportsStreaming: boolean;
/** Additional capabilities (e.g. vision, functionCalling, systemMessages) */
capabilities?: Record<string, any>;
}

104
src/utils/factory.ts Normal file
View File

@@ -0,0 +1,104 @@
/**
* Factory utilities for creating AI providers
* Provides convenient methods for instantiating and configuring providers
*/
import type { AIProviderConfig } from '../types/index.js';
import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
import { BaseAIProvider } from '../providers/base.js';
/**
 * Supported AI provider types.
 * Extend this union when new providers are added.
 */
export type ProviderType = 'claude';
/**
 * Maps each provider type to its specific configuration shape,
 * so createProvider can type-check the config against the type argument.
 */
export interface ProviderConfigMap {
claude: ClaudeConfig;
}
/**
 * Factory function to create AI providers.
 *
 * @param type - The type of provider to create
 * @param config - Configuration matching the chosen provider type
 * @returns Configured AI provider instance
 * @throws Error when the provider type is not supported
 */
export function createProvider<T extends ProviderType>(
  type: T,
  config: ProviderConfigMap[T]
): BaseAIProvider {
  if (type === 'claude') {
    // Cast is needed: TS cannot narrow ProviderConfigMap[T] by a runtime check on T
    return new ClaudeProvider(config as ClaudeConfig);
  }
  throw new Error(`Unsupported provider type: ${type}`);
}
/**
 * Create a Claude provider with simplified configuration.
 *
 * @param apiKey - Anthropic API key
 * @param options - Optional additional configuration (everything except apiKey)
 * @returns Configured Claude provider instance
 */
export function createClaudeProvider(
  apiKey: string,
  options: Partial<Omit<ClaudeConfig, 'apiKey'>> = {}
): ClaudeProvider {
  // Build the full config; options are spread after apiKey, matching the
  // original merge order.
  const config: ClaudeConfig = { apiKey, ...options };
  return new ClaudeProvider(config);
}
/**
 * Provider registry for dynamic provider creation.
 *
 * Maintains a case-insensitive name → constructor map so providers can be
 * instantiated by name at runtime.
 */
export class ProviderRegistry {
  /** Registered provider constructors, keyed by lower-cased name. */
  private static registry = new Map<string, new (config: AIProviderConfig) => BaseAIProvider>();

  /**
   * Register a new provider type.
   * @param name - Name of the provider (matched case-insensitively)
   * @param providerClass - Provider class constructor
   */
  static register(name: string, providerClass: new (config: AIProviderConfig) => BaseAIProvider): void {
    ProviderRegistry.registry.set(name.toLowerCase(), providerClass);
  }

  /**
   * Create a provider by name.
   * @param name - Name of the provider (case-insensitive)
   * @param config - Configuration for the provider
   * @returns Provider instance
   * @throws Error when the name has not been registered
   */
  static create(name: string, config: AIProviderConfig): BaseAIProvider {
    const ctor = ProviderRegistry.registry.get(name.toLowerCase());
    if (ctor === undefined) {
      throw new Error(`Provider '${name}' is not registered`);
    }
    return new ctor(config);
  }

  /**
   * Get list of registered provider names.
   * @returns Array of registered (lower-cased) provider names
   */
  static getRegisteredProviders(): string[] {
    return [...ProviderRegistry.registry.keys()];
  }

  /**
   * Check if a provider is registered.
   * @param name - Name of the provider (case-insensitive)
   * @returns True if provider is registered
   */
  static isRegistered(name: string): boolean {
    return ProviderRegistry.registry.has(name.toLowerCase());
  }
}
// Pre-register built-in providers so ProviderRegistry.create('claude', ...) works out of the box
ProviderRegistry.register('claude', ClaudeProvider);