# Simple AI Provider

A professional, extensible TypeScript package for integrating multiple AI providers into your applications with a unified interface. Currently supports **Claude (Anthropic)**, **OpenAI (GPT)**, and **Google Gemini** with plans to add more providers.

## Features

- 🎯 **Unified Interface**: Same API across all AI providers
- 🔒 **Type-Safe**: Full TypeScript support with comprehensive type definitions
- 🚀 **Easy to Use**: Simple factory functions and intuitive configuration
- 📡 **Streaming Support**: Real-time streaming responses where supported
- 🛡️ **Error Handling**: Robust error handling with categorized error types
- 🔧 **Extensible**: Easy to add new AI providers
- 📦 **Modern**: Built with ES modules and modern JavaScript features
- 🌐 **Multi-Provider**: Switch between Claude, OpenAI, and Gemini seamlessly

## Installation

```bash
npm install simple-ai-provider
# or
yarn add simple-ai-provider
# or
bun add simple-ai-provider
```

## Quick Start

### Basic Usage with Claude

```typescript
import { createClaudeProvider } from 'simple-ai-provider';

// Create a Claude provider
const claude = createClaudeProvider('your-anthropic-api-key');

// Initialize the provider
await claude.initialize();

// Generate a completion
const response = await claude.complete({
  messages: [
    { role: 'user', content: 'Hello! How are you today?' }
  ],
  maxTokens: 100,
  temperature: 0.7
});

console.log(response.content);
```

### Basic Usage with OpenAI

```typescript
import { createOpenAIProvider } from 'simple-ai-provider';

// Create an OpenAI provider
const openai = createOpenAIProvider('your-openai-api-key');

// Initialize the provider
await openai.initialize();

// Generate a completion
const response = await openai.complete({
  messages: [
    { role: 'user', content: 'Hello! How are you today?' }
  ],
  maxTokens: 100,
  temperature: 0.7
});

console.log(response.content);
```

### Basic Usage with Gemini

```typescript
import { createGeminiProvider } from 'simple-ai-provider';

// Create a Gemini provider
const gemini = createGeminiProvider('your-google-ai-api-key');

// Initialize the provider
await gemini.initialize();

// Generate a completion
const response = await gemini.complete({
  messages: [
    { role: 'user', content: 'Hello! How are you today?' }
  ],
  maxTokens: 100,
  temperature: 0.7
});

console.log(response.content);
```

### Multi-Provider Usage

```typescript
import { createProvider, createClaudeProvider, createOpenAIProvider, createGeminiProvider } from 'simple-ai-provider';

// Method 1: Using specific factory functions
const claude = createClaudeProvider('your-anthropic-api-key');
const openai = createOpenAIProvider('your-openai-api-key');
const gemini = createGeminiProvider('your-google-ai-api-key');

// Method 2: Using generic factory
const claude2 = createProvider('claude', { apiKey: 'your-anthropic-api-key' });
const openai2 = createProvider('openai', { apiKey: 'your-openai-api-key' });
const gemini2 = createProvider('gemini', { apiKey: 'your-google-ai-api-key' });

// Initialize all
await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);

// Use the same interface for all providers
const prompt = { messages: [{ role: 'user', content: 'Explain AI' }] };

const claudeResponse = await claude.complete(prompt);
const openaiResponse = await openai.complete(prompt);
const geminiResponse = await gemini.complete(prompt);
```

### Streaming Responses

```typescript
import { createGeminiProvider } from 'simple-ai-provider';

const gemini = createGeminiProvider('your-google-ai-api-key');
await gemini.initialize();

// Stream a completion
for await (const chunk of gemini.stream({
  messages: [
    { role: 'user', content: 'Write a short story about a robot.' }
  ],
  maxTokens: 500
})) {
  if (!chunk.isComplete) {
    process.stdout.write(chunk.content);
  } else {
    console.log('\n\nUsage:', chunk.usage);
  }
}
```

### Advanced Configuration

```typescript
import { ClaudeProvider, OpenAIProvider, GeminiProvider } from 'simple-ai-provider';

// Claude with custom configuration
const claude = new ClaudeProvider({
  apiKey: 'your-anthropic-api-key',
  defaultModel: 'claude-3-5-sonnet-20241022',
  timeout: 30000,
  maxRetries: 3,
  baseUrl: 'https://api.anthropic.com' // optional custom endpoint
});

// OpenAI with organization and project
const openai = new OpenAIProvider({
  apiKey: 'your-openai-api-key',
  defaultModel: 'gpt-4',
  organization: 'org-your-org-id',
  project: 'proj-your-project-id',
  timeout: 60000,
  maxRetries: 5
});

// Gemini with safety settings and generation config
const gemini = new GeminiProvider({
  apiKey: 'your-google-ai-api-key',
  defaultModel: 'gemini-1.5-pro',
  safetySettings: [], // Configure content filtering
  generationConfig: {
    temperature: 0.8,
    topP: 0.9,
    topK: 40,
    maxOutputTokens: 2048
  },
  timeout: 45000
});

await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);

const response = await gemini.complete({
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Explain quantum computing in simple terms.' }
  ],
  model: 'gemini-1.5-flash',
  maxTokens: 300,
  temperature: 0.5,
  topP: 0.9,
  stopSequences: ['\n\n']
});
```

## API Reference

### Core Types

#### `AIMessage`

```typescript
interface AIMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
  metadata?: Record<string, any>;
}
```

#### `CompletionParams`

```typescript
interface CompletionParams {
  messages: AIMessage[];
  model?: string;
  maxTokens?: number;
  temperature?: number;
  topP?: number;
  stopSequences?: string[];
  stream?: boolean;
}
```

#### `CompletionResponse`

```typescript
interface CompletionResponse {
  content: string;
  model: string;
  usage: TokenUsage;
  id: string;
  metadata?: Record<string, any>;
}
```

### Factory Functions

#### `createClaudeProvider(apiKey, options?)`

Creates a Claude provider with simplified configuration.

```typescript
const claude = createClaudeProvider('your-api-key', {
  defaultModel: 'claude-3-5-sonnet-20241022',
  timeout: 30000
});
```

#### `createOpenAIProvider(apiKey, options?)`

Creates an OpenAI provider with simplified configuration.

```typescript
const openai = createOpenAIProvider('your-api-key', {
  defaultModel: 'gpt-4',
  organization: 'org-123',
  timeout: 60000
});
```

#### `createGeminiProvider(apiKey, options?)`

Creates a Gemini provider with simplified configuration.

```typescript
const gemini = createGeminiProvider('your-api-key', {
  defaultModel: 'gemini-1.5-pro',
  safetySettings: [],
  generationConfig: {
    temperature: 0.8,
    topK: 40
  }
});
```

#### `createProvider(type, config)`

Generic factory function for creating any provider type.

```typescript
const claude = createProvider('claude', {
  apiKey: 'your-api-key',
  defaultModel: 'claude-3-5-sonnet-20241022'
});

const openai = createProvider('openai', {
  apiKey: 'your-api-key',
  defaultModel: 'gpt-4'
});

const gemini = createProvider('gemini', {
  apiKey: 'your-api-key',
  defaultModel: 'gemini-1.5-flash'
});
```

### Provider Methods

#### `initialize(): Promise<void>`

Initializes the provider and validates the configuration.

#### `complete(params): Promise<CompletionResponse>`

Generates a completion based on the provided parameters.

#### `stream(params): AsyncIterable<CompletionChunk>`

Generates a streaming completion.

#### `getInfo(): ProviderInfo`

Returns information about the provider and its capabilities.

#### `isInitialized(): boolean`

Checks if the provider has been initialized.

## Error Handling

The package provides comprehensive error handling with categorized error types:

```typescript
import { AIProviderError, AIErrorType } from 'simple-ai-provider';

try {
  const response = await openai.complete({
    messages: [{ role: 'user', content: 'Hello!' }]
  });
} catch (error) {
  if (error instanceof AIProviderError) {
    switch (error.type) {
      case AIErrorType.AUTHENTICATION:
        console.error('Invalid API key');
        break;
      case AIErrorType.RATE_LIMIT:
        console.error('Rate limit exceeded');
        break;
      case AIErrorType.INVALID_REQUEST:
        console.error('Invalid request parameters');
        break;
      default:
        console.error('Unknown error:', error.message);
    }
  }
}
```

## Supported Models

### Claude (Anthropic)

- `claude-3-5-sonnet-20241022` (default)
- `claude-3-5-haiku-20241022`
- `claude-3-opus-20240229`
- `claude-3-sonnet-20240229`
- `claude-3-haiku-20240307`

### OpenAI (GPT)

- `gpt-4` (default)
- `gpt-4-turbo`
- `gpt-4-turbo-preview`
- `gpt-4-0125-preview`
- `gpt-4-1106-preview`
- `gpt-3.5-turbo`
- `gpt-3.5-turbo-0125`
- `gpt-3.5-turbo-1106`

### Google Gemini

- `gemini-1.5-flash` (default)
- `gemini-1.5-flash-8b`
- `gemini-1.5-pro`
- `gemini-1.0-pro`
- `gemini-1.0-pro-vision`

## Environment Variables

You can set your API keys as environment variables:

```bash
export ANTHROPIC_API_KEY="your-anthropic-api-key"
export OPENAI_API_KEY="your-openai-api-key"
export GOOGLE_AI_API_KEY="your-google-ai-api-key"
```

```typescript
const claude = createClaudeProvider(process.env.ANTHROPIC_API_KEY!);
const openai = createOpenAIProvider(process.env.OPENAI_API_KEY!);
const gemini = createGeminiProvider(process.env.GOOGLE_AI_API_KEY!);
```

## Provider Comparison

| Feature | Claude | OpenAI | Gemini |
|---------|--------|--------|--------|
| **Models** | 5 models | 8+ models | 5 models |
| **Max Context** | 200K tokens | 128K tokens | 1M tokens |
| **Streaming** | ✅ | ✅ | ✅ |
| **Vision** | ✅ | ✅ | ✅ |
| **Function Calling** | ✅ | ✅ | ✅ |
| **JSON Mode** | ❌ | ✅ | ❌ |
| **System Messages** | ✅ (separate) | ✅ (inline) | ✅ (separate) |
| **Multimodal** | ✅ | ✅ | ✅ |
| **Safety Controls** | Basic | Basic | Advanced |
| **Special Features** | Advanced reasoning | JSON mode, plugins | Largest context, advanced safety |

## Best Practices

1. **Always initialize providers** before using them
2. **Handle errors gracefully** with proper error types
3. **Use appropriate models** for your use case (speed vs. capability vs. context)
4. **Set reasonable timeouts** for your application
5. **Implement retry logic** for production applications
6. **Monitor token usage** to control costs
7. **Use environment variables** for API keys
8. **Consider provider-specific features** when choosing
9. **Configure safety settings** appropriately for Gemini
10. **Leverage large context** capabilities of Gemini for complex tasks

## Advanced Usage

### Provider Registry

```typescript
import { ProviderRegistry } from 'simple-ai-provider';

// List all registered providers
console.log(ProviderRegistry.getRegisteredProviders()); // ['claude', 'openai', 'gemini']

// Create provider by name
const provider = ProviderRegistry.create('gemini', {
  apiKey: 'your-api-key'
});

// Check if provider is registered
if (ProviderRegistry.isRegistered('gemini')) {
  console.log('Gemini is available!');
}
```

### Gemini-Specific Features

```typescript
import { createGeminiProvider } from 'simple-ai-provider';

const gemini = createGeminiProvider('your-api-key', {
  defaultModel: 'gemini-1.5-pro',
  safetySettings: [
    {
      category: 'HARM_CATEGORY_HARASSMENT',
      threshold: 'BLOCK_MEDIUM_AND_ABOVE'
    }
  ],
  generationConfig: {
    temperature: 0.9,
    topP: 0.8,
    topK: 40,
    maxOutputTokens: 2048,
    stopSequences: ['END', 'STOP']
  }
});

await gemini.initialize();

// Large context example (up to 1M tokens)
const response = await gemini.complete({
  messages: [
    { role: 'system', content: 'You are analyzing a large document.' },
    { role: 'user', content: 'Your very large text here...' }
  ],
  maxTokens: 2048
});
```

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.

## License

MIT

## Changelog

### 1.0.0

- Initial release
- Claude provider implementation
- OpenAI provider implementation
- Gemini provider implementation
- Streaming support for all providers
- Comprehensive error handling
- TypeScript support
- Provider registry system
- Multi-provider examples
- Large context support (Gemini)
- Advanced safety controls (Gemini)