feat: add Google Gemini provider integration and docs
This commit is contained in:
205
README.md
205
README.md
@@ -1,6 +1,6 @@
|
||||
# Simple AI Provider
|
||||
|
||||
A professional, extensible TypeScript package for integrating multiple AI providers into your applications with a unified interface. Currently supports **Claude (Anthropic)** and **OpenAI (GPT)** with plans to add more providers.
|
||||
A professional, extensible TypeScript package for integrating multiple AI providers into your applications with a unified interface. Currently supports **Claude (Anthropic)**, **OpenAI (GPT)**, and **Google Gemini** with plans to add more providers.
|
||||
|
||||
## Features
|
||||
|
||||
@@ -11,7 +11,7 @@ A professional, extensible TypeScript package for integrating multiple AI provid
|
||||
- 🛡️ **Error Handling**: Robust error handling with categorized error types
|
||||
- 🔧 **Extensible**: Easy to add new AI providers
|
||||
- 📦 **Modern**: Built with ES modules and modern JavaScript features
|
||||
- 🌐 **Multi-Provider**: Switch between Claude and OpenAI seamlessly
|
||||
- 🌐 **Multi-Provider**: Switch between Claude, OpenAI, and Gemini seamlessly
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -71,39 +71,65 @@ const response = await openai.complete({
|
||||
console.log(response.content);
|
||||
```
|
||||
|
||||
### Basic Usage with Gemini
|
||||
|
||||
```typescript
|
||||
import { createGeminiProvider } from 'simple-ai-provider';
|
||||
|
||||
// Create a Gemini provider
|
||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
||||
|
||||
// Initialize the provider
|
||||
await gemini.initialize();
|
||||
|
||||
// Generate a completion
|
||||
const response = await gemini.complete({
|
||||
messages: [
|
||||
{ role: 'user', content: 'Hello! How are you today?' }
|
||||
],
|
||||
maxTokens: 100,
|
||||
temperature: 0.7
|
||||
});
|
||||
|
||||
console.log(response.content);
|
||||
```
|
||||
|
||||
### Multi-Provider Usage
|
||||
|
||||
```typescript
|
||||
import { createProvider, createClaudeProvider, createOpenAIProvider } from 'simple-ai-provider';
|
||||
import { createProvider, createClaudeProvider, createOpenAIProvider, createGeminiProvider } from 'simple-ai-provider';
|
||||
|
||||
// Method 1: Using specific factory functions
|
||||
const claude = createClaudeProvider('your-anthropic-api-key');
|
||||
const openai = createOpenAIProvider('your-openai-api-key');
|
||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
||||
|
||||
// Method 2: Using generic factory
|
||||
const claude2 = createProvider('claude', { apiKey: 'your-anthropic-api-key' });
|
||||
const openai2 = createProvider('openai', { apiKey: 'your-openai-api-key' });
|
||||
const gemini2 = createProvider('gemini', { apiKey: 'your-google-ai-api-key' });
|
||||
|
||||
// Initialize both
|
||||
await Promise.all([claude.initialize(), openai.initialize()]);
|
||||
// Initialize all
|
||||
await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);
|
||||
|
||||
// Use the same interface for both providers
|
||||
// Use the same interface for all providers
|
||||
const prompt = { messages: [{ role: 'user', content: 'Explain AI' }] };
|
||||
|
||||
const claudeResponse = await claude.complete(prompt);
|
||||
const openaiResponse = await openai.complete(prompt);
|
||||
const geminiResponse = await gemini.complete(prompt);
|
||||
```
|
||||
|
||||
### Streaming Responses
|
||||
|
||||
```typescript
|
||||
import { createOpenAIProvider } from 'simple-ai-provider';
|
||||
import { createGeminiProvider } from 'simple-ai-provider';
|
||||
|
||||
const openai = createOpenAIProvider('your-openai-api-key');
|
||||
await openai.initialize();
|
||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
||||
await gemini.initialize();
|
||||
|
||||
// Stream a completion
|
||||
for await (const chunk of openai.stream({
|
||||
for await (const chunk of gemini.stream({
|
||||
messages: [
|
||||
{ role: 'user', content: 'Write a short story about a robot.' }
|
||||
],
|
||||
@@ -120,7 +146,7 @@ for await (const chunk of openai.stream({
|
||||
### Advanced Configuration
|
||||
|
||||
```typescript
|
||||
import { ClaudeProvider, OpenAIProvider } from 'simple-ai-provider';
|
||||
import { ClaudeProvider, OpenAIProvider, GeminiProvider } from 'simple-ai-provider';
|
||||
|
||||
// Claude with custom configuration
|
||||
const claude = new ClaudeProvider({
|
||||
@@ -141,14 +167,28 @@ const openai = new OpenAIProvider({
|
||||
maxRetries: 5
|
||||
});
|
||||
|
||||
await Promise.all([claude.initialize(), openai.initialize()]);
|
||||
// Gemini with safety settings and generation config
|
||||
const gemini = new GeminiProvider({
|
||||
apiKey: 'your-google-ai-api-key',
|
||||
defaultModel: 'gemini-1.5-pro',
|
||||
safetySettings: [], // Configure content filtering
|
||||
generationConfig: {
|
||||
temperature: 0.8,
|
||||
topP: 0.9,
|
||||
topK: 40,
|
||||
maxOutputTokens: 2048
|
||||
},
|
||||
timeout: 45000
|
||||
});
|
||||
|
||||
const response = await openai.complete({
|
||||
await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);
|
||||
|
||||
const response = await gemini.complete({
|
||||
messages: [
|
||||
{ role: 'system', content: 'You are a helpful assistant.' },
|
||||
{ role: 'user', content: 'Explain quantum computing in simple terms.' }
|
||||
],
|
||||
model: 'gpt-4-turbo',
|
||||
model: 'gemini-1.5-flash',
|
||||
maxTokens: 300,
|
||||
temperature: 0.5,
|
||||
topP: 0.9,
|
||||
@@ -216,6 +256,20 @@ const openai = createOpenAIProvider('your-api-key', {
|
||||
});
|
||||
```
|
||||
|
||||
#### `createGeminiProvider(apiKey, options?)`
|
||||
Creates a Gemini provider with simplified configuration.
|
||||
|
||||
```typescript
|
||||
const gemini = createGeminiProvider('your-api-key', {
|
||||
defaultModel: 'gemini-1.5-pro',
|
||||
safetySettings: [],
|
||||
generationConfig: {
|
||||
temperature: 0.8,
|
||||
topK: 40
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
#### `createProvider(type, config)`
|
||||
Generic factory function for creating any provider type.
|
||||
|
||||
@@ -229,6 +283,11 @@ const openai = createProvider('openai', {
|
||||
apiKey: 'your-api-key',
|
||||
defaultModel: 'gpt-4'
|
||||
});
|
||||
|
||||
const gemini = createProvider('gemini', {
|
||||
apiKey: 'your-api-key',
|
||||
defaultModel: 'gemini-1.5-flash'
|
||||
});
|
||||
```
|
||||
|
||||
### Provider Methods
|
||||
@@ -297,6 +356,13 @@ try {
|
||||
- `gpt-3.5-turbo-0125`
|
||||
- `gpt-3.5-turbo-1106`
|
||||
|
||||
### Google Gemini
|
||||
- `gemini-1.5-flash` (default)
|
||||
- `gemini-1.5-flash-8b`
|
||||
- `gemini-1.5-pro`
|
||||
- `gemini-1.0-pro`
|
||||
- `gemini-1.0-pro-vision`
|
||||
|
||||
## Environment Variables
|
||||
|
||||
You can set your API keys as environment variables:
|
||||
@@ -304,35 +370,42 @@ You can set your API keys as environment variables:
|
||||
```bash
|
||||
export ANTHROPIC_API_KEY="your-anthropic-api-key"
|
||||
export OPENAI_API_KEY="your-openai-api-key"
|
||||
export GOOGLE_AI_API_KEY="your-google-ai-api-key"
|
||||
```
|
||||
|
||||
```typescript
|
||||
const claude = createClaudeProvider(process.env.ANTHROPIC_API_KEY!);
|
||||
const openai = createOpenAIProvider(process.env.OPENAI_API_KEY!);
|
||||
const gemini = createGeminiProvider(process.env.GOOGLE_AI_API_KEY!);
|
||||
```
|
||||
|
||||
## Provider Comparison
|
||||
|
||||
| Feature | Claude | OpenAI |
|
||||
|---------|--------|--------|
|
||||
| **Models** | 5 models | 8+ models |
|
||||
| **Max Context** | 200K tokens | 128K tokens |
|
||||
| **Streaming** | ✅ | ✅ |
|
||||
| **Vision** | ✅ | ✅ |
|
||||
| **Function Calling** | ✅ | ✅ |
|
||||
| **JSON Mode** | ❌ | ✅ |
|
||||
| **System Messages** | ✅ (separate) | ✅ (inline) |
|
||||
| Feature | Claude | OpenAI | Gemini |
|
||||
|---------|--------|--------|--------|
|
||||
| **Models** | 5 models | 8+ models | 5 models |
|
||||
| **Max Context** | 200K tokens | 128K tokens | 1M tokens |
|
||||
| **Streaming** | ✅ | ✅ | ✅ |
|
||||
| **Vision** | ✅ | ✅ | ✅ |
|
||||
| **Function Calling** | ✅ | ✅ | ✅ |
|
||||
| **JSON Mode** | ❌ | ✅ | ❌ |
|
||||
| **System Messages** | ✅ (separate) | ✅ (inline) | ✅ (separate) |
|
||||
| **Multimodal** | ✅ | ✅ | ✅ |
|
||||
| **Safety Controls** | Basic | Basic | Advanced |
|
||||
| **Special Features** | Advanced reasoning | JSON mode, plugins | Largest context, advanced safety |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always initialize providers** before using them
|
||||
2. **Handle errors gracefully** with proper error types
|
||||
3. **Use appropriate models** for your use case (speed vs. capability)
|
||||
3. **Use appropriate models** for your use case (speed vs. capability vs. context)
|
||||
4. **Set reasonable timeouts** for your application
|
||||
5. **Implement retry logic** for production applications
|
||||
6. **Monitor token usage** to control costs
|
||||
7. **Use environment variables** for API keys
|
||||
8. **Consider provider-specific features** when choosing
|
||||
9. **Configure safety settings** appropriately for Gemini
|
||||
10. **Leverage large context** capabilities of Gemini for complex tasks
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
@@ -342,70 +415,51 @@ const openai = createOpenAIProvider(process.env.OPENAI_API_KEY!);
|
||||
import { ProviderRegistry } from 'simple-ai-provider';
|
||||
|
||||
// List all registered providers
|
||||
console.log(ProviderRegistry.getRegisteredProviders()); // ['claude', 'openai']
|
||||
console.log(ProviderRegistry.getRegisteredProviders()); // ['claude', 'openai', 'gemini']
|
||||
|
||||
// Create provider by name
|
||||
const provider = ProviderRegistry.create('openai', {
|
||||
const provider = ProviderRegistry.create('gemini', {
|
||||
apiKey: 'your-api-key'
|
||||
});
|
||||
|
||||
// Check if provider is registered
|
||||
if (ProviderRegistry.isRegistered('claude')) {
|
||||
console.log('Claude is available!');
|
||||
if (ProviderRegistry.isRegistered('gemini')) {
|
||||
console.log('Gemini is available!');
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Error Handling
|
||||
### Gemini-Specific Features
|
||||
|
||||
```typescript
|
||||
function handleAIError(error: unknown, providerName: string) {
|
||||
if (error instanceof AIProviderError) {
|
||||
console.error(`${providerName} Error (${error.type}):`, error.message);
|
||||
|
||||
if (error.statusCode) {
|
||||
console.error('HTTP Status:', error.statusCode);
|
||||
}
|
||||
|
||||
if (error.originalError) {
|
||||
console.error('Original Error:', error.originalError.message);
|
||||
import { createGeminiProvider } from 'simple-ai-provider';
|
||||
|
||||
const gemini = createGeminiProvider('your-api-key', {
|
||||
defaultModel: 'gemini-1.5-pro',
|
||||
safetySettings: [
|
||||
{
|
||||
category: 'HARM_CATEGORY_HARASSMENT',
|
||||
threshold: 'BLOCK_MEDIUM_AND_ABOVE'
|
||||
}
|
||||
],
|
||||
generationConfig: {
|
||||
temperature: 0.9,
|
||||
topP: 0.8,
|
||||
topK: 40,
|
||||
maxOutputTokens: 2048,
|
||||
stopSequences: ['END', 'STOP']
|
||||
}
|
||||
}
|
||||
```
|
||||
});
|
||||
|
||||
## Extending the Package
|
||||
await gemini.initialize();
|
||||
|
||||
To add a new AI provider, extend the `BaseAIProvider` class:
|
||||
|
||||
```typescript
|
||||
import { BaseAIProvider } from 'simple-ai-provider';
|
||||
|
||||
class MyCustomProvider extends BaseAIProvider {
|
||||
protected async doInitialize(): Promise<void> {
|
||||
// Initialize your provider
|
||||
}
|
||||
|
||||
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
|
||||
// Implement completion logic
|
||||
}
|
||||
|
||||
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
|
||||
// Implement streaming logic
|
||||
}
|
||||
|
||||
public getInfo(): ProviderInfo {
|
||||
return {
|
||||
name: 'MyCustomProvider',
|
||||
version: '1.0.0',
|
||||
models: ['my-model'],
|
||||
maxContextLength: 4096,
|
||||
supportsStreaming: true
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Register your provider
|
||||
ProviderRegistry.register('mycustom', MyCustomProvider);
|
||||
// Large context example (up to 1M tokens)
|
||||
const response = await gemini.complete({
|
||||
messages: [
|
||||
{ role: 'system', content: 'You are analyzing a large document.' },
|
||||
{ role: 'user', content: 'Your very large text here...' }
|
||||
],
|
||||
maxTokens: 2048
|
||||
});
|
||||
```
|
||||
|
||||
## Contributing
|
||||
@@ -422,8 +476,11 @@ MIT
|
||||
- Initial release
|
||||
- Claude provider implementation
|
||||
- OpenAI provider implementation
|
||||
- Streaming support for both providers
|
||||
- Gemini provider implementation
|
||||
- Streaming support for all providers
|
||||
- Comprehensive error handling
|
||||
- TypeScript support
|
||||
- Provider registry system
|
||||
- Multi-provider examples
|
||||
- Large context support (Gemini)
|
||||
- Advanced safety controls (Gemini)
|
||||
|
Reference in New Issue
Block a user