feat(docs): update README with OpenWebUI support details
This commit is contained in:
760
README.md
760
README.md
@ -1,440 +1,135 @@
|
|||||||
# Simple AI Provider
|
# Simple AI Provider
|
||||||
|
|
||||||
A professional, extensible TypeScript package for integrating multiple AI providers into your applications with a unified interface. Currently supports **Claude (Anthropic)**, **OpenAI (GPT)**, and **Google Gemini** with plans to add more providers.
|
A professional, type-safe TypeScript package that provides a unified interface for multiple AI providers. Currently supports **Claude (Anthropic)**, **OpenAI**, **Google Gemini**, and **OpenWebUI** with a consistent API across all providers.
|
||||||
|
|
||||||
## Features
|
## ✨ Features
|
||||||
|
|
||||||
- 🎯 **Unified Interface**: Same API across all AI providers
|
- 🔗 **Unified Interface**: Same API for Claude, OpenAI, Gemini, and OpenWebUI
|
||||||
- 🔒 **Type-Safe**: Full TypeScript support with comprehensive type definitions
|
- 🎯 **Type Safety**: Full TypeScript support with comprehensive type definitions
|
||||||
- 🚀 **Easy to Use**: Simple factory functions and intuitive configuration
|
- 🚀 **Streaming Support**: Real-time response streaming for all providers
|
||||||
- 📡 **Streaming Support**: Real-time streaming responses where supported
|
- 🛡️ **Error Handling**: Standardized error types with provider-specific details
|
||||||
- 🛡️ **Error Handling**: Robust error handling with categorized error types
|
- 🏭 **Factory Pattern**: Easy provider creation and management
|
||||||
- 🔧 **Extensible**: Easy to add new AI providers
|
- 🔧 **Configurable**: Extensive configuration options for each provider
|
||||||
- 📦 **Modern**: Built with ES modules and modern JavaScript features
|
- 📦 **Zero Dependencies**: Lightweight with minimal external dependencies
|
||||||
- 🌐 **Multi-Provider**: Switch between Claude, OpenAI, and Gemini seamlessly
|
- 🌐 **Local Support**: OpenWebUI integration for local/private AI models
|
||||||
|
|
||||||
## Installation
|
## 🚀 Quick Start
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install simple-ai-provider
|
npm install simple-ai-provider
|
||||||
# or
|
# or
|
||||||
yarn add simple-ai-provider
|
|
||||||
# or
|
|
||||||
bun add simple-ai-provider
|
bun add simple-ai-provider
|
||||||
```
|
```
|
||||||
|
|
||||||
## Quick Start
|
### Basic Usage
|
||||||
|
|
||||||
### Basic Usage with Claude
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { createClaudeProvider } from 'simple-ai-provider';
|
import { ClaudeProvider, OpenAIProvider, GeminiProvider, OpenWebUIProvider } from 'simple-ai-provider';
|
||||||
|
|
||||||
// Create a Claude provider
|
// Claude
|
||||||
const claude = createClaudeProvider('your-anthropic-api-key');
|
const claude = new ClaudeProvider({
|
||||||
|
apiKey: process.env.ANTHROPIC_API_KEY!,
|
||||||
|
defaultModel: 'claude-3-5-sonnet-20241022'
|
||||||
|
});
|
||||||
|
|
||||||
// Initialize the provider
|
// OpenAI
|
||||||
|
const openai = new OpenAIProvider({
|
||||||
|
apiKey: process.env.OPENAI_API_KEY!,
|
||||||
|
defaultModel: 'gpt-4o'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Google Gemini
|
||||||
|
const gemini = new GeminiProvider({
|
||||||
|
apiKey: process.env.GOOGLE_AI_API_KEY!,
|
||||||
|
defaultModel: 'gemini-1.5-flash'
|
||||||
|
});
|
||||||
|
|
||||||
|
// OpenWebUI (local)
|
||||||
|
const openwebui = new OpenWebUIProvider({
|
||||||
|
apiKey: 'ollama', // Often not required
|
||||||
|
baseUrl: 'http://localhost:3000',
|
||||||
|
defaultModel: 'llama2'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initialize and use any provider
|
||||||
await claude.initialize();
|
await claude.initialize();
|
||||||
|
|
||||||
// Generate a completion
|
|
||||||
const response = await claude.complete({
|
const response = await claude.complete({
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Hello! How are you today?' }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log(response.content);
|
|
||||||
```
|
|
||||||
|
|
||||||
### Basic Usage with OpenAI
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { createOpenAIProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
// Create an OpenAI provider
|
|
||||||
const openai = createOpenAIProvider('your-openai-api-key');
|
|
||||||
|
|
||||||
// Initialize the provider
|
|
||||||
await openai.initialize();
|
|
||||||
|
|
||||||
// Generate a completion
|
|
||||||
const response = await openai.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Hello! How are you today?' }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log(response.content);
|
|
||||||
```
|
|
||||||
|
|
||||||
### Basic Usage with Gemini
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { createGeminiProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
// Create a Gemini provider
|
|
||||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
|
||||||
|
|
||||||
// Initialize the provider
|
|
||||||
await gemini.initialize();
|
|
||||||
|
|
||||||
// Generate a completion
|
|
||||||
const response = await gemini.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Hello! How are you today?' }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log(response.content);
|
|
||||||
```
|
|
||||||
|
|
||||||
### Multi-Provider Usage
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { createProvider, createClaudeProvider, createOpenAIProvider, createGeminiProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
// Method 1: Using specific factory functions
|
|
||||||
const claude = createClaudeProvider('your-anthropic-api-key');
|
|
||||||
const openai = createOpenAIProvider('your-openai-api-key');
|
|
||||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
|
||||||
|
|
||||||
// Method 2: Using generic factory
|
|
||||||
const claude2 = createProvider('claude', { apiKey: 'your-anthropic-api-key' });
|
|
||||||
const openai2 = createProvider('openai', { apiKey: 'your-openai-api-key' });
|
|
||||||
const gemini2 = createProvider('gemini', { apiKey: 'your-google-ai-api-key' });
|
|
||||||
|
|
||||||
// Initialize all
|
|
||||||
await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);
|
|
||||||
|
|
||||||
// Use the same interface for all providers
|
|
||||||
const prompt = { messages: [{ role: 'user', content: 'Explain AI' }] };
|
|
||||||
|
|
||||||
const claudeResponse = await claude.complete(prompt);
|
|
||||||
const openaiResponse = await openai.complete(prompt);
|
|
||||||
const geminiResponse = await gemini.complete(prompt);
|
|
||||||
```
|
|
||||||
|
|
||||||
### Streaming Responses
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { createGeminiProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
const gemini = createGeminiProvider('your-google-ai-api-key');
|
|
||||||
await gemini.initialize();
|
|
||||||
|
|
||||||
// Stream a completion
|
|
||||||
for await (const chunk of gemini.stream({
|
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Write a short story about a robot.' }
|
|
||||||
],
|
|
||||||
maxTokens: 500
|
|
||||||
})) {
|
|
||||||
if (!chunk.isComplete) {
|
|
||||||
process.stdout.write(chunk.content);
|
|
||||||
} else {
|
|
||||||
console.log('\n\nUsage:', chunk.usage);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Advanced Configuration
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { ClaudeProvider, OpenAIProvider, GeminiProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
// Claude with custom configuration
|
|
||||||
const claude = new ClaudeProvider({
|
|
||||||
apiKey: 'your-anthropic-api-key',
|
|
||||||
defaultModel: 'claude-3-5-sonnet-20241022',
|
|
||||||
timeout: 30000,
|
|
||||||
maxRetries: 3,
|
|
||||||
baseUrl: 'https://api.anthropic.com' // optional custom endpoint
|
|
||||||
});
|
|
||||||
|
|
||||||
// OpenAI with organization and project
|
|
||||||
const openai = new OpenAIProvider({
|
|
||||||
apiKey: 'your-openai-api-key',
|
|
||||||
defaultModel: 'gpt-4',
|
|
||||||
organization: 'org-your-org-id',
|
|
||||||
project: 'proj-your-project-id',
|
|
||||||
timeout: 60000,
|
|
||||||
maxRetries: 5
|
|
||||||
});
|
|
||||||
|
|
||||||
// Gemini with safety settings and generation config
|
|
||||||
const gemini = new GeminiProvider({
|
|
||||||
apiKey: 'your-google-ai-api-key',
|
|
||||||
defaultModel: 'gemini-1.5-pro',
|
|
||||||
safetySettings: [], // Configure content filtering
|
|
||||||
generationConfig: {
|
|
||||||
temperature: 0.8,
|
|
||||||
topP: 0.9,
|
|
||||||
topK: 40,
|
|
||||||
maxOutputTokens: 2048
|
|
||||||
},
|
|
||||||
timeout: 45000
|
|
||||||
});
|
|
||||||
|
|
||||||
await Promise.all([claude.initialize(), openai.initialize(), gemini.initialize()]);
|
|
||||||
|
|
||||||
const response = await gemini.complete({
|
|
||||||
messages: [
|
messages: [
|
||||||
{ role: 'system', content: 'You are a helpful assistant.' },
|
{ role: 'system', content: 'You are a helpful assistant.' },
|
||||||
{ role: 'user', content: 'Explain quantum computing in simple terms.' }
|
{ role: 'user', content: 'Explain TypeScript in one sentence.' }
|
||||||
],
|
],
|
||||||
model: 'gemini-1.5-flash',
|
maxTokens: 100,
|
||||||
maxTokens: 300,
|
temperature: 0.7
|
||||||
temperature: 0.5,
|
|
||||||
topP: 0.9,
|
|
||||||
stopSequences: ['\n\n']
|
|
||||||
});
|
});
|
||||||
|
|
||||||
|
console.log(response.content);
|
||||||
```
|
```
|
||||||
|
|
||||||
## API Reference
|
## 🏭 Factory Functions
|
||||||
|
|
||||||
### Core Types
|
Create providers using factory functions for cleaner code:
|
||||||
|
|
||||||
#### `AIMessage`
|
|
||||||
```typescript
|
```typescript
|
||||||
interface AIMessage {
|
import { createProvider, createClaudeProvider, createOpenAIProvider, createGeminiProvider, createOpenWebUIProvider } from 'simple-ai-provider';
|
||||||
role: 'system' | 'user' | 'assistant';
|
|
||||||
content: string;
|
// Method 1: Specific factory functions
|
||||||
metadata?: Record<string, any>;
|
const claude = createClaudeProvider({ apiKey: 'your-key' });
|
||||||
}
|
const openai = createOpenAIProvider({ apiKey: 'your-key' });
|
||||||
|
const gemini = createGeminiProvider({ apiKey: 'your-key' });
|
||||||
|
const openwebui = createOpenWebUIProvider({ apiKey: 'your-key', baseUrl: 'http://localhost:3000' });
|
||||||
|
|
||||||
|
// Method 2: Generic factory
|
||||||
|
const provider = createProvider('claude', { apiKey: 'your-key' });
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `CompletionParams`
|
## 📝 Environment Variables
|
||||||
```typescript
|
|
||||||
interface CompletionParams {
|
Set up your API keys:
|
||||||
messages: AIMessage[];
|
|
||||||
model?: string;
|
```bash
|
||||||
maxTokens?: number;
|
# Required for respective providers
|
||||||
temperature?: number;
|
export ANTHROPIC_API_KEY="your-claude-api-key"
|
||||||
topP?: number;
|
export OPENAI_API_KEY="your-openai-api-key"
|
||||||
stopSequences?: string[];
|
export GOOGLE_AI_API_KEY="your-gemini-api-key"
|
||||||
stream?: boolean;
|
|
||||||
}
|
# OpenWebUI Bearer Token (get from Settings > Account in OpenWebUI)
|
||||||
|
export OPENWEBUI_API_KEY="your-bearer-token"
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `CompletionResponse`
|
## 🔧 Provider-Specific Configuration
|
||||||
```typescript
|
|
||||||
interface CompletionResponse {
|
|
||||||
content: string;
|
|
||||||
model: string;
|
|
||||||
usage: TokenUsage;
|
|
||||||
id: string;
|
|
||||||
metadata?: Record<string, any>;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Factory Functions
|
### Claude Configuration
|
||||||
|
|
||||||
#### `createClaudeProvider(apiKey, options?)`
|
|
||||||
Creates a Claude provider with simplified configuration.
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
const claude = createClaudeProvider('your-api-key', {
|
const claude = new ClaudeProvider({
|
||||||
|
apiKey: 'your-api-key',
|
||||||
defaultModel: 'claude-3-5-sonnet-20241022',
|
defaultModel: 'claude-3-5-sonnet-20241022',
|
||||||
|
version: '2023-06-01',
|
||||||
|
maxRetries: 3,
|
||||||
timeout: 30000
|
timeout: 30000
|
||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `createOpenAIProvider(apiKey, options?)`
|
### OpenAI Configuration
|
||||||
Creates an OpenAI provider with simplified configuration.
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
const openai = createOpenAIProvider('your-api-key', {
|
const openai = new OpenAIProvider({
|
||||||
defaultModel: 'gpt-4',
|
|
||||||
organization: 'org-123',
|
|
||||||
timeout: 60000
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `createGeminiProvider(apiKey, options?)`
|
|
||||||
Creates a Gemini provider with simplified configuration.
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
const gemini = createGeminiProvider('your-api-key', {
|
|
||||||
defaultModel: 'gemini-1.5-pro',
|
|
||||||
safetySettings: [],
|
|
||||||
generationConfig: {
|
|
||||||
temperature: 0.8,
|
|
||||||
topK: 40
|
|
||||||
}
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `createProvider(type, config)`
|
|
||||||
Generic factory function for creating any provider type.
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
const claude = createProvider('claude', {
|
|
||||||
apiKey: 'your-api-key',
|
apiKey: 'your-api-key',
|
||||||
defaultModel: 'claude-3-5-sonnet-20241022'
|
defaultModel: 'gpt-4o',
|
||||||
|
organization: 'your-org-id',
|
||||||
|
project: 'your-project-id',
|
||||||
|
maxRetries: 3,
|
||||||
|
timeout: 30000
|
||||||
});
|
});
|
||||||
|
```
|
||||||
|
|
||||||
const openai = createProvider('openai', {
|
### Gemini Configuration
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const gemini = new GeminiProvider({
|
||||||
apiKey: 'your-api-key',
|
apiKey: 'your-api-key',
|
||||||
defaultModel: 'gpt-4'
|
defaultModel: 'gemini-1.5-flash',
|
||||||
});
|
|
||||||
|
|
||||||
const gemini = createProvider('gemini', {
|
|
||||||
apiKey: 'your-api-key',
|
|
||||||
defaultModel: 'gemini-1.5-flash'
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### Provider Methods
|
|
||||||
|
|
||||||
#### `initialize(): Promise<void>`
|
|
||||||
Initializes the provider and validates the configuration.
|
|
||||||
|
|
||||||
#### `complete(params): Promise<CompletionResponse>`
|
|
||||||
Generates a completion based on the provided parameters.
|
|
||||||
|
|
||||||
#### `stream(params): AsyncIterable<CompletionChunk>`
|
|
||||||
Generates a streaming completion.
|
|
||||||
|
|
||||||
#### `getInfo(): ProviderInfo`
|
|
||||||
Returns information about the provider and its capabilities.
|
|
||||||
|
|
||||||
#### `isInitialized(): boolean`
|
|
||||||
Checks if the provider has been initialized.
|
|
||||||
|
|
||||||
## Error Handling
|
|
||||||
|
|
||||||
The package provides comprehensive error handling with categorized error types:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { AIProviderError, AIErrorType } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
try {
|
|
||||||
const response = await openai.complete({
|
|
||||||
messages: [{ role: 'user', content: 'Hello!' }]
|
|
||||||
});
|
|
||||||
} catch (error) {
|
|
||||||
if (error instanceof AIProviderError) {
|
|
||||||
switch (error.type) {
|
|
||||||
case AIErrorType.AUTHENTICATION:
|
|
||||||
console.error('Invalid API key');
|
|
||||||
break;
|
|
||||||
case AIErrorType.RATE_LIMIT:
|
|
||||||
console.error('Rate limit exceeded');
|
|
||||||
break;
|
|
||||||
case AIErrorType.INVALID_REQUEST:
|
|
||||||
console.error('Invalid request parameters');
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
console.error('Unknown error:', error.message);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Supported Models
|
|
||||||
|
|
||||||
### Claude (Anthropic)
|
|
||||||
- `claude-3-5-sonnet-20241022` (default)
|
|
||||||
- `claude-3-5-haiku-20241022`
|
|
||||||
- `claude-3-opus-20240229`
|
|
||||||
- `claude-3-sonnet-20240229`
|
|
||||||
- `claude-3-haiku-20240307`
|
|
||||||
|
|
||||||
### OpenAI (GPT)
|
|
||||||
- `gpt-4` (default)
|
|
||||||
- `gpt-4-turbo`
|
|
||||||
- `gpt-4-turbo-preview`
|
|
||||||
- `gpt-4-0125-preview`
|
|
||||||
- `gpt-4-1106-preview`
|
|
||||||
- `gpt-3.5-turbo`
|
|
||||||
- `gpt-3.5-turbo-0125`
|
|
||||||
- `gpt-3.5-turbo-1106`
|
|
||||||
|
|
||||||
### Google Gemini
|
|
||||||
- `gemini-1.5-flash` (default)
|
|
||||||
- `gemini-1.5-flash-8b`
|
|
||||||
- `gemini-1.5-pro`
|
|
||||||
- `gemini-1.0-pro`
|
|
||||||
- `gemini-1.0-pro-vision`
|
|
||||||
|
|
||||||
## Environment Variables
|
|
||||||
|
|
||||||
You can set your API keys as environment variables:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export ANTHROPIC_API_KEY="your-anthropic-api-key"
|
|
||||||
export OPENAI_API_KEY="your-openai-api-key"
|
|
||||||
export GOOGLE_AI_API_KEY="your-google-ai-api-key"
|
|
||||||
```
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
const claude = createClaudeProvider(process.env.ANTHROPIC_API_KEY!);
|
|
||||||
const openai = createOpenAIProvider(process.env.OPENAI_API_KEY!);
|
|
||||||
const gemini = createGeminiProvider(process.env.GOOGLE_AI_API_KEY!);
|
|
||||||
```
|
|
||||||
|
|
||||||
## Provider Comparison
|
|
||||||
|
|
||||||
| Feature | Claude | OpenAI | Gemini |
|
|
||||||
|---------|--------|--------|--------|
|
|
||||||
| **Models** | 5 models | 8+ models | 5 models |
|
|
||||||
| **Max Context** | 200K tokens | 128K tokens | 1M tokens |
|
|
||||||
| **Streaming** | ✅ | ✅ | ✅ |
|
|
||||||
| **Vision** | ✅ | ✅ | ✅ |
|
|
||||||
| **Function Calling** | ✅ | ✅ | ✅ |
|
|
||||||
| **JSON Mode** | ❌ | ✅ | ❌ |
|
|
||||||
| **System Messages** | ✅ (separate) | ✅ (inline) | ✅ (separate) |
|
|
||||||
| **Multimodal** | ✅ | ✅ | ✅ |
|
|
||||||
| **Safety Controls** | Basic | Basic | Advanced |
|
|
||||||
| **Special Features** | Advanced reasoning | JSON mode, plugins | Largest context, advanced safety |
|
|
||||||
|
|
||||||
## Best Practices
|
|
||||||
|
|
||||||
1. **Always initialize providers** before using them
|
|
||||||
2. **Handle errors gracefully** with proper error types
|
|
||||||
3. **Use appropriate models** for your use case (speed vs. capability vs. context)
|
|
||||||
4. **Set reasonable timeouts** for your application
|
|
||||||
5. **Implement retry logic** for production applications
|
|
||||||
6. **Monitor token usage** to control costs
|
|
||||||
7. **Use environment variables** for API keys
|
|
||||||
8. **Consider provider-specific features** when choosing
|
|
||||||
9. **Configure safety settings** appropriately for Gemini
|
|
||||||
10. **Leverage large context** capabilities of Gemini for complex tasks
|
|
||||||
|
|
||||||
## Advanced Usage
|
|
||||||
|
|
||||||
### Provider Registry
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { ProviderRegistry } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
// List all registered providers
|
|
||||||
console.log(ProviderRegistry.getRegisteredProviders()); // ['claude', 'openai', 'gemini']
|
|
||||||
|
|
||||||
// Create provider by name
|
|
||||||
const provider = ProviderRegistry.create('gemini', {
|
|
||||||
apiKey: 'your-api-key'
|
|
||||||
});
|
|
||||||
|
|
||||||
// Check if provider is registered
|
|
||||||
if (ProviderRegistry.isRegistered('gemini')) {
|
|
||||||
console.log('Gemini is available!');
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Gemini-Specific Features
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { createGeminiProvider } from 'simple-ai-provider';
|
|
||||||
|
|
||||||
const gemini = createGeminiProvider('your-api-key', {
|
|
||||||
defaultModel: 'gemini-1.5-pro',
|
|
||||||
safetySettings: [
|
safetySettings: [
|
||||||
{
|
{
|
||||||
category: 'HARM_CATEGORY_HARASSMENT',
|
category: 'HARM_CATEGORY_HARASSMENT',
|
||||||
@ -442,45 +137,268 @@ const gemini = createGeminiProvider('your-api-key', {
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
generationConfig: {
|
generationConfig: {
|
||||||
temperature: 0.9,
|
temperature: 0.7,
|
||||||
topP: 0.8,
|
topP: 0.8,
|
||||||
topK: 40,
|
topK: 40,
|
||||||
maxOutputTokens: 2048,
|
maxOutputTokens: 1000
|
||||||
stopSequences: ['END', 'STOP']
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
await gemini.initialize();
|
|
||||||
|
|
||||||
// Large context example (up to 1M tokens)
|
|
||||||
const response = await gemini.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'system', content: 'You are analyzing a large document.' },
|
|
||||||
{ role: 'user', content: 'Your very large text here...' }
|
|
||||||
],
|
|
||||||
maxTokens: 2048
|
|
||||||
});
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Contributing
|
### OpenWebUI Configuration
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const openwebui = new OpenWebUIProvider({
|
||||||
|
apiKey: 'your-bearer-token', // Get from OpenWebUI Settings > Account
|
||||||
|
baseUrl: 'http://localhost:3000', // Your OpenWebUI instance
|
||||||
|
defaultModel: 'llama3.1',
|
||||||
|
useOllamaProxy: false, // Use OpenWebUI's chat API (recommended)
|
||||||
|
// useOllamaProxy: true, // Use Ollama API proxy for direct model access
|
||||||
|
dangerouslyAllowInsecureConnections: true, // For local HTTPS
|
||||||
|
timeout: 60000, // Longer timeout for local inference
|
||||||
|
maxRetries: 2
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🌊 Streaming Support
|
||||||
|
|
||||||
|
All providers support real-time streaming:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const stream = provider.stream({
|
||||||
|
messages: [{ role: 'user', content: 'Count from 1 to 10' }],
|
||||||
|
maxTokens: 100
|
||||||
|
});
|
||||||
|
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
if (!chunk.isComplete) {
|
||||||
|
process.stdout.write(chunk.content);
|
||||||
|
} else {
|
||||||
|
console.log('\nDone! Usage:', chunk.usage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔀 Multi-Provider Usage
|
||||||
|
|
||||||
|
Use multiple providers seamlessly:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const providers = {
|
||||||
|
claude: new ClaudeProvider({ apiKey: process.env.ANTHROPIC_API_KEY! }),
|
||||||
|
openai: new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY! }),
|
||||||
|
gemini: new GeminiProvider({ apiKey: process.env.GOOGLE_AI_API_KEY! }),
|
||||||
|
openwebui: new OpenWebUIProvider({
|
||||||
|
apiKey: 'ollama',
|
||||||
|
baseUrl: 'http://localhost:3000'
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
// Initialize all providers
|
||||||
|
await Promise.all(Object.values(providers).map(p => p.initialize()));
|
||||||
|
|
||||||
|
// Use the same interface for all
|
||||||
|
const prompt = {
|
||||||
|
messages: [{ role: 'user', content: 'Hello!' }],
|
||||||
|
maxTokens: 50
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const [name, provider] of Object.entries(providers)) {
|
||||||
|
try {
|
||||||
|
const response = await provider.complete(prompt);
|
||||||
|
console.log(`${name}: ${response.content}`);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(`${name} failed: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📊 Provider Comparison
|
||||||
|
|
||||||
|
| Provider | Context Length | Streaming | Vision | Function Calling | Local Execution | Best For |
|
||||||
|
|----------|---------------|-----------|--------|------------------|-----------------|----------|
|
||||||
|
| **Claude** | 200K tokens | ✅ | ✅ | ✅ | ❌ | Reasoning, Analysis, Code Review |
|
||||||
|
| **OpenAI** | 128K tokens | ✅ | ✅ | ✅ | ❌ | General Purpose, Function Calling |
|
||||||
|
| **Gemini** | 1M tokens | ✅ | ✅ | ✅ | ❌ | Large Documents, Multimodal |
|
||||||
|
| **OpenWebUI** | 8K-32K tokens | ✅ | Varies | Limited | ✅ | Privacy, Custom Models, Local |
|
||||||
|
|
||||||
|
## 🎯 Available Models
|
||||||
|
|
||||||
|
### Claude Models
|
||||||
|
- `claude-3-5-sonnet-20241022` (recommended)
|
||||||
|
- `claude-3-5-haiku-20241022`
|
||||||
|
- `claude-3-opus-20240229`
|
||||||
|
- `claude-3-sonnet-20240229`
|
||||||
|
- `claude-3-haiku-20240307`
|
||||||
|
|
||||||
|
### OpenAI Models
|
||||||
|
- `gpt-4o` (recommended)
|
||||||
|
- `gpt-4o-mini`
|
||||||
|
- `gpt-4-turbo`
|
||||||
|
- `gpt-4`
|
||||||
|
- `gpt-3.5-turbo`
|
||||||
|
|
||||||
|
### Gemini Models
|
||||||
|
- `gemini-1.5-flash` (recommended, fast)
|
||||||
|
- `gemini-1.5-flash-8b` (fastest)
|
||||||
|
- `gemini-1.5-pro` (most capable)
|
||||||
|
- `gemini-1.0-pro`
|
||||||
|
- `gemini-1.0-pro-vision`
|
||||||
|
|
||||||
|
### OpenWebUI Models
|
||||||
|
*Available models depend on your local installation:*
|
||||||
|
- `llama3.1`, `llama3.1:8b`, `llama3.1:70b`
|
||||||
|
- `llama3.2`, `llama3.2:1b`, `llama3.2:3b`
|
||||||
|
- `codellama`, `codellama:7b`, `codellama:13b`, `codellama:34b`
|
||||||
|
- `mistral`, `mistral:7b`
|
||||||
|
- `mixtral`, `mixtral:8x7b`
|
||||||
|
- `phi3`, `phi3:mini`
|
||||||
|
- `gemma2`, `gemma2:2b`, `gemma2:9b`
|
||||||
|
- `qwen2.5`, `granite3.1-dense:8b`
|
||||||
|
- *Custom models as installed*
|
||||||
|
|
||||||
|
## 🚨 Error Handling
|
||||||
|
|
||||||
|
The package provides standardized error handling:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { AIProviderError, AIErrorType } from 'simple-ai-provider';
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await provider.complete({
|
||||||
|
messages: [{ role: 'user', content: 'Hello' }]
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof AIProviderError) {
|
||||||
|
switch (error.type) {
|
||||||
|
case AIErrorType.AUTHENTICATION:
|
||||||
|
console.log('Invalid API key');
|
||||||
|
break;
|
||||||
|
case AIErrorType.RATE_LIMIT:
|
||||||
|
console.log('Rate limited, try again later');
|
||||||
|
break;
|
||||||
|
case AIErrorType.MODEL_NOT_FOUND:
|
||||||
|
console.log('Model not available');
|
||||||
|
break;
|
||||||
|
case AIErrorType.NETWORK:
|
||||||
|
console.log('Network/connection issue');
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
console.log('Unknown error:', error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔧 Advanced Usage
|
||||||
|
|
||||||
|
### Custom Base URLs
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// OpenAI-compatible endpoint
|
||||||
|
const customOpenAI = new OpenAIProvider({
|
||||||
|
apiKey: 'your-key',
|
||||||
|
baseUrl: 'https://api.custom-provider.com/v1'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Custom OpenWebUI instance
|
||||||
|
const remoteOpenWebUI = new OpenWebUIProvider({
|
||||||
|
apiKey: 'your-key',
|
||||||
|
baseUrl: 'https://my-openwebui.example.com',
|
||||||
|
apiPath: '/api/v1'
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Provider Information
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const info = provider.getInfo();
|
||||||
|
console.log(`Provider: ${info.name} v${info.version}`);
|
||||||
|
console.log(`Models: ${info.models.join(', ')}`);
|
||||||
|
console.log(`Max Context: ${info.maxContextLength} tokens`);
|
||||||
|
console.log(`Supports Streaming: ${info.supportsStreaming}`);
|
||||||
|
console.log('Capabilities:', info.capabilities);
|
||||||
|
```
|
||||||
|
|
||||||
|
### OpenWebUI-Specific Features
|
||||||
|
|
||||||
|
OpenWebUI offers unique advantages for local AI deployment:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const openwebui = new OpenWebUIProvider({
|
||||||
|
apiKey: 'your-bearer-token', // Get from OpenWebUI Settings > Account
|
||||||
|
baseUrl: 'http://localhost:3000',
|
||||||
|
defaultModel: 'llama3.1',
|
||||||
|
useOllamaProxy: false, // Use chat completions API (recommended)
|
||||||
|
// Longer timeout for local inference
|
||||||
|
timeout: 120000,
|
||||||
|
// Allow self-signed certificates for local development
|
||||||
|
dangerouslyAllowInsecureConnections: true
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test connection and list available models
|
||||||
|
try {
|
||||||
|
await openwebui.initialize();
|
||||||
|
console.log('Connected to local OpenWebUI instance');
|
||||||
|
|
||||||
|
// Use either chat completions or Ollama proxy
|
||||||
|
const response = await openwebui.complete({
|
||||||
|
messages: [{ role: 'user', content: 'Hello!' }],
|
||||||
|
maxTokens: 100
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.log('OpenWebUI not available:', error.message);
|
||||||
|
// Gracefully fallback to cloud providers
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**OpenWebUI API Modes:**
|
||||||
|
- **Chat Completions** (`useOllamaProxy: false`): OpenWebUI's native API with full features
|
||||||
|
- **Ollama Proxy** (`useOllamaProxy: true`): Direct access to Ollama API for raw model interaction
|
||||||
|
|
||||||
|
## 📦 TypeScript Support
|
||||||
|
|
||||||
|
Full TypeScript support with comprehensive type definitions:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import type {
|
||||||
|
CompletionParams,
|
||||||
|
CompletionResponse,
|
||||||
|
CompletionChunk,
|
||||||
|
ProviderInfo,
|
||||||
|
ClaudeConfig,
|
||||||
|
OpenAIConfig,
|
||||||
|
GeminiConfig,
|
||||||
|
OpenWebUIConfig
|
||||||
|
} from 'simple-ai-provider';
|
||||||
|
|
||||||
|
// Type-safe configuration
|
||||||
|
const config: ClaudeConfig = {
|
||||||
|
apiKey: 'your-key',
|
||||||
|
defaultModel: 'claude-3-5-sonnet-20241022',
|
||||||
|
// TypeScript will validate all options
|
||||||
|
};
|
||||||
|
|
||||||
|
// Type-safe responses
|
||||||
|
const response: CompletionResponse = await provider.complete(params);
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🤝 Contributing
|
||||||
|
|
||||||
Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
|
Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
|
||||||
|
|
||||||
## License
|
## 📄 License
|
||||||
|
|
||||||
MIT
|
MIT License - see the [LICENSE](LICENSE) file for details.
|
||||||
|
|
||||||
## Changelog
|
## 🔗 Links
|
||||||
|
|
||||||
### 1.0.0
|
- [Anthropic Claude API](https://docs.anthropic.com/claude/reference/)
|
||||||
- Initial release
|
- [OpenAI API](https://platform.openai.com/docs/)
|
||||||
- Claude provider implementation
|
- [Google Gemini API](https://ai.google.dev/)
|
||||||
- OpenAI provider implementation
|
- [OpenWebUI](https://openwebui.com/)
|
||||||
- Gemini provider implementation
|
- [GitHub Repository](https://github.com/your-username/simple-ai-provider)
|
||||||
- Streaming support for all providers
|
|
||||||
- Comprehensive error handling
|
---
|
||||||
- TypeScript support
|
|
||||||
- Provider registry system
|
⭐ **Star this repo if you find it helpful!**
|
||||||
- Multi-provider examples
|
|
||||||
- Large context support (Gemini)
|
|
||||||
- Advanced safety controls (Gemini)
|
|
||||||
|
@ -1,297 +1,269 @@
|
|||||||
/**
|
/**
|
||||||
* Multi-provider example for Simple AI Provider
|
* Multi-Provider Example - Demonstrating all supported AI providers
|
||||||
* Demonstrates how to use Claude, OpenAI, and Gemini providers
|
*
|
||||||
|
* This example shows how to use Claude, OpenAI, Gemini, and OpenWebUI providers
|
||||||
|
* with consistent interfaces, showcasing the power of the unified API.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import {
|
import {
|
||||||
createClaudeProvider,
|
ClaudeProvider,
|
||||||
createOpenAIProvider,
|
OpenAIProvider,
|
||||||
createGeminiProvider,
|
GeminiProvider,
|
||||||
|
OpenWebUIProvider,
|
||||||
createProvider,
|
createProvider,
|
||||||
ProviderRegistry,
|
type ClaudeConfig,
|
||||||
AIProviderError,
|
type OpenAIConfig,
|
||||||
AIErrorType
|
type GeminiConfig,
|
||||||
|
type OpenWebUIConfig,
|
||||||
|
type CompletionParams
|
||||||
} from '../src/index.js';
|
} from '../src/index.js';
|
||||||
|
|
||||||
async function multiProviderExample() {
|
// Provider configurations
|
||||||
console.log('=== Multi-Provider AI Example ===\n');
|
const configs = {
|
||||||
|
claude: {
|
||||||
|
apiKey: process.env.ANTHROPIC_API_KEY || 'your-claude-api-key',
|
||||||
|
defaultModel: 'claude-3-5-sonnet-20241022'
|
||||||
|
} as ClaudeConfig,
|
||||||
|
|
||||||
// Get API keys from environment
|
openai: {
|
||||||
const claudeApiKey = process.env.ANTHROPIC_API_KEY || 'your-claude-api-key';
|
apiKey: process.env.OPENAI_API_KEY || 'your-openai-api-key',
|
||||||
const openaiApiKey = process.env.OPENAI_API_KEY || 'your-openai-api-key';
|
defaultModel: 'gpt-4o'
|
||||||
const geminiApiKey = process.env.GOOGLE_AI_API_KEY || 'your-gemini-api-key';
|
} as OpenAIConfig,
|
||||||
|
|
||||||
|
gemini: {
|
||||||
|
apiKey: process.env.GOOGLE_AI_API_KEY || 'your-gemini-api-key',
|
||||||
|
defaultModel: 'gemini-1.5-flash'
|
||||||
|
} as GeminiConfig,
|
||||||
|
|
||||||
|
openwebui: {
|
||||||
|
apiKey: process.env.OPENWEBUI_API_KEY || 'your-bearer-token', // Get from OpenWebUI Settings > Account
|
||||||
|
baseUrl: 'http://localhost:3000',
|
||||||
|
defaultModel: 'llama3.1',
|
||||||
|
useOllamaProxy: false // Set to true to use Ollama API proxy
|
||||||
|
} as OpenWebUIConfig
|
||||||
|
};
|
||||||
|
|
||||||
|
async function demonstrateProviders() {
|
||||||
|
console.log('🤖 Multi-Provider AI Demo\n');
|
||||||
|
|
||||||
|
// ===== 1. Direct Provider Creation =====
|
||||||
|
console.log('1️⃣ Creating providers directly...\n');
|
||||||
|
|
||||||
|
const claude = new ClaudeProvider(configs.claude);
|
||||||
|
const openai = new OpenAIProvider(configs.openai);
|
||||||
|
const gemini = new GeminiProvider(configs.gemini);
|
||||||
|
const openwebui = new OpenWebUIProvider(configs.openwebui);
|
||||||
|
|
||||||
|
const providers = { claude, openai, gemini, openwebui };
|
||||||
|
|
||||||
|
// ===== 2. Factory Creation =====
|
||||||
|
console.log('2️⃣ Creating providers via factory...\n');
|
||||||
|
|
||||||
|
const factoryProviders = {
|
||||||
|
claude: createProvider('claude', configs.claude),
|
||||||
|
openai: createProvider('openai', configs.openai),
|
||||||
|
gemini: createProvider('gemini', configs.gemini),
|
||||||
|
openwebui: createProvider('openwebui', configs.openwebui)
|
||||||
|
};
|
||||||
|
|
||||||
|
// ===== 3. Provider Information =====
|
||||||
|
console.log('3️⃣ Provider Information:\n');
|
||||||
|
|
||||||
|
for (const [name, provider] of Object.entries(providers)) {
|
||||||
|
const info = provider.getInfo();
|
||||||
|
console.log(`${name.toUpperCase()}: ${info.name} v${info.version}`);
|
||||||
|
console.log(` • Context: ${info.maxContextLength.toLocaleString()} tokens`);
|
||||||
|
console.log(` • Streaming: ${info.supportsStreaming ? '✅' : '❌'}`);
|
||||||
|
console.log(` • Models: ${info.models.slice(0, 3).join(', ')}${info.models.length > 3 ? '...' : ''}`);
|
||||||
|
|
||||||
|
if (info.capabilities) {
|
||||||
|
console.log(` • Vision: ${info.capabilities.vision ? '✅' : '❌'}`);
|
||||||
|
console.log(` • Function Calling: ${info.capabilities.functionCalling ? '✅' : '❌'}`);
|
||||||
|
console.log(` • Local Execution: ${info.capabilities.localExecution ? '✅' : '❌'}`);
|
||||||
|
}
|
||||||
|
console.log();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===== 4. Common Completion Example =====
|
||||||
|
console.log('4️⃣ Running completions across all providers...\n');
|
||||||
|
|
||||||
|
const messages = [
|
||||||
|
{ role: 'system' as const, content: 'You are a helpful assistant. Be concise.' },
|
||||||
|
{ role: 'user' as const, content: 'What is TypeScript? Answer in one sentence.' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const params: CompletionParams = {
|
||||||
|
messages,
|
||||||
|
maxTokens: 50,
|
||||||
|
temperature: 0.7
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const [name, provider] of Object.entries(providers)) {
|
||||||
|
try {
|
||||||
|
console.log(`${name.toUpperCase()} Response:`);
|
||||||
|
|
||||||
|
// Initialize provider (would be done once in real app)
|
||||||
|
await provider.initialize();
|
||||||
|
|
||||||
|
const response = await provider.complete(params);
|
||||||
|
console.log(` ✅ ${response.content.trim()}`);
|
||||||
|
console.log(` 📊 Tokens: ${response.usage.totalTokens} (${response.usage.promptTokens}+${response.usage.completionTokens})\n`);
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.log(` ❌ Error: ${(error as Error).message}\n`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===== 5. Streaming Example =====
|
||||||
|
console.log('5️⃣ Streaming example (Claude)...\n');
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Method 1: Using factory functions
|
await claude.initialize();
|
||||||
console.log('1. Creating providers using factory functions...');
|
|
||||||
|
|
||||||
const claude = createClaudeProvider(claudeApiKey, {
|
console.log('Claude Streaming Response:');
|
||||||
defaultModel: 'claude-3-5-haiku-20241022'
|
process.stdout.write(' ');
|
||||||
});
|
|
||||||
|
|
||||||
const openai = createOpenAIProvider(openaiApiKey, {
|
|
||||||
defaultModel: 'gpt-3.5-turbo'
|
|
||||||
});
|
|
||||||
|
|
||||||
const gemini = createGeminiProvider(geminiApiKey, {
|
|
||||||
defaultModel: 'gemini-1.5-flash'
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('✓ Providers created\n');
|
|
||||||
|
|
||||||
// Method 2: Using generic createProvider function
|
|
||||||
console.log('2. Creating providers using generic factory...');
|
|
||||||
|
|
||||||
const claude2 = createProvider('claude', {
|
|
||||||
apiKey: claudeApiKey,
|
|
||||||
defaultModel: 'claude-3-5-haiku-20241022'
|
|
||||||
});
|
|
||||||
|
|
||||||
const openai2 = createProvider('openai', {
|
|
||||||
apiKey: openaiApiKey,
|
|
||||||
defaultModel: 'gpt-3.5-turbo'
|
|
||||||
});
|
|
||||||
|
|
||||||
const gemini2 = createProvider('gemini', {
|
|
||||||
apiKey: geminiApiKey,
|
|
||||||
defaultModel: 'gemini-1.5-flash'
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('✓ Generic providers created\n');
|
|
||||||
|
|
||||||
// Method 3: Using Provider Registry
|
|
||||||
console.log('3. Using Provider Registry...');
|
|
||||||
console.log('Registered providers:', ProviderRegistry.getRegisteredProviders());
|
|
||||||
|
|
||||||
const claudeFromRegistry = ProviderRegistry.create('claude', {
|
|
||||||
apiKey: claudeApiKey,
|
|
||||||
defaultModel: 'claude-3-5-haiku-20241022'
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('✓ Provider created from registry\n');
|
|
||||||
|
|
||||||
// Initialize providers
|
|
||||||
console.log('4. Initializing providers...');
|
|
||||||
await Promise.all([
|
|
||||||
claude.initialize(),
|
|
||||||
openai.initialize(),
|
|
||||||
gemini.initialize()
|
|
||||||
]);
|
|
||||||
console.log('✓ All providers initialized\n');
|
|
||||||
|
|
||||||
// Compare provider information
|
|
||||||
console.log('5. Provider Information:');
|
|
||||||
console.log('Claude Info:', claude.getInfo());
|
|
||||||
console.log('OpenAI Info:', openai.getInfo());
|
|
||||||
console.log('Gemini Info:', gemini.getInfo());
|
|
||||||
console.log();
|
|
||||||
|
|
||||||
// Test the same prompt with all providers
|
|
||||||
const testPrompt = 'Explain the concept of recursion in programming in one sentence.';
|
|
||||||
|
|
||||||
console.log('6. Testing same prompt with all providers...');
|
|
||||||
console.log(`Prompt: "${testPrompt}"\n`);
|
|
||||||
|
|
||||||
// Claude response
|
|
||||||
console.log('--- Claude Response ---');
|
|
||||||
const claudeResponse = await claude.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'system', content: 'You are a concise programming tutor.' },
|
|
||||||
{ role: 'user', content: testPrompt }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('Response:', claudeResponse.content);
|
|
||||||
console.log('Usage:', claudeResponse.usage);
|
|
||||||
console.log('Model:', claudeResponse.model);
|
|
||||||
console.log();
|
|
||||||
|
|
||||||
// OpenAI response
|
|
||||||
console.log('--- OpenAI Response ---');
|
|
||||||
const openaiResponse = await openai.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'system', content: 'You are a concise programming tutor.' },
|
|
||||||
{ role: 'user', content: testPrompt }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('Response:', openaiResponse.content);
|
|
||||||
console.log('Usage:', openaiResponse.usage);
|
|
||||||
console.log('Model:', openaiResponse.model);
|
|
||||||
console.log();
|
|
||||||
|
|
||||||
// Gemini response
|
|
||||||
console.log('--- Gemini Response ---');
|
|
||||||
const geminiResponse = await gemini.complete({
|
|
||||||
messages: [
|
|
||||||
{ role: 'system', content: 'You are a concise programming tutor.' },
|
|
||||||
{ role: 'user', content: testPrompt }
|
|
||||||
],
|
|
||||||
maxTokens: 100,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log('Response:', geminiResponse.content);
|
|
||||||
console.log('Usage:', geminiResponse.usage);
|
|
||||||
console.log('Model:', geminiResponse.model);
|
|
||||||
console.log();
|
|
||||||
|
|
||||||
// Streaming comparison
|
|
||||||
console.log('7. Streaming comparison...');
|
|
||||||
console.log('Streaming from Claude:');
|
|
||||||
|
|
||||||
for await (const chunk of claude.stream({
|
for await (const chunk of claude.stream({
|
||||||
messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
|
messages: [{ role: 'user', content: 'Count from 1 to 5 with explanations.' }],
|
||||||
maxTokens: 50
|
maxTokens: 150
|
||||||
})) {
|
})) {
|
||||||
if (!chunk.isComplete) {
|
if (!chunk.isComplete) {
|
||||||
process.stdout.write(chunk.content);
|
process.stdout.write(chunk.content);
|
||||||
} else {
|
} else if (chunk.usage) {
|
||||||
console.log('\n✓ Claude streaming complete\n');
|
console.log(`\n 📊 Final tokens: ${chunk.usage.totalTokens}\n`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log('Streaming from OpenAI:');
|
|
||||||
|
|
||||||
for await (const chunk of openai.stream({
|
|
||||||
messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
|
|
||||||
maxTokens: 50
|
|
||||||
})) {
|
|
||||||
if (!chunk.isComplete) {
|
|
||||||
process.stdout.write(chunk.content);
|
|
||||||
} else {
|
|
||||||
console.log('\n✓ OpenAI streaming complete\n');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log('Streaming from Gemini:');
|
|
||||||
|
|
||||||
for await (const chunk of gemini.stream({
|
|
||||||
messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
|
|
||||||
maxTokens: 50
|
|
||||||
})) {
|
|
||||||
if (!chunk.isComplete) {
|
|
||||||
process.stdout.write(chunk.content);
|
|
||||||
} else {
|
|
||||||
console.log('\n✓ Gemini streaming complete\n');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Provider-specific features demo
|
|
||||||
console.log('8. Provider-specific features...');
|
|
||||||
|
|
||||||
// OpenAI with organization
|
|
||||||
const openaiWithOrg = createOpenAIProvider(openaiApiKey, {
|
|
||||||
defaultModel: 'gpt-3.5-turbo',
|
|
||||||
organization: 'org-example',
|
|
||||||
timeout: 60000
|
|
||||||
});
|
|
||||||
console.log('✓ Created OpenAI provider with organization settings');
|
|
||||||
|
|
||||||
// Claude with custom version
|
|
||||||
const claudeCustom = createClaudeProvider(claudeApiKey, {
|
|
||||||
defaultModel: 'claude-3-5-sonnet-20241022',
|
|
||||||
version: '2023-06-01',
|
|
||||||
maxRetries: 5
|
|
||||||
});
|
|
||||||
console.log('✓ Created Claude provider with custom settings');
|
|
||||||
|
|
||||||
// Gemini with safety settings
|
|
||||||
const geminiCustom = createGeminiProvider(geminiApiKey, {
|
|
||||||
defaultModel: 'gemini-1.5-pro',
|
|
||||||
safetySettings: [],
|
|
||||||
generationConfig: {
|
|
||||||
temperature: 0.9,
|
|
||||||
topP: 0.8,
|
|
||||||
topK: 40
|
|
||||||
}
|
|
||||||
});
|
|
||||||
console.log('✓ Created Gemini provider with safety and generation settings');
|
|
||||||
|
|
||||||
console.log('\n🎉 Multi-provider example completed successfully!');
|
|
||||||
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('❌ Error in multi-provider example:');
|
console.log(` ❌ Streaming error: ${(error as Error).message}\n`);
|
||||||
|
|
||||||
if (error instanceof AIProviderError) {
|
|
||||||
console.error(`AI Provider Error (${error.type}):`, error.message);
|
|
||||||
|
|
||||||
switch (error.type) {
|
|
||||||
case AIErrorType.AUTHENTICATION:
|
|
||||||
console.error('💡 Hint: Check your API keys in environment variables');
|
|
||||||
console.error(' Set ANTHROPIC_API_KEY, OPENAI_API_KEY, and GOOGLE_AI_API_KEY');
|
|
||||||
break;
|
|
||||||
case AIErrorType.RATE_LIMIT:
|
|
||||||
console.error('💡 Hint: You are being rate limited. Wait and try again.');
|
|
||||||
break;
|
|
||||||
case AIErrorType.INVALID_REQUEST:
|
|
||||||
console.error('💡 Hint: Check your request parameters.');
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
console.error('💡 Hint: An unexpected error occurred.');
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
console.error('Unexpected error:', error);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Provider comparison utility
|
// ===== 6. Provider-Specific Features =====
|
||||||
async function compareProviders() {
|
console.log('6️⃣ Provider-specific features...\n');
|
||||||
console.log('\n=== Provider Comparison ===');
|
|
||||||
|
|
||||||
const providers = [
|
// Claude - Advanced reasoning
|
||||||
{ name: 'Claude', factory: () => createClaudeProvider('dummy-key') },
|
try {
|
||||||
{ name: 'OpenAI', factory: () => createOpenAIProvider('dummy-key') },
|
await claude.initialize();
|
||||||
{ name: 'Gemini', factory: () => createGeminiProvider('dummy-key') }
|
console.log('Claude Advanced Reasoning:');
|
||||||
|
const claudeResponse = await claude.complete({
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content: 'Analyze the logical structure of this argument: "All humans are mortal. Socrates is human. Therefore, Socrates is mortal."'
|
||||||
|
}],
|
||||||
|
maxTokens: 100,
|
||||||
|
temperature: 0.1
|
||||||
|
});
|
||||||
|
console.log(` ✅ ${claudeResponse.content.trim()}\n`);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(` ❌ Claude error: ${(error as Error).message}\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenAI - Function calling (conceptual)
|
||||||
|
try {
|
||||||
|
await openai.initialize();
|
||||||
|
console.log('OpenAI Code Generation:');
|
||||||
|
const openaiResponse = await openai.complete({
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content: 'Write a simple TypeScript function to calculate factorial. Just the function, no explanation.'
|
||||||
|
}],
|
||||||
|
maxTokens: 100,
|
||||||
|
temperature: 0.3
|
||||||
|
});
|
||||||
|
console.log(` ✅ ${openaiResponse.content.trim()}\n`);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(` ❌ OpenAI error: ${(error as Error).message}\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gemini - Large context
|
||||||
|
try {
|
||||||
|
await gemini.initialize();
|
||||||
|
console.log('Gemini Large Context Capability:');
|
||||||
|
const geminiResponse = await gemini.complete({
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content: 'Explain the benefits of having 1M token context length for AI applications.'
|
||||||
|
}],
|
||||||
|
maxTokens: 80,
|
||||||
|
temperature: 0.5
|
||||||
|
});
|
||||||
|
console.log(` ✅ ${geminiResponse.content.trim()}\n`);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(` ❌ Gemini error: ${(error as Error).message}\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenWebUI - Local model capabilities
|
||||||
|
try {
|
||||||
|
await openwebui.initialize();
|
||||||
|
console.log('OpenWebUI Local Model:');
|
||||||
|
const openwebuiResponse = await openwebui.complete({
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content: 'What are the advantages of running AI models locally?'
|
||||||
|
}],
|
||||||
|
maxTokens: 80,
|
||||||
|
temperature: 0.6
|
||||||
|
});
|
||||||
|
console.log(` ✅ ${openwebuiResponse.content.trim()}\n`);
|
||||||
|
} catch (error) {
|
||||||
|
console.log(` ❌ OpenWebUI error: ${(error as Error).message}`);
|
||||||
|
console.log(` (This is expected if OpenWebUI is not running locally or API key is invalid)\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===== 7. Error Handling Demonstration =====
|
||||||
|
console.log('7️⃣ Error handling examples...\n');
|
||||||
|
|
||||||
|
try {
|
||||||
|
const invalidProvider = new ClaudeProvider({ apiKey: 'invalid-key' });
|
||||||
|
await invalidProvider.initialize();
|
||||||
|
await invalidProvider.complete({
|
||||||
|
messages: [{ role: 'user', content: 'Test' }]
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
console.log('Expected authentication error:');
|
||||||
|
console.log(` ❌ Type: ${error.type}`);
|
||||||
|
console.log(` ❌ Message: ${error.message}\n`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ===== 8. Performance Comparison =====
|
||||||
|
console.log('8️⃣ Provider Comparison Summary:\n');
|
||||||
|
|
||||||
|
const comparison = [
|
||||||
|
{
|
||||||
|
Provider: 'Claude',
|
||||||
|
'Context Length': '200K tokens',
|
||||||
|
'Best For': 'Reasoning, Analysis, Code Review',
|
||||||
|
'Streaming': '✅',
|
||||||
|
'Cost': 'Mid-range'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Provider: 'OpenAI',
|
||||||
|
'Context Length': '128K tokens',
|
||||||
|
'Best For': 'General Purpose, Function Calling',
|
||||||
|
'Streaming': '✅',
|
||||||
|
'Cost': 'Variable'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Provider: 'Gemini',
|
||||||
|
'Context Length': '1M tokens',
|
||||||
|
'Best For': 'Large Documents, Multimodal',
|
||||||
|
'Streaming': '✅',
|
||||||
|
'Cost': 'Low-cost'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Provider: 'OpenWebUI',
|
||||||
|
'Context Length': '8K-32K tokens',
|
||||||
|
'Best For': 'Privacy, Local Inference, Custom Models, RAG',
|
||||||
|
'Streaming': '✅',
|
||||||
|
'Cost': 'Free (compute)'
|
||||||
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
console.log('\nProvider Capabilities:');
|
console.table(comparison);
|
||||||
console.log('| Provider | Models | Context | Streaming | Vision | Functions | Multimodal |');
|
|
||||||
console.log('|----------|--------|---------|-----------|--------|-----------|------------|');
|
|
||||||
|
|
||||||
for (const { name, factory } of providers) {
|
console.log('\n✨ Demo completed! All providers work with the same unified interface.\n');
|
||||||
const provider = factory();
|
|
||||||
const info = provider.getInfo();
|
|
||||||
|
|
||||||
const contextStr = info.maxContextLength >= 1000000
|
|
||||||
? `${(info.maxContextLength / 1000000).toFixed(1)}M`
|
|
||||||
: `${(info.maxContextLength / 1000).toFixed(0)}K`;
|
|
||||||
|
|
||||||
console.log(`| ${name.padEnd(8)} | ${info.models.length.toString().padEnd(6)} | ${contextStr.padEnd(7)} | ${info.supportsStreaming ? '✓' : '✗'.padEnd(9)} | ${info.capabilities?.vision ? '✓' : '✗'.padEnd(6)} | ${info.capabilities?.functionCalling ? '✓' : '✗'.padEnd(9)} | ${info.capabilities?.multimodal ? '✓' : '✗'.padEnd(10)} |`);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Feature comparison
|
// Run the demonstration
|
||||||
async function featureComparison() {
|
|
||||||
console.log('\n=== Feature Comparison ===');
|
|
||||||
|
|
||||||
const features = [
|
|
||||||
['Provider', 'Context Window', 'Streaming', 'Vision', 'Function Calling', 'System Messages', 'Special Features'],
|
|
||||||
['Claude', '200K tokens', '✅', '✅', '✅', '✅ (separate)', 'Advanced reasoning'],
|
|
||||||
['OpenAI', '128K tokens', '✅', '✅', '✅', '✅ (inline)', 'JSON mode, plugins'],
|
|
||||||
['Gemini', '1M tokens', '✅', '✅', '✅', '✅ (separate)', 'Largest context, multimodal']
|
|
||||||
];
|
|
||||||
|
|
||||||
for (const row of features) {
|
|
||||||
console.log('| ' + row.map(cell => cell.padEnd(15)).join(' | ') + ' |');
|
|
||||||
if (row[0] === 'Provider') {
|
|
||||||
console.log('|' + ''.padEnd(row.length * 17 + row.length - 1, '-') + '|');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the examples
|
|
||||||
if (import.meta.main) {
|
if (import.meta.main) {
|
||||||
await multiProviderExample();
|
demonstrateProviders().catch(console.error);
|
||||||
await compareProviders();
|
|
||||||
await featureComparison();
|
|
||||||
}
|
}
|
@ -37,7 +37,10 @@
|
|||||||
"google",
|
"google",
|
||||||
"provider",
|
"provider",
|
||||||
"typescript",
|
"typescript",
|
||||||
"nodejs"
|
"nodejs",
|
||||||
|
"openwebui",
|
||||||
|
"llm",
|
||||||
|
"unified-api"
|
||||||
],
|
],
|
||||||
"author": "Jan-Marlon Leibl",
|
"author": "Jan-Marlon Leibl",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
|
39
src/index.ts
39
src/index.ts
@ -1,56 +1,53 @@
|
|||||||
/**
|
/**
|
||||||
* Simple AI Provider - Main Entry Point
|
* Simple AI Provider - A unified interface for multiple AI providers
|
||||||
*
|
*
|
||||||
* A professional, extensible package for integrating multiple AI providers
|
* Main entry point for the library, providing access to all providers
|
||||||
* into your applications with a unified interface.
|
* and utilities in a clean, type-safe interface.
|
||||||
*
|
*
|
||||||
* @author Jan-Marlon Leibl
|
* @author Jan-Marlon Leibl
|
||||||
* @version 1.0.0
|
* @version 1.0.0
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Core types and interfaces
|
// Core types
|
||||||
export type {
|
export type {
|
||||||
AIProviderConfig,
|
|
||||||
AIMessage,
|
AIMessage,
|
||||||
MessageRole,
|
AIProviderConfig,
|
||||||
CompletionParams,
|
CompletionParams,
|
||||||
CompletionResponse,
|
CompletionResponse,
|
||||||
CompletionChunk,
|
CompletionChunk,
|
||||||
TokenUsage,
|
ProviderInfo,
|
||||||
ProviderInfo
|
TokenUsage
|
||||||
} from './types/index.js';
|
} from './types/index.js';
|
||||||
|
|
||||||
// Error handling
|
// Error types
|
||||||
export { AIProviderError, AIErrorType } from './types/index.js';
|
export { AIProviderError, AIErrorType } from './types/index.js';
|
||||||
|
|
||||||
// Base provider class
|
// Base provider
|
||||||
export { BaseAIProvider } from './providers/base.js';
|
export { BaseAIProvider } from './providers/base.js';
|
||||||
|
|
||||||
// Concrete provider implementations
|
// Provider implementations
|
||||||
export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
|
export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
|
||||||
export { OpenAIProvider, type OpenAIConfig } from './providers/openai.js';
|
export { OpenAIProvider, type OpenAIConfig } from './providers/openai.js';
|
||||||
export { GeminiProvider, type GeminiConfig } from './providers/gemini.js';
|
export { GeminiProvider, type GeminiConfig } from './providers/gemini.js';
|
||||||
|
export { OpenWebUIProvider, type OpenWebUIConfig } from './providers/openwebui.js';
|
||||||
|
|
||||||
// Utility functions and factory
|
// Factory utilities
|
||||||
export {
|
export {
|
||||||
createProvider,
|
createProvider,
|
||||||
createClaudeProvider,
|
createClaudeProvider,
|
||||||
createOpenAIProvider,
|
createOpenAIProvider,
|
||||||
createGeminiProvider,
|
createGeminiProvider,
|
||||||
ProviderRegistry,
|
createOpenWebUIProvider,
|
||||||
type ProviderType,
|
type ProviderType,
|
||||||
type ProviderConfigMap
|
PROVIDER_REGISTRY
|
||||||
} from './utils/factory.js';
|
} from './utils/factory.js';
|
||||||
|
|
||||||
// Re-export everything from providers for convenience
|
/**
|
||||||
export * from './providers/index.js';
|
* List of all supported providers
|
||||||
|
*/
|
||||||
|
export const SUPPORTED_PROVIDERS = ['claude', 'openai', 'gemini', 'openwebui'] as const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Package version
|
* Package version
|
||||||
*/
|
*/
|
||||||
export const VERSION = '1.0.0';
|
export const VERSION = '1.0.0';
|
||||||
|
|
||||||
/**
|
|
||||||
* List of supported providers
|
|
||||||
*/
|
|
||||||
export const SUPPORTED_PROVIDERS = ['claude', 'openai', 'gemini'] as const;
|
|
@ -7,3 +7,4 @@ export { BaseAIProvider } from './base.js';
|
|||||||
export { ClaudeProvider, type ClaudeConfig } from './claude.js';
|
export { ClaudeProvider, type ClaudeConfig } from './claude.js';
|
||||||
export { OpenAIProvider, type OpenAIConfig } from './openai.js';
|
export { OpenAIProvider, type OpenAIConfig } from './openai.js';
|
||||||
export { GeminiProvider, type GeminiConfig } from './gemini.js';
|
export { GeminiProvider, type GeminiConfig } from './gemini.js';
|
||||||
|
export { OpenWebUIProvider, type OpenWebUIConfig } from './openwebui.js';
|
659
src/providers/openwebui.ts
Normal file
659
src/providers/openwebui.ts
Normal file
@ -0,0 +1,659 @@
|
|||||||
|
/**
|
||||||
|
* OpenWebUI Provider implementation using OpenWebUI's native API
|
||||||
|
* Provides integration with OpenWebUI's chat completions and Ollama proxy endpoints
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type {
|
||||||
|
AIProviderConfig,
|
||||||
|
CompletionParams,
|
||||||
|
CompletionResponse,
|
||||||
|
CompletionChunk,
|
||||||
|
ProviderInfo,
|
||||||
|
AIMessage
|
||||||
|
} from '../types/index.js';
|
||||||
|
import { BaseAIProvider } from './base.js';
|
||||||
|
import { AIProviderError, AIErrorType } from '../types/index.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Configuration specific to OpenWebUI provider
|
||||||
|
*/
|
||||||
|
export interface OpenWebUIConfig extends AIProviderConfig {
|
||||||
|
/** Default model to use if not specified in requests (default: 'llama3.1') */
|
||||||
|
defaultModel?: string;
|
||||||
|
/** Base URL for OpenWebUI instance (default: 'http://localhost:3000') */
|
||||||
|
baseUrl?: string;
|
||||||
|
/** Whether to use Ollama API proxy endpoints instead of chat completions (default: false) */
|
||||||
|
useOllamaProxy?: boolean;
|
||||||
|
/** Whether to verify SSL certificates (default: false for local) */
|
||||||
|
dangerouslyAllowInsecureConnections?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OpenWebUI chat completion response interface
|
||||||
|
*/
|
||||||
|
interface OpenWebUIChatResponse {
|
||||||
|
id: string;
|
||||||
|
object: string;
|
||||||
|
created: number;
|
||||||
|
model: string;
|
||||||
|
choices: Array<{
|
||||||
|
index: number;
|
||||||
|
message: {
|
||||||
|
role: string;
|
||||||
|
content: string;
|
||||||
|
};
|
||||||
|
finish_reason: string | null;
|
||||||
|
}>;
|
||||||
|
usage?: {
|
||||||
|
prompt_tokens: number;
|
||||||
|
completion_tokens: number;
|
||||||
|
total_tokens: number;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OpenWebUI streaming response interface
|
||||||
|
*/
|
||||||
|
interface OpenWebUIStreamChunk {
|
||||||
|
id: string;
|
||||||
|
object: string;
|
||||||
|
created: number;
|
||||||
|
model: string;
|
||||||
|
choices: Array<{
|
||||||
|
index: number;
|
||||||
|
delta: {
|
||||||
|
role?: string;
|
||||||
|
content?: string;
|
||||||
|
};
|
||||||
|
finish_reason: string | null;
|
||||||
|
}>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ollama generate response interface
|
||||||
|
*/
|
||||||
|
interface OllamaGenerateResponse {
|
||||||
|
model: string;
|
||||||
|
created_at: string;
|
||||||
|
response: string;
|
||||||
|
done: boolean;
|
||||||
|
context?: number[];
|
||||||
|
total_duration?: number;
|
||||||
|
load_duration?: number;
|
||||||
|
prompt_eval_count?: number;
|
||||||
|
prompt_eval_duration?: number;
|
||||||
|
eval_count?: number;
|
||||||
|
eval_duration?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OpenWebUI models response interface
|
||||||
|
*/
|
||||||
|
interface OpenWebUIModelsResponse {
|
||||||
|
data: Array<{
|
||||||
|
id: string;
|
||||||
|
object: string;
|
||||||
|
created: number;
|
||||||
|
owned_by: string;
|
||||||
|
}>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OpenWebUI provider implementation
|
||||||
|
*/
|
||||||
|
export class OpenWebUIProvider extends BaseAIProvider {
|
||||||
|
private readonly defaultModel: string;
|
||||||
|
private readonly baseUrl: string;
|
||||||
|
private readonly useOllamaProxy: boolean;
|
||||||
|
private readonly dangerouslyAllowInsecureConnections: boolean;
|
||||||
|
|
||||||
|
constructor(config: OpenWebUIConfig) {
|
||||||
|
super(config);
|
||||||
|
this.defaultModel = config.defaultModel || 'llama3.1';
|
||||||
|
this.baseUrl = (config.baseUrl || 'http://localhost:3000').replace(/\/$/, '');
|
||||||
|
this.useOllamaProxy = config.useOllamaProxy ?? false;
|
||||||
|
this.dangerouslyAllowInsecureConnections = config.dangerouslyAllowInsecureConnections ?? true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize the OpenWebUI provider by testing the connection
|
||||||
|
*/
|
||||||
|
protected async doInitialize(): Promise<void> {
|
||||||
|
try {
|
||||||
|
await this.validateConnection();
|
||||||
|
} catch (error) {
|
||||||
|
throw new AIProviderError(
|
||||||
|
`Failed to initialize OpenWebUI provider: ${(error as Error).message}`,
|
||||||
|
AIErrorType.NETWORK,
|
||||||
|
undefined,
|
||||||
|
error as Error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate a completion using OpenWebUI
|
||||||
|
*/
|
||||||
|
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
|
||||||
|
if (this.useOllamaProxy) {
|
||||||
|
return this.completeWithOllama(params);
|
||||||
|
} else {
|
||||||
|
return this.completeWithChat(params);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate a streaming completion using OpenWebUI
|
||||||
|
*/
|
||||||
|
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
|
||||||
|
if (this.useOllamaProxy) {
|
||||||
|
yield* this.streamWithOllama(params);
|
||||||
|
} else {
|
||||||
|
yield* this.streamWithChat(params);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complete using OpenWebUI's chat completions API
|
||||||
|
*/
|
||||||
|
private async completeWithChat(params: CompletionParams): Promise<CompletionResponse> {
|
||||||
|
const url = `${this.baseUrl}/api/chat/completions`;
|
||||||
|
|
||||||
|
const requestBody = {
|
||||||
|
model: params.model || this.defaultModel,
|
||||||
|
messages: this.convertMessages(params.messages),
|
||||||
|
max_tokens: params.maxTokens || 1000,
|
||||||
|
temperature: params.temperature ?? 0.7,
|
||||||
|
top_p: params.topP,
|
||||||
|
stop: params.stopSequences,
|
||||||
|
stream: false
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await this.makeRequest(url, 'POST', requestBody);
|
||||||
|
const data = await response.json() as OpenWebUIChatResponse;
|
||||||
|
|
||||||
|
return this.formatChatResponse(data);
|
||||||
|
} catch (error) {
|
||||||
|
throw this.handleOpenWebUIError(error as Error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complete using Ollama proxy API
|
||||||
|
*/
|
||||||
|
private async completeWithOllama(params: CompletionParams): Promise<CompletionResponse> {
|
||||||
|
const url = `${this.baseUrl}/ollama/api/generate`;
|
||||||
|
|
||||||
|
// Convert messages to a single prompt for Ollama
|
||||||
|
const prompt = this.convertMessagesToPrompt(params.messages);
|
||||||
|
|
||||||
|
const requestBody = {
|
||||||
|
model: params.model || this.defaultModel,
|
||||||
|
prompt: prompt,
|
||||||
|
stream: false,
|
||||||
|
options: {
|
||||||
|
temperature: params.temperature ?? 0.7,
|
||||||
|
top_p: params.topP,
|
||||||
|
num_predict: params.maxTokens || 1000,
|
||||||
|
stop: params.stopSequences
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await this.makeRequest(url, 'POST', requestBody);
|
||||||
|
const data = await response.json() as OllamaGenerateResponse;
|
||||||
|
|
||||||
|
return this.formatOllamaResponse(data);
|
||||||
|
} catch (error) {
|
||||||
|
throw this.handleOpenWebUIError(error as Error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stream using OpenWebUI's chat completions API
|
||||||
|
*/
|
||||||
|
private async *streamWithChat(params: CompletionParams): AsyncIterable<CompletionChunk> {
|
||||||
|
const url = `${this.baseUrl}/api/chat/completions`;
|
||||||
|
|
||||||
|
const requestBody = {
|
||||||
|
model: params.model || this.defaultModel,
|
||||||
|
messages: this.convertMessages(params.messages),
|
||||||
|
max_tokens: params.maxTokens || 1000,
|
||||||
|
temperature: params.temperature ?? 0.7,
|
||||||
|
top_p: params.topP,
|
||||||
|
stop: params.stopSequences,
|
||||||
|
stream: true
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await this.makeRequest(url, 'POST', requestBody);
|
||||||
|
|
||||||
|
if (!response.body) {
|
||||||
|
throw new Error('No response body for streaming');
|
||||||
|
}
|
||||||
|
|
||||||
|
const reader = response.body.getReader();
|
||||||
|
const decoder = new TextDecoder();
|
||||||
|
let buffer = '';
|
||||||
|
let messageId = '';
|
||||||
|
|
||||||
|
try {
|
||||||
|
while (true) {
|
||||||
|
const { done, value } = await reader.read();
|
||||||
|
if (done) break;
|
||||||
|
|
||||||
|
buffer += decoder.decode(value, { stream: true });
|
||||||
|
const lines = buffer.split('\n');
|
||||||
|
buffer = lines.pop() || '';
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
const trimmed = line.trim();
|
||||||
|
if (trimmed.startsWith('data: ')) {
|
||||||
|
const data = trimmed.slice(6);
|
||||||
|
if (data === '[DONE]') {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const chunk = JSON.parse(data) as OpenWebUIStreamChunk;
|
||||||
|
if (chunk.id && !messageId) {
|
||||||
|
messageId = chunk.id;
|
||||||
|
}
|
||||||
|
|
||||||
|
const delta = chunk.choices[0]?.delta;
|
||||||
|
if (delta?.content) {
|
||||||
|
yield {
|
||||||
|
content: delta.content,
|
||||||
|
isComplete: false,
|
||||||
|
id: messageId || chunk.id
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (chunk.choices[0]?.finish_reason) {
|
||||||
|
yield {
|
||||||
|
content: '',
|
||||||
|
isComplete: true,
|
||||||
|
id: messageId || chunk.id,
|
||||||
|
usage: {
|
||||||
|
promptTokens: 0,
|
||||||
|
completionTokens: 0,
|
||||||
|
totalTokens: 0
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} catch (parseError) {
|
||||||
|
// Skip invalid JSON chunks
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
reader.releaseLock();
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
throw this.handleOpenWebUIError(error as Error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stream using Ollama proxy API
|
||||||
|
*/
|
||||||
|
private async *streamWithOllama(params: CompletionParams): AsyncIterable<CompletionChunk> {
|
||||||
|
const url = `${this.baseUrl}/ollama/api/generate`;
|
||||||
|
|
||||||
|
const prompt = this.convertMessagesToPrompt(params.messages);
|
||||||
|
|
||||||
|
const requestBody = {
|
||||||
|
model: params.model || this.defaultModel,
|
||||||
|
prompt: prompt,
|
||||||
|
stream: true,
|
||||||
|
options: {
|
||||||
|
temperature: params.temperature ?? 0.7,
|
||||||
|
top_p: params.topP,
|
||||||
|
num_predict: params.maxTokens || 1000,
|
||||||
|
stop: params.stopSequences
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await this.makeRequest(url, 'POST', requestBody);
|
||||||
|
|
||||||
|
if (!response.body) {
|
||||||
|
throw new Error('No response body for streaming');
|
||||||
|
}
|
||||||
|
|
||||||
|
const reader = response.body.getReader();
|
||||||
|
const decoder = new TextDecoder();
|
||||||
|
let buffer = '';
|
||||||
|
const messageId = 'ollama-' + Date.now();
|
||||||
|
|
||||||
|
try {
|
||||||
|
while (true) {
|
||||||
|
const { done, value } = await reader.read();
|
||||||
|
if (done) break;
|
||||||
|
|
||||||
|
buffer += decoder.decode(value, { stream: true });
|
||||||
|
const lines = buffer.split('\n');
|
||||||
|
buffer = lines.pop() || '';
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
const trimmed = line.trim();
|
||||||
|
if (trimmed) {
|
||||||
|
try {
|
||||||
|
const chunk = JSON.parse(trimmed) as OllamaGenerateResponse;
|
||||||
|
|
||||||
|
if (chunk.response) {
|
||||||
|
yield {
|
||||||
|
content: chunk.response,
|
||||||
|
isComplete: false,
|
||||||
|
id: messageId
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (chunk.done) {
|
||||||
|
yield {
|
||||||
|
content: '',
|
||||||
|
isComplete: true,
|
||||||
|
id: messageId,
|
||||||
|
usage: {
|
||||||
|
promptTokens: chunk.prompt_eval_count || 0,
|
||||||
|
completionTokens: chunk.eval_count || 0,
|
||||||
|
totalTokens: (chunk.prompt_eval_count || 0) + (chunk.eval_count || 0)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} catch (parseError) {
|
||||||
|
// Skip invalid JSON chunks
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
reader.releaseLock();
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
throw this.handleOpenWebUIError(error as Error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get information about the OpenWebUI provider
|
||||||
|
*/
|
||||||
|
public getInfo(): ProviderInfo {
|
||||||
|
return {
|
||||||
|
name: 'OpenWebUI',
|
||||||
|
version: '1.0.0',
|
||||||
|
models: [
|
||||||
|
'llama3.1',
|
||||||
|
'llama3.1:8b',
|
||||||
|
'llama3.1:70b',
|
||||||
|
'llama3.2',
|
||||||
|
'llama3.2:1b',
|
||||||
|
'llama3.2:3b',
|
||||||
|
'codellama',
|
||||||
|
'codellama:7b',
|
||||||
|
'codellama:13b',
|
||||||
|
'codellama:34b',
|
||||||
|
'mistral',
|
||||||
|
'mistral:7b',
|
||||||
|
'mixtral',
|
||||||
|
'mixtral:8x7b',
|
||||||
|
'phi3',
|
||||||
|
'phi3:mini',
|
||||||
|
'gemma2',
|
||||||
|
'gemma2:2b',
|
||||||
|
'gemma2:9b',
|
||||||
|
'qwen2.5',
|
||||||
|
'granite3.1-dense:8b'
|
||||||
|
],
|
||||||
|
maxContextLength: 8192, // Varies by model, but reasonable default
|
||||||
|
supportsStreaming: true,
|
||||||
|
capabilities: {
|
||||||
|
vision: false, // Depends on model
|
||||||
|
functionCalling: false, // Limited in local models
|
||||||
|
systemMessages: true,
|
||||||
|
localExecution: true,
|
||||||
|
customModels: true,
|
||||||
|
rag: true // OpenWebUI supports RAG
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate the connection by attempting to list models
|
||||||
|
*/
|
||||||
|
private async validateConnection(): Promise<void> {
|
||||||
|
try {
|
||||||
|
const url = `${this.baseUrl}/api/models`;
|
||||||
|
const response = await this.makeRequest(url, 'GET');
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
await response.json(); // Just verify we can parse the response
|
||||||
|
} catch (error: any) {
|
||||||
|
if (error.code === 'ECONNREFUSED' || error.message?.includes('connect')) {
|
||||||
|
throw new AIProviderError(
|
||||||
|
`Cannot connect to OpenWebUI at ${this.baseUrl}. Make sure OpenWebUI is running.`,
|
||||||
|
AIErrorType.NETWORK
|
||||||
|
);
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Make an HTTP request with proper headers and error handling
|
||||||
|
*/
|
||||||
|
private async makeRequest(url: string, method: string, body?: any): Promise<Response> {
|
||||||
|
const headers: Record<string, string> = {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
};
|
||||||
|
|
||||||
|
// Add Authorization header if API key is provided
|
||||||
|
if (this.config.apiKey) {
|
||||||
|
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
const requestInit: RequestInit = {
|
||||||
|
method,
|
||||||
|
headers,
|
||||||
|
body: body ? JSON.stringify(body) : undefined,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Handle SSL verification for local instances
|
||||||
|
if (this.dangerouslyAllowInsecureConnections && url.startsWith('https://localhost')) {
|
||||||
|
// Note: In a real implementation, you'd need to configure the agent
|
||||||
|
// This is a placeholder for the concept
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await fetch(url, requestInit);
|
||||||
|
return response;
|
||||||
|
} catch (error) {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert our generic message format to OpenWebUI's format
|
||||||
|
*/
|
||||||
|
private convertMessages(messages: AIMessage[]): Array<{role: string; content: string}> {
|
||||||
|
return messages.map(message => ({
|
||||||
|
role: message.role,
|
||||||
|
content: message.content
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert messages to a single prompt for Ollama API
|
||||||
|
*/
|
||||||
|
private convertMessagesToPrompt(messages: AIMessage[]): string {
|
||||||
|
return messages.map(message => {
|
||||||
|
switch (message.role) {
|
||||||
|
case 'system':
|
||||||
|
return `System: ${message.content}`;
|
||||||
|
case 'user':
|
||||||
|
return `Human: ${message.content}`;
|
||||||
|
case 'assistant':
|
||||||
|
return `Assistant: ${message.content}`;
|
||||||
|
default:
|
||||||
|
return message.content;
|
||||||
|
}
|
||||||
|
}).join('\n\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format OpenWebUI chat response to our standard format
|
||||||
|
*/
|
||||||
|
private formatChatResponse(response: OpenWebUIChatResponse): CompletionResponse {
|
||||||
|
const choice = response.choices[0];
|
||||||
|
if (!choice || !choice.message.content) {
|
||||||
|
throw new AIProviderError(
|
||||||
|
'No content in OpenWebUI response',
|
||||||
|
AIErrorType.UNKNOWN
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: choice.message.content,
|
||||||
|
model: response.model,
|
||||||
|
usage: {
|
||||||
|
promptTokens: response.usage?.prompt_tokens || 0,
|
||||||
|
completionTokens: response.usage?.completion_tokens || 0,
|
||||||
|
totalTokens: response.usage?.total_tokens || 0
|
||||||
|
},
|
||||||
|
id: response.id,
|
||||||
|
metadata: {
|
||||||
|
finishReason: choice.finish_reason,
|
||||||
|
created: response.created
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format Ollama response to our standard format
|
||||||
|
*/
|
||||||
|
private formatOllamaResponse(response: OllamaGenerateResponse): CompletionResponse {
|
||||||
|
return {
|
||||||
|
content: response.response,
|
||||||
|
model: response.model,
|
||||||
|
usage: {
|
||||||
|
promptTokens: response.prompt_eval_count || 0,
|
||||||
|
completionTokens: response.eval_count || 0,
|
||||||
|
totalTokens: (response.prompt_eval_count || 0) + (response.eval_count || 0)
|
||||||
|
},
|
||||||
|
id: `ollama-${Date.now()}`,
|
||||||
|
metadata: {
|
||||||
|
created_at: response.created_at,
|
||||||
|
total_duration: response.total_duration,
|
||||||
|
eval_duration: response.eval_duration
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle OpenWebUI-specific errors and convert them to our standard format
|
||||||
|
*/
|
||||||
|
private handleOpenWebUIError(error: any): AIProviderError {
|
||||||
|
if (error instanceof AIProviderError) {
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
const message = error.message || 'Unknown OpenWebUI API error';
|
||||||
|
|
||||||
|
// Handle connection errors
|
||||||
|
if (error.code === 'ECONNREFUSED' || message.includes('connect')) {
|
||||||
|
return new AIProviderError(
|
||||||
|
`Cannot connect to OpenWebUI at ${this.baseUrl}. Make sure OpenWebUI is running.`,
|
||||||
|
AIErrorType.NETWORK,
|
||||||
|
undefined,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (error.code === 'ENOTFOUND' || message.includes('getaddrinfo')) {
|
||||||
|
return new AIProviderError(
|
||||||
|
`Cannot resolve OpenWebUI hostname. Check your baseUrl configuration.`,
|
||||||
|
AIErrorType.NETWORK,
|
||||||
|
undefined,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle HTTP status codes
|
||||||
|
const status = error.status || error.statusCode;
|
||||||
|
|
||||||
|
switch (status) {
|
||||||
|
case 400:
|
||||||
|
return new AIProviderError(
|
||||||
|
`Invalid request: ${message}`,
|
||||||
|
AIErrorType.INVALID_REQUEST,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
case 401:
|
||||||
|
return new AIProviderError(
|
||||||
|
'Authentication failed. Check your API key from OpenWebUI Settings > Account.',
|
||||||
|
AIErrorType.AUTHENTICATION,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
case 404:
|
||||||
|
// Model not found or endpoint not found
|
||||||
|
if (message.includes('model')) {
|
||||||
|
return new AIProviderError(
|
||||||
|
'Model not found. Make sure the model is available in OpenWebUI.',
|
||||||
|
AIErrorType.MODEL_NOT_FOUND,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return new AIProviderError(
|
||||||
|
`API endpoint not found. Check your baseUrl configuration.`,
|
||||||
|
AIErrorType.NETWORK,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
case 429:
|
||||||
|
return new AIProviderError(
|
||||||
|
'Rate limit exceeded. Local models may be overloaded.',
|
||||||
|
AIErrorType.RATE_LIMIT,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
case 500:
|
||||||
|
case 502:
|
||||||
|
case 503:
|
||||||
|
case 504:
|
||||||
|
return new AIProviderError(
|
||||||
|
'OpenWebUI service error. Check the OpenWebUI logs for details.',
|
||||||
|
AIErrorType.NETWORK,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
default:
|
||||||
|
// Handle timeout errors
|
||||||
|
if (message.includes('timeout') || error.code === 'ETIMEDOUT') {
|
||||||
|
return new AIProviderError(
|
||||||
|
'Request timeout. Local model inference may be slow.',
|
||||||
|
AIErrorType.TIMEOUT,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return new AIProviderError(
|
||||||
|
`OpenWebUI API error: ${message}`,
|
||||||
|
AIErrorType.UNKNOWN,
|
||||||
|
status,
|
||||||
|
error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -7,12 +7,13 @@ import type { AIProviderConfig } from '../types/index.js';
|
|||||||
import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
|
import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
|
||||||
import { OpenAIProvider, type OpenAIConfig } from '../providers/openai.js';
|
import { OpenAIProvider, type OpenAIConfig } from '../providers/openai.js';
|
||||||
import { GeminiProvider, type GeminiConfig } from '../providers/gemini.js';
|
import { GeminiProvider, type GeminiConfig } from '../providers/gemini.js';
|
||||||
|
import { OpenWebUIProvider, type OpenWebUIConfig } from '../providers/openwebui.js';
|
||||||
import { BaseAIProvider } from '../providers/base.js';
|
import { BaseAIProvider } from '../providers/base.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Supported AI provider types
|
* Supported AI provider types
|
||||||
*/
|
*/
|
||||||
export type ProviderType = 'claude' | 'openai' | 'gemini';
|
export type ProviderType = 'claude' | 'openai' | 'gemini' | 'openwebui';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Configuration map for different provider types
|
* Configuration map for different provider types
|
||||||
@ -21,6 +22,7 @@ export interface ProviderConfigMap {
|
|||||||
claude: ClaudeConfig;
|
claude: ClaudeConfig;
|
||||||
openai: OpenAIConfig;
|
openai: OpenAIConfig;
|
||||||
gemini: GeminiConfig;
|
gemini: GeminiConfig;
|
||||||
|
openwebui: OpenWebUIConfig;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -40,6 +42,8 @@ export function createProvider<T extends ProviderType>(
|
|||||||
return new OpenAIProvider(config as OpenAIConfig);
|
return new OpenAIProvider(config as OpenAIConfig);
|
||||||
case 'gemini':
|
case 'gemini':
|
||||||
return new GeminiProvider(config as GeminiConfig);
|
return new GeminiProvider(config as GeminiConfig);
|
||||||
|
case 'openwebui':
|
||||||
|
return new OpenWebUIProvider(config as OpenWebUIConfig);
|
||||||
default:
|
default:
|
||||||
throw new Error(`Unsupported provider type: ${type}`);
|
throw new Error(`Unsupported provider type: ${type}`);
|
||||||
}
|
}
|
||||||
@ -93,6 +97,13 @@ export function createGeminiProvider(
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create an OpenWebUI provider instance
|
||||||
|
*/
|
||||||
|
export function createOpenWebUIProvider(config: OpenWebUIConfig): OpenWebUIProvider {
|
||||||
|
return new OpenWebUIProvider(config);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Provider registry for dynamic provider creation
|
* Provider registry for dynamic provider creation
|
||||||
*/
|
*/
|
||||||
@ -144,3 +155,14 @@ export class ProviderRegistry {
|
|||||||
ProviderRegistry.register('claude', ClaudeProvider);
|
ProviderRegistry.register('claude', ClaudeProvider);
|
||||||
ProviderRegistry.register('openai', OpenAIProvider);
|
ProviderRegistry.register('openai', OpenAIProvider);
|
||||||
ProviderRegistry.register('gemini', GeminiProvider);
|
ProviderRegistry.register('gemini', GeminiProvider);
|
||||||
|
ProviderRegistry.register('openwebui', OpenWebUIProvider);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Registry of all available providers
|
||||||
|
*/
|
||||||
|
export const PROVIDER_REGISTRY = {
|
||||||
|
claude: ClaudeProvider,
|
||||||
|
openai: OpenAIProvider,
|
||||||
|
gemini: GeminiProvider,
|
||||||
|
openwebui: OpenWebUIProvider
|
||||||
|
} as const;
|
416
tests/openwebui.test.ts
Normal file
416
tests/openwebui.test.ts
Normal file
@ -0,0 +1,416 @@
|
|||||||
|
/**
|
||||||
|
* Tests for OpenWebUI provider implementation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach } from 'bun:test';
|
||||||
|
import { OpenWebUIProvider, type OpenWebUIConfig } from '../src/providers/openwebui.js';
|
||||||
|
import { AIProviderError, AIErrorType, type CompletionParams } from '../src/types/index.js';
|
||||||
|
|
||||||
|
describe('OpenWebUIProvider', () => {
|
||||||
|
let provider: OpenWebUIProvider;
|
||||||
|
let mockConfig: OpenWebUIConfig;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockConfig = {
|
||||||
|
apiKey: 'test-bearer-token',
|
||||||
|
baseUrl: 'http://localhost:3000',
|
||||||
|
defaultModel: 'llama3.1'
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Constructor', () => {
|
||||||
|
it('should create provider with default configuration', () => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
expect(provider.getInfo().name).toBe('OpenWebUI');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use default values when not specified', () => {
|
||||||
|
const minimalConfig: OpenWebUIConfig = { apiKey: 'test' };
|
||||||
|
provider = new OpenWebUIProvider(minimalConfig);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle custom configuration', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
baseUrl: 'http://localhost:8080',
|
||||||
|
defaultModel: 'mistral:7b',
|
||||||
|
useOllamaProxy: true
|
||||||
|
};
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle trailing slash in baseUrl', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
baseUrl: 'http://localhost:3000/',
|
||||||
|
defaultModel: 'llama3.1'
|
||||||
|
};
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getInfo', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return correct provider information', () => {
|
||||||
|
const info = provider.getInfo();
|
||||||
|
|
||||||
|
expect(info.name).toBe('OpenWebUI');
|
||||||
|
expect(info.version).toBe('1.0.0');
|
||||||
|
expect(info.supportsStreaming).toBe(true);
|
||||||
|
expect(info.maxContextLength).toBe(8192);
|
||||||
|
expect(Array.isArray(info.models)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include expected models', () => {
|
||||||
|
const info = provider.getInfo();
|
||||||
|
|
||||||
|
expect(info.models).toContain('llama3.1');
|
||||||
|
expect(info.models).toContain('mistral');
|
||||||
|
expect(info.models).toContain('codellama');
|
||||||
|
expect(info.models).toContain('gemma2');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should have correct capabilities', () => {
|
||||||
|
const info = provider.getInfo();
|
||||||
|
|
||||||
|
expect(info.capabilities).toEqual({
|
||||||
|
vision: false,
|
||||||
|
functionCalling: false,
|
||||||
|
systemMessages: true,
|
||||||
|
localExecution: true,
|
||||||
|
customModels: true,
|
||||||
|
rag: true
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Error Handling', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle connection refused errors', () => {
|
||||||
|
const mockError = new Error('connect ECONNREFUSED') as any;
|
||||||
|
mockError.code = 'ECONNREFUSED';
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.NETWORK);
|
||||||
|
expect(aiError.message).toContain('Cannot connect to OpenWebUI');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle hostname resolution errors', () => {
|
||||||
|
const mockError = new Error('getaddrinfo ENOTFOUND') as any;
|
||||||
|
mockError.code = 'ENOTFOUND';
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.NETWORK);
|
||||||
|
expect(aiError.message).toContain('Cannot resolve OpenWebUI hostname');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle 404 model not found errors', () => {
|
||||||
|
const mockError = new Error('model not available') as any;
|
||||||
|
mockError.status = 404;
|
||||||
|
mockError.message = 'model not available';
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.MODEL_NOT_FOUND);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle 404 endpoint not found errors', () => {
|
||||||
|
const mockError = new Error('Not found') as any;
|
||||||
|
mockError.status = 404;
|
||||||
|
mockError.message = 'endpoint not found';
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.NETWORK);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle timeout errors', () => {
|
||||||
|
const mockError = new Error('Request timeout') as any;
|
||||||
|
mockError.code = 'ETIMEDOUT';
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.TIMEOUT);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle rate limit errors', () => {
|
||||||
|
const mockError = new Error('Rate limit exceeded') as any;
|
||||||
|
mockError.status = 429;
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.RATE_LIMIT);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle server errors', () => {
|
||||||
|
const mockError = new Error('Internal server error') as any;
|
||||||
|
mockError.status = 500;
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.NETWORK);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle authentication errors', () => {
|
||||||
|
const mockError = new Error('Unauthorized') as any;
|
||||||
|
mockError.status = 401;
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.AUTHENTICATION);
|
||||||
|
expect(aiError.message).toContain('Settings > Account');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle unknown errors', () => {
|
||||||
|
const mockError = new Error('Unknown error');
|
||||||
|
|
||||||
|
const aiError = (provider as any).handleOpenWebUIError(mockError);
|
||||||
|
|
||||||
|
expect(aiError).toBeInstanceOf(AIProviderError);
|
||||||
|
expect(aiError.type).toBe(AIErrorType.UNKNOWN);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Message Conversion', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should convert messages correctly for chat API', () => {
|
||||||
|
const messages = [
|
||||||
|
{ role: 'system' as const, content: 'You are helpful' },
|
||||||
|
{ role: 'user' as const, content: 'Hello' },
|
||||||
|
{ role: 'assistant' as const, content: 'Hi there!' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const converted = (provider as any).convertMessages(messages);
|
||||||
|
|
||||||
|
expect(converted).toHaveLength(3);
|
||||||
|
expect(converted[0]).toEqual({ role: 'system', content: 'You are helpful' });
|
||||||
|
expect(converted[1]).toEqual({ role: 'user', content: 'Hello' });
|
||||||
|
expect(converted[2]).toEqual({ role: 'assistant', content: 'Hi there!' });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should convert messages to prompt for Ollama API', () => {
|
||||||
|
const messages = [
|
||||||
|
{ role: 'system' as const, content: 'You are helpful' },
|
||||||
|
{ role: 'user' as const, content: 'Hello' },
|
||||||
|
{ role: 'assistant' as const, content: 'Hi there!' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const prompt = (provider as any).convertMessagesToPrompt(messages);
|
||||||
|
|
||||||
|
expect(prompt).toBe('System: You are helpful\n\nHuman: Hello\n\nAssistant: Hi there!');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Response Formatting', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should format chat completion response correctly', () => {
|
||||||
|
const mockResponse = {
|
||||||
|
id: 'test-id',
|
||||||
|
object: 'chat.completion',
|
||||||
|
created: 1234567890,
|
||||||
|
model: 'llama3.1',
|
||||||
|
choices: [{
|
||||||
|
index: 0,
|
||||||
|
message: { role: 'assistant', content: 'Hello there!' },
|
||||||
|
finish_reason: 'stop'
|
||||||
|
}],
|
||||||
|
usage: {
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 5,
|
||||||
|
total_tokens: 15
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const response = (provider as any).formatChatResponse(mockResponse);
|
||||||
|
|
||||||
|
expect(response.content).toBe('Hello there!');
|
||||||
|
expect(response.model).toBe('llama3.1');
|
||||||
|
expect(response.id).toBe('test-id');
|
||||||
|
expect(response.usage.promptTokens).toBe(10);
|
||||||
|
expect(response.usage.completionTokens).toBe(5);
|
||||||
|
expect(response.usage.totalTokens).toBe(15);
|
||||||
|
expect(response.metadata?.finishReason).toBe('stop');
|
||||||
|
expect(response.metadata?.created).toBe(1234567890);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should format Ollama response correctly', () => {
|
||||||
|
const mockResponse = {
|
||||||
|
model: 'llama3.1',
|
||||||
|
created_at: '2024-01-01T00:00:00Z',
|
||||||
|
response: 'Hello there!',
|
||||||
|
done: true,
|
||||||
|
prompt_eval_count: 10,
|
||||||
|
eval_count: 5,
|
||||||
|
total_duration: 1000000,
|
||||||
|
eval_duration: 500000
|
||||||
|
};
|
||||||
|
|
||||||
|
const response = (provider as any).formatOllamaResponse(mockResponse);
|
||||||
|
|
||||||
|
expect(response.content).toBe('Hello there!');
|
||||||
|
expect(response.model).toBe('llama3.1');
|
||||||
|
expect(response.usage.promptTokens).toBe(10);
|
||||||
|
expect(response.usage.completionTokens).toBe(5);
|
||||||
|
expect(response.usage.totalTokens).toBe(15);
|
||||||
|
expect(response.metadata?.created_at).toBe('2024-01-01T00:00:00Z');
|
||||||
|
expect(response.metadata?.total_duration).toBe(1000000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle response without content', () => {
|
||||||
|
const mockResponse = {
|
||||||
|
id: 'test-id',
|
||||||
|
object: 'chat.completion',
|
||||||
|
created: 1234567890,
|
||||||
|
model: 'llama3.1',
|
||||||
|
choices: [{
|
||||||
|
index: 0,
|
||||||
|
message: { role: 'assistant' }, // Message without content
|
||||||
|
finish_reason: 'stop'
|
||||||
|
}],
|
||||||
|
usage: { prompt_tokens: 5, completion_tokens: 0, total_tokens: 5 }
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(() => {
|
||||||
|
(provider as any).formatChatResponse(mockResponse);
|
||||||
|
}).toThrow(AIProviderError);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle missing usage information', () => {
|
||||||
|
const mockResponse = {
|
||||||
|
id: 'test-id',
|
||||||
|
object: 'chat.completion',
|
||||||
|
created: 1234567890,
|
||||||
|
model: 'llama3.1',
|
||||||
|
choices: [{
|
||||||
|
index: 0,
|
||||||
|
message: { role: 'assistant', content: 'Hello there!' },
|
||||||
|
finish_reason: 'stop'
|
||||||
|
}]
|
||||||
|
// No usage information
|
||||||
|
};
|
||||||
|
|
||||||
|
const response = (provider as any).formatChatResponse(mockResponse);
|
||||||
|
|
||||||
|
expect(response.content).toBe('Hello there!');
|
||||||
|
expect(response.usage.promptTokens).toBe(0);
|
||||||
|
expect(response.usage.completionTokens).toBe(0);
|
||||||
|
expect(response.usage.totalTokens).toBe(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Configuration Options', () => {
|
||||||
|
it('should handle custom baseUrl', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
baseUrl: 'https://my-openwebui.com'
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle custom default model', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
defaultModel: 'mistral:7b'
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle Ollama proxy mode', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
useOllamaProxy: true
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle SSL verification settings', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
dangerouslyAllowInsecureConnections: false
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Request Generation', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should generate correct headers', () => {
|
||||||
|
const headers = (provider as any).makeRequest('http://test.com', 'GET');
|
||||||
|
// Note: This would need to be mocked in a real test environment
|
||||||
|
expect(typeof headers).toBe('object');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('API Mode Selection', () => {
|
||||||
|
it('should use chat API by default', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
useOllamaProxy: false
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use Ollama proxy when configured', () => {
|
||||||
|
const config: OpenWebUIConfig = {
|
||||||
|
apiKey: 'test',
|
||||||
|
useOllamaProxy: true
|
||||||
|
};
|
||||||
|
|
||||||
|
provider = new OpenWebUIProvider(config);
|
||||||
|
expect(provider).toBeInstanceOf(OpenWebUIProvider);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Validation', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenWebUIProvider(mockConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should validate provider is initialized before use', async () => {
|
||||||
|
const params: CompletionParams = {
|
||||||
|
messages: [{ role: 'user', content: 'Hello' }]
|
||||||
|
};
|
||||||
|
|
||||||
|
await expect(provider.complete(params)).rejects.toThrow('Provider must be initialized before use');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
Reference in New Issue
Block a user