// NOTE: The lines that originally appeared here were repository-browser chrome
// (file listing, "269 lines / 8.4 KiB / TypeScript / Raw Permalink Blame History",
// and a warning that the file contains invisible Unicode characters) captured
// when this example was copied from a web viewer. They are not part of the
// TypeScript source. File: simple-ai-provider/examples/multi-provider.ts
/**
* Multi-Provider Example - Demonstrating all supported AI providers
*
* This example shows how to use Claude, OpenAI, Gemini, and OpenWebUI providers
* with consistent interfaces, showcasing the power of the unified API.
*/
import {
ClaudeProvider,
OpenAIProvider,
GeminiProvider,
OpenWebUIProvider,
createProvider,
type ClaudeConfig,
type OpenAIConfig,
type GeminiConfig,
type OpenWebUIConfig,
type CompletionParams
} from '../src/index.js';
// Provider configurations.
// API keys fall back to placeholder strings so the demo can be launched (and
// fail gracefully per provider) without real credentials in the environment.
// `satisfies` validates each literal against its config type while keeping
// literal inference — unlike the previous `as X` assertions, a missing or
// misspelled property is now a compile error instead of being silenced.
const configs = {
  claude: {
    apiKey: process.env.ANTHROPIC_API_KEY || 'your-claude-api-key',
    defaultModel: 'claude-3-5-sonnet-20241022'
  } satisfies ClaudeConfig,
  openai: {
    apiKey: process.env.OPENAI_API_KEY || 'your-openai-api-key',
    defaultModel: 'gpt-4o'
  } satisfies OpenAIConfig,
  gemini: {
    apiKey: process.env.GOOGLE_AI_API_KEY || 'your-gemini-api-key',
    defaultModel: 'gemini-1.5-flash'
  } satisfies GeminiConfig,
  openwebui: {
    apiKey: process.env.OPENWEBUI_API_KEY || 'your-bearer-token', // Get from OpenWebUI Settings > Account
    baseUrl: 'http://localhost:3000',
    defaultModel: 'llama3.1',
    useOllamaProxy: false // Set to true to use Ollama API proxy
  } satisfies OpenWebUIConfig
};
/**
 * Walks every supported provider (Claude, OpenAI, Gemini, OpenWebUI) through
 * the same unified workflow: construction (direct and via factory), metadata
 * inspection, a shared completion request, streaming, provider-specific
 * prompts, error handling, and a comparison table.
 *
 * All output goes to the console. Each provider call is wrapped in its own
 * try/catch so a missing API key or an unreachable local server reports an
 * error for that provider without aborting the rest of the demo.
 *
 * @returns Resolves when all demo sections have run.
 */
async function demonstrateProviders(): Promise<void> {
  console.log('🤖 Multi-Provider AI Demo\n');

  // ===== 1. Direct Provider Creation =====
  console.log('1⃣ Creating providers directly...\n');
  const claude = new ClaudeProvider(configs.claude);
  const openai = new OpenAIProvider(configs.openai);
  const gemini = new GeminiProvider(configs.gemini);
  const openwebui = new OpenWebUIProvider(configs.openwebui);
  const providers = { claude, openai, gemini, openwebui };

  // ===== 2. Factory Creation =====
  console.log('2⃣ Creating providers via factory...\n');
  // Created purely to demonstrate the factory API; the directly-constructed
  // instances above are the ones exercised in the sections below.
  const factoryProviders = {
    claude: createProvider('claude', configs.claude),
    openai: createProvider('openai', configs.openai),
    gemini: createProvider('gemini', configs.gemini),
    openwebui: createProvider('openwebui', configs.openwebui)
  };
  void factoryProviders; // intentionally unused beyond construction

  // ===== 3. Provider Information =====
  console.log('3⃣ Provider Information:\n');
  for (const [name, provider] of Object.entries(providers)) {
    const info = provider.getInfo();
    console.log(`${name.toUpperCase()}: ${info.name} v${info.version}`);
    console.log(` • Context: ${info.maxContextLength.toLocaleString()} tokens`);
    console.log(` • Streaming: ${info.supportsStreaming ? '✅' : '❌'}`);
    console.log(` • Models: ${info.models.slice(0, 3).join(', ')}${info.models.length > 3 ? '...' : ''}`);
    // `capabilities` is optional on the provider info shape; only print the
    // capability flags when the provider reports them.
    if (info.capabilities) {
      console.log(` • Vision: ${info.capabilities.vision ? '✅' : '❌'}`);
      console.log(` • Function Calling: ${info.capabilities.functionCalling ? '✅' : '❌'}`);
      console.log(` • Local Execution: ${info.capabilities.localExecution ? '✅' : '❌'}`);
    }
    console.log();
  }

  // ===== 4. Common Completion Example =====
  console.log('4⃣ Running completions across all providers...\n');
  // `as const` keeps the role fields as literal types ('system' | 'user')
  // rather than widening to string, matching the message union type.
  const messages = [
    { role: 'system' as const, content: 'You are a helpful assistant. Be concise.' },
    { role: 'user' as const, content: 'What is TypeScript? Answer in one sentence.' }
  ];
  const params: CompletionParams = {
    messages,
    maxTokens: 50,
    temperature: 0.7
  };
  for (const [name, provider] of Object.entries(providers)) {
    try {
      console.log(`${name.toUpperCase()} Response:`);
      // Initialize provider (would be done once in real app)
      await provider.initialize();
      const response = await provider.complete(params);
      console.log(`${response.content.trim()}`);
      console.log(` 📊 Tokens: ${response.usage.totalTokens} (${response.usage.promptTokens}+${response.usage.completionTokens})\n`);
    } catch (error) {
      console.log(` ❌ Error: ${(error as Error).message}\n`);
    }
  }

  // ===== 5. Streaming Example =====
  console.log('5⃣ Streaming example (Claude)...\n');
  try {
    await claude.initialize();
    console.log('Claude Streaming Response:');
    process.stdout.write(' ');
    for await (const chunk of claude.stream({
      messages: [{ role: 'user', content: 'Count from 1 to 5 with explanations.' }],
      maxTokens: 150
    })) {
      if (!chunk.isComplete) {
        // Incremental content: write without a newline so the stream renders
        // as one continuous response.
        process.stdout.write(chunk.content);
      } else if (chunk.usage) {
        // Final chunk carries the aggregate token usage.
        console.log(`\n 📊 Final tokens: ${chunk.usage.totalTokens}\n`);
      }
    }
  } catch (error) {
    console.log(` ❌ Streaming error: ${(error as Error).message}\n`);
  }

  // ===== 6. Provider-Specific Features =====
  console.log('6⃣ Provider-specific features...\n');

  // Claude - Advanced reasoning (low temperature for a deterministic analysis)
  try {
    await claude.initialize();
    console.log('Claude Advanced Reasoning:');
    const claudeResponse = await claude.complete({
      messages: [{
        role: 'user',
        content: 'Analyze the logical structure of this argument: "All humans are mortal. Socrates is human. Therefore, Socrates is mortal."'
      }],
      maxTokens: 100,
      temperature: 0.1
    });
    console.log(`${claudeResponse.content.trim()}\n`);
  } catch (error) {
    console.log(` ❌ Claude error: ${(error as Error).message}\n`);
  }

  // OpenAI - Function calling (conceptual)
  try {
    await openai.initialize();
    console.log('OpenAI Code Generation:');
    const openaiResponse = await openai.complete({
      messages: [{
        role: 'user',
        content: 'Write a simple TypeScript function to calculate factorial. Just the function, no explanation.'
      }],
      maxTokens: 100,
      temperature: 0.3
    });
    console.log(`${openaiResponse.content.trim()}\n`);
  } catch (error) {
    console.log(` ❌ OpenAI error: ${(error as Error).message}\n`);
  }

  // Gemini - Large context
  try {
    await gemini.initialize();
    console.log('Gemini Large Context Capability:');
    const geminiResponse = await gemini.complete({
      messages: [{
        role: 'user',
        content: 'Explain the benefits of having 1M token context length for AI applications.'
      }],
      maxTokens: 80,
      temperature: 0.5
    });
    console.log(`${geminiResponse.content.trim()}\n`);
  } catch (error) {
    console.log(` ❌ Gemini error: ${(error as Error).message}\n`);
  }

  // OpenWebUI - Local model capabilities (expected to fail when no local
  // OpenWebUI instance is running, hence the extra explanatory line)
  try {
    await openwebui.initialize();
    console.log('OpenWebUI Local Model:');
    const openwebuiResponse = await openwebui.complete({
      messages: [{
        role: 'user',
        content: 'What are the advantages of running AI models locally?'
      }],
      maxTokens: 80,
      temperature: 0.6
    });
    console.log(`${openwebuiResponse.content.trim()}\n`);
  } catch (error) {
    console.log(` ❌ OpenWebUI error: ${(error as Error).message}`);
    console.log(` (This is expected if OpenWebUI is not running locally or API key is invalid)\n`);
  }

  // ===== 7. Error Handling Demonstration =====
  console.log('7⃣ Error handling examples...\n');
  try {
    const invalidProvider = new ClaudeProvider({ apiKey: 'invalid-key' });
    await invalidProvider.initialize();
    await invalidProvider.complete({
      messages: [{ role: 'user', content: 'Test' }]
    });
  } catch (error) {
    // Narrow the unknown catch value instead of using `any`: provider errors
    // carry a `type` field in addition to the standard Error message.
    const err = error as Error & { type?: string };
    console.log('Expected authentication error:');
    console.log(` ❌ Type: ${err.type}`);
    console.log(` ❌ Message: ${err.message}\n`);
  }

  // ===== 8. Performance Comparison =====
  console.log('8⃣ Provider Comparison Summary:\n');
  // Static marketing-style summary; values are informational only and are not
  // derived from the provider objects above.
  const comparison = [
    {
      Provider: 'Claude',
      'Context Length': '200K tokens',
      'Best For': 'Reasoning, Analysis, Code Review',
      'Streaming': '✅',
      'Cost': 'Mid-range'
    },
    {
      Provider: 'OpenAI',
      'Context Length': '128K tokens',
      'Best For': 'General Purpose, Function Calling',
      'Streaming': '✅',
      'Cost': 'Variable'
    },
    {
      Provider: 'Gemini',
      'Context Length': '1M tokens',
      'Best For': 'Large Documents, Multimodal',
      'Streaming': '✅',
      'Cost': 'Low-cost'
    },
    {
      Provider: 'OpenWebUI',
      'Context Length': '8K-32K tokens',
      'Best For': 'Privacy, Local Inference, Custom Models, RAG',
      'Streaming': '✅',
      'Cost': 'Free (compute)'
    }
  ];
  console.table(comparison);

  console.log('\n✨ Demo completed! All providers work with the same unified interface.\n');
}
// Run the demonstration only when this module is the process entry point.
// NOTE(review): `import.meta.main` is a Bun/Deno feature; it is undefined in
// plain Node.js (where this guard would always be falsy) — confirm the
// intended runtime for this example.
if (import.meta.main) {
demonstrateProviders().catch(console.error);
}