feat: add OpenAI provider integration and examples

This commit is contained in:
2025-05-28 12:04:10 +02:00
parent 42902445fb
commit aa2fd98cc1
9 changed files with 1061 additions and 17 deletions

169
README.md
View File

@ -1,6 +1,6 @@
# Simple AI Provider
A professional, extensible TypeScript package for integrating multiple AI providers into your applications with a unified interface. Currently supports **Claude (Anthropic)** and **OpenAI (GPT)** with plans to add more providers.
## Features
@ -11,6 +11,7 @@ A professional, extensible TypeScript package for integrating multiple AI provid
- 🛡️ **Error Handling**: Robust error handling with categorized error types
- 🔧 **Extensible**: Easy to add new AI providers
- 📦 **Modern**: Built with ES modules and modern JavaScript features
- 🌐 **Multi-Provider**: Switch between Claude and OpenAI seamlessly
## Installation
@ -47,16 +48,62 @@ const response = await claude.complete({
console.log(response.content);
```
### Basic Usage with OpenAI
```typescript
import { createOpenAIProvider } from 'simple-ai-provider';
// Create an OpenAI provider
const openai = createOpenAIProvider('your-openai-api-key');
// Initialize the provider
await openai.initialize();
// Generate a completion
const response = await openai.complete({
messages: [
{ role: 'user', content: 'Hello! How are you today?' }
],
maxTokens: 100,
temperature: 0.7
});
console.log(response.content);
```
### Multi-Provider Usage
```typescript
import { createProvider, createClaudeProvider, createOpenAIProvider } from 'simple-ai-provider';
// Method 1: Using specific factory functions
const claude = createClaudeProvider('your-anthropic-api-key');
const openai = createOpenAIProvider('your-openai-api-key');
// Method 2: Using generic factory
const claude2 = createProvider('claude', { apiKey: 'your-anthropic-api-key' });
const openai2 = createProvider('openai', { apiKey: 'your-openai-api-key' });
// Initialize both
await Promise.all([claude.initialize(), openai.initialize()]);
// Use the same interface for both providers
const prompt = { messages: [{ role: 'user', content: 'Explain AI' }] };
const claudeResponse = await claude.complete(prompt);
const openaiResponse = await openai.complete(prompt);
```
### Streaming Responses
```typescript
import { createClaudeProvider, createOpenAIProvider } from 'simple-ai-provider';
const claude = createClaudeProvider('your-anthropic-api-key');
await claude.initialize();
const openai = createOpenAIProvider('your-openai-api-key');
await openai.initialize();
// Stream a completion
for await (const chunk of openai.stream({
messages: [
{ role: 'user', content: 'Write a short story about a robot.' }
],
@ -73,8 +120,9 @@ for await (const chunk of claude.stream({
### Advanced Configuration
```typescript
import { ClaudeProvider } from 'simple-ai-provider';
import { ClaudeProvider, OpenAIProvider } from 'simple-ai-provider';
// Claude with custom configuration
const claude = new ClaudeProvider({
apiKey: 'your-anthropic-api-key',
defaultModel: 'claude-3-5-sonnet-20241022',
@ -83,14 +131,24 @@ const claude = new ClaudeProvider({
baseUrl: 'https://api.anthropic.com' // optional custom endpoint
});
await claude.initialize();
// OpenAI with organization and project
const openai = new OpenAIProvider({
apiKey: 'your-openai-api-key',
defaultModel: 'gpt-4',
organization: 'org-your-org-id',
project: 'proj-your-project-id',
timeout: 60000,
maxRetries: 5
});
await Promise.all([claude.initialize(), openai.initialize()]);
const response = await openai.complete({
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain quantum computing in simple terms.' }
],
model: 'gpt-4-turbo',
maxTokens: 300,
temperature: 0.5,
topP: 0.9,
@ -147,6 +205,17 @@ const claude = createClaudeProvider('your-api-key', {
});
```
#### `createOpenAIProvider(apiKey, options?)`
Creates an OpenAI provider with simplified configuration.
```typescript
const openai = createOpenAIProvider('your-api-key', {
defaultModel: 'gpt-4',
organization: 'org-123',
timeout: 60000
});
```
#### `createProvider(type, config)`
Generic factory function for creating any provider type.
@ -155,6 +224,11 @@ const claude = createProvider('claude', {
apiKey: 'your-api-key',
defaultModel: 'claude-3-5-sonnet-20241022'
});
const openai = createProvider('openai', {
apiKey: 'your-api-key',
defaultModel: 'gpt-4'
});
```
### Provider Methods
@ -182,7 +256,7 @@ The package provides comprehensive error handling with categorized error types:
import { AIProviderError, AIErrorType } from 'simple-ai-provider';
try {
const response = await openai.complete({
messages: [{ role: 'user', content: 'Hello!' }]
});
} catch (error) {
@ -213,26 +287,91 @@ try {
- `claude-3-sonnet-20240229`
- `claude-3-haiku-20240307`
### OpenAI (GPT)
- `gpt-4` (default)
- `gpt-4-turbo`
- `gpt-4-turbo-preview`
- `gpt-4-0125-preview`
- `gpt-4-1106-preview`
- `gpt-3.5-turbo`
- `gpt-3.5-turbo-0125`
- `gpt-3.5-turbo-1106`
## Environment Variables
You can set your API keys as environment variables:
```bash
export ANTHROPIC_API_KEY="your-anthropic-api-key"
export OPENAI_API_KEY="your-openai-api-key"
```
```typescript
const claude = createClaudeProvider(process.env.ANTHROPIC_API_KEY!);
const openai = createOpenAIProvider(process.env.OPENAI_API_KEY!);
```
## Provider Comparison
| Feature | Claude | OpenAI |
|---------|--------|--------|
| **Models** | 5 models | 8+ models |
| **Max Context** | 200K tokens | 128K tokens |
| **Streaming** | ✅ | ✅ |
| **Vision** | ✅ | ✅ |
| **Function Calling** | ✅ | ✅ |
| **JSON Mode** | ❌ | ✅ |
| **System Messages** | ✅ (separate) | ✅ (inline) |
## Best Practices
1. **Always initialize providers** before using them
2. **Handle errors gracefully** with proper error types
3. **Use appropriate models** for your use case (speed vs. capability)
4. **Set reasonable timeouts** for your application
5. **Implement retry logic** for production applications
6. **Monitor token usage** to control costs
7. **Use environment variables** for API keys
8. **Consider provider-specific features** when choosing
## Advanced Usage
### Provider Registry
```typescript
import { ProviderRegistry } from 'simple-ai-provider';
// List all registered providers
console.log(ProviderRegistry.getRegisteredProviders()); // ['claude', 'openai']
// Create provider by name
const provider = ProviderRegistry.create('openai', {
apiKey: 'your-api-key'
});
// Check if provider is registered
if (ProviderRegistry.isRegistered('claude')) {
console.log('Claude is available!');
}
```
### Custom Error Handling
```typescript
function handleAIError(error: unknown, providerName: string) {
if (error instanceof AIProviderError) {
console.error(`${providerName} Error (${error.type}):`, error.message);
if (error.statusCode) {
console.error('HTTP Status:', error.statusCode);
}
if (error.originalError) {
console.error('Original Error:', error.originalError.message);
}
}
}
```
## Extending the Package
@ -264,6 +403,9 @@ class MyCustomProvider extends BaseAIProvider {
};
}
}
// Register your provider
ProviderRegistry.register('mycustom', MyCustomProvider);
```
## Contributing
@ -279,6 +421,9 @@ MIT
### 1.0.0
- Initial release
- Claude provider implementation
- OpenAI provider implementation
- Streaming support for both providers
- Comprehensive error handling
- TypeScript support
- Provider registry system
- Multi-provider examples

View File

@ -5,6 +5,7 @@
"name": "simple-ai-provider",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"openai": "^4.103.0",
},
"devDependencies": {
"@types/bun": "latest",
@ -22,14 +23,92 @@
"@types/node": ["@types/node@20.17.51", "", { "dependencies": { "undici-types": "~6.19.2" } }, "sha512-hccptBl7C8lHiKxTBsY6vYYmqpmw1E/aGR/8fmueE+B390L3pdMOpNSRvFO4ZnXzW5+p2HBXV0yNABd2vdk22Q=="],
"@types/node-fetch": ["@types/node-fetch@2.6.12", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.0" } }, "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA=="],
"abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="],
"agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="],
"asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="],
"bun-types": ["bun-types@1.2.14", "", { "dependencies": { "@types/node": "*" } }, "sha512-Kuh4Ub28ucMRWeiUUWMHsT9Wcbr4H3kLIO72RZZElSDxSu7vpetRvxIUDUaW6QtaIeixIpm7OXtNnZPf82EzwA=="],
"call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="],
"combined-stream": ["combined-stream@1.0.8", "", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="],
"delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="],
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
"es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="],
"es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="],
"es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="],
"es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],
"event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="],
"form-data": ["form-data@4.0.2", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "mime-types": "^2.1.12" } }, "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w=="],
"form-data-encoder": ["form-data-encoder@1.7.2", "", {}, "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="],
"formdata-node": ["formdata-node@4.4.1", "", { "dependencies": { "node-domexception": "1.0.0", "web-streams-polyfill": "4.0.0-beta.3" } }, "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ=="],
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
"get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="],
"get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="],
"gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="],
"has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="],
"has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="],
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
"humanize-ms": ["humanize-ms@1.2.1", "", { "dependencies": { "ms": "^2.0.0" } }, "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ=="],
"math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="],
"mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
"mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
"node-domexception": ["node-domexception@1.0.0", "", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="],
"node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
"openai": ["openai@4.103.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" }, "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-eWcz9kdurkGOFDtd5ySS5y251H2uBgq9+1a2lTBnjMMzlexJ40Am5t6Mu76SSE87VvitPa0dkIAp75F+dZVC0g=="],
"tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
"typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
"undici-types": ["undici-types@6.19.8", "", {}, "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw=="],
"web-streams-polyfill": ["web-streams-polyfill@4.0.0-beta.3", "", {}, "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug=="],
"webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
"whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
"@types/node-fetch/@types/node": ["@types/node@22.15.23", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-7Ec1zaFPF4RJ0eXu1YT/xgiebqwqoJz8rYPDi/O2BcZ++Wpt0Kq9cl0eg6NN6bYbPnR67ZLo7St5Q3UK0SnARw=="],
"bun-types/@types/node": ["@types/node@22.15.23", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-7Ec1zaFPF4RJ0eXu1YT/xgiebqwqoJz8rYPDi/O2BcZ++Wpt0Kq9cl0eg6NN6bYbPnR67ZLo7St5Q3UK0SnARw=="],
"openai/@types/node": ["@types/node@18.19.104", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-mqjoYx1RjmN61vjnHWfiWzAlwvBKutoUdm+kYLPnjI5DCh8ZqofUhaTbT3WLl7bt3itR8DuCf8ShnxI0JvIC3g=="],
"@types/node-fetch/@types/node/undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
"bun-types/@types/node/undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
"openai/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
}
}

216
examples/multi-provider.ts Normal file
View File

@ -0,0 +1,216 @@
/**
* Multi-provider example for Simple AI Provider
* Demonstrates how to use both Claude and OpenAI providers
*/
import {
createClaudeProvider,
createOpenAIProvider,
createProvider,
ProviderRegistry,
AIProviderError,
AIErrorType
} from '../src/index.js';
/**
 * Demonstrates every way this package exposes to create and use providers:
 * the per-provider factory helpers, the generic `createProvider` factory,
 * and the `ProviderRegistry`, followed by completion and streaming calls
 * against both Claude and OpenAI with the same prompt shape.
 *
 * Requires ANTHROPIC_API_KEY and OPENAI_API_KEY for the live calls to
 * succeed; with the placeholder keys the calls will fail with an
 * AUTHENTICATION error, which the catch block explains to the user.
 */
async function multiProviderExample() {
  console.log('=== Multi-Provider AI Example ===\n');
  // Get API keys from environment; the fallback strings are placeholders
  // that let the example run far enough to show the error handling path.
  const claudeApiKey = process.env.ANTHROPIC_API_KEY || 'your-claude-api-key';
  const openaiApiKey = process.env.OPENAI_API_KEY || 'your-openai-api-key';
  try {
    // Method 1: Using factory functions
    console.log('1. Creating providers using factory functions...');
    const claude = createClaudeProvider(claudeApiKey, {
      defaultModel: 'claude-3-5-haiku-20241022'
    });
    const openai = createOpenAIProvider(openaiApiKey, {
      defaultModel: 'gpt-3.5-turbo'
    });
    console.log('✓ Providers created\n');
    // Method 2: Using generic createProvider function
    // (claude2/openai2 are created only to show the API; they are not used below)
    console.log('2. Creating providers using generic factory...');
    const claude2 = createProvider('claude', {
      apiKey: claudeApiKey,
      defaultModel: 'claude-3-5-haiku-20241022'
    });
    const openai2 = createProvider('openai', {
      apiKey: openaiApiKey,
      defaultModel: 'gpt-3.5-turbo'
    });
    console.log('✓ Generic providers created\n');
    // Method 3: Using Provider Registry — dynamic lookup by provider name
    console.log('3. Using Provider Registry...');
    console.log('Registered providers:', ProviderRegistry.getRegisteredProviders());
    const claudeFromRegistry = ProviderRegistry.create('claude', {
      apiKey: claudeApiKey,
      defaultModel: 'claude-3-5-haiku-20241022'
    });
    console.log('✓ Provider created from registry\n');
    // Initialize providers (both in parallel; complete()/stream() require this)
    console.log('4. Initializing providers...');
    await Promise.all([
      claude.initialize(),
      openai.initialize()
    ]);
    console.log('✓ All providers initialized\n');
    // Compare provider information (models, context size, capabilities)
    console.log('5. Provider Information:');
    console.log('Claude Info:', claude.getInfo());
    console.log('OpenAI Info:', openai.getInfo());
    console.log();
    // Test the same prompt with both providers — identical request shape
    const testPrompt = 'Explain the concept of recursion in programming in one sentence.';
    console.log('6. Testing same prompt with both providers...');
    console.log(`Prompt: "${testPrompt}"\n`);
    // Claude response
    console.log('--- Claude Response ---');
    const claudeResponse = await claude.complete({
      messages: [
        { role: 'system', content: 'You are a concise programming tutor.' },
        { role: 'user', content: testPrompt }
      ],
      maxTokens: 100,
      temperature: 0.7
    });
    console.log('Response:', claudeResponse.content);
    console.log('Usage:', claudeResponse.usage);
    console.log('Model:', claudeResponse.model);
    console.log();
    // OpenAI response — same unified interface as Claude above
    console.log('--- OpenAI Response ---');
    const openaiResponse = await openai.complete({
      messages: [
        { role: 'system', content: 'You are a concise programming tutor.' },
        { role: 'user', content: testPrompt }
      ],
      maxTokens: 100,
      temperature: 0.7
    });
    console.log('Response:', openaiResponse.content);
    console.log('Usage:', openaiResponse.usage);
    console.log('Model:', openaiResponse.model);
    console.log();
    // Streaming comparison — chunks arrive incrementally; the final chunk
    // has isComplete === true and empty content
    console.log('7. Streaming comparison...');
    console.log('Streaming from Claude:');
    for await (const chunk of claude.stream({
      messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
      maxTokens: 50
    })) {
      if (!chunk.isComplete) {
        process.stdout.write(chunk.content);
      } else {
        console.log('\n✓ Claude streaming complete\n');
      }
    }
    console.log('Streaming from OpenAI:');
    for await (const chunk of openai.stream({
      messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
      maxTokens: 50
    })) {
      if (!chunk.isComplete) {
        process.stdout.write(chunk.content);
      } else {
        console.log('\n✓ OpenAI streaming complete\n');
      }
    }
    // Provider-specific features demo (construction only; no requests made)
    console.log('8. Provider-specific features...');
    // OpenAI with organization
    const openaiWithOrg = createOpenAIProvider(openaiApiKey, {
      defaultModel: 'gpt-3.5-turbo',
      organization: 'org-example',
      timeout: 60000
    });
    console.log('✓ Created OpenAI provider with organization settings');
    // Claude with custom version
    const claudeCustom = createClaudeProvider(claudeApiKey, {
      defaultModel: 'claude-3-5-sonnet-20241022',
      version: '2023-06-01',
      maxRetries: 5
    });
    console.log('✓ Created Claude provider with custom settings');
    console.log('\n🎉 Multi-provider example completed successfully!');
  } catch (error) {
    // All provider errors are normalized to AIProviderError with a typed
    // category, so we can give targeted hints per error type.
    console.error('❌ Error in multi-provider example:');
    if (error instanceof AIProviderError) {
      console.error(`AI Provider Error (${error.type}):`, error.message);
      switch (error.type) {
        case AIErrorType.AUTHENTICATION:
          console.error('💡 Hint: Check your API keys in environment variables');
          console.error('   Set ANTHROPIC_API_KEY and OPENAI_API_KEY');
          break;
        case AIErrorType.RATE_LIMIT:
          console.error('💡 Hint: You are being rate limited. Wait and try again.');
          break;
        case AIErrorType.INVALID_REQUEST:
          console.error('💡 Hint: Check your request parameters.');
          break;
        default:
          console.error('💡 Hint: An unexpected error occurred.');
      }
    } else {
      console.error('Unexpected error:', error);
    }
  }
}
/**
 * Provider comparison utility.
 * Prints a markdown-style capability table for each registered provider
 * using only getInfo() — no network calls, so dummy API keys are fine.
 */
async function compareProviders() {
  console.log('\n=== Provider Comparison ===');
  const providers = [
    { name: 'Claude', factory: () => createClaudeProvider('dummy-key') },
    { name: 'OpenAI', factory: () => createOpenAIProvider('dummy-key') }
  ];
  console.log('\nProvider Capabilities:');
  console.log('| Provider | Models | Context | Streaming | Vision | Functions |');
  console.log('|----------|--------|---------|-----------|--------|-----------|');
  // Render a ✓/✗ flag padded to the column width. The original applied
  // .padEnd only to the '✗' literal inside each ternary, so padding was
  // silently dropped whenever the feature WAS supported, misaligning rows.
  const flag = (supported: boolean | undefined, width: number): string =>
    (supported ? '✓' : '✗').padEnd(width);
  for (const { name, factory } of providers) {
    const provider = factory();
    const info = provider.getInfo();
    console.log(
      `| ${name.padEnd(8)} | ${info.models.length.toString().padEnd(6)} | ${info.maxContextLength.toLocaleString().padEnd(7)} | ${flag(info.supportsStreaming, 9)} | ${flag(info.capabilities?.vision, 6)} | ${flag(info.capabilities?.functionCalling, 9)} |`
    );
  }
  console.log();
}
// Run the examples only when this file is executed directly.
// NOTE(review): import.meta.main looks Bun-specific (the repo uses bun:test
// and bun-types) — confirm if Node compatibility is ever required.
if (import.meta.main) {
  await multiProviderExample();
  await compareProviders();
}

View File

@ -31,6 +31,8 @@
"ai",
"claude",
"anthropic",
"openai",
"gpt",
"provider",
"typescript",
"nodejs"
@ -42,7 +44,8 @@
"url": "https://gitea.jleibl.net/jleibl/simple-ai-provider.git"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0"
"@anthropic-ai/sdk": "^0.52.0",
"openai": "^4.103.0"
},
"devDependencies": {
"@types/bun": "latest",

View File

@ -28,11 +28,13 @@ export { BaseAIProvider } from './providers/base.js';
// Concrete provider implementations
export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
export { OpenAIProvider, type OpenAIConfig } from './providers/openai.js';
// Utility functions and factory
export {
createProvider,
createClaudeProvider,
createOpenAIProvider,
ProviderRegistry,
type ProviderType,
type ProviderConfigMap
@ -49,4 +51,4 @@ export const VERSION = '1.0.0';
/**
 * List of supported providers.
 * (The rendered source contained two declarations of this constant — the
 * pre- and post-change lines of a diff — which is a TS redeclaration error;
 * only the current one is kept.)
 */
export const SUPPORTED_PROVIDERS = ['claude', 'openai'] as const;

View File

@ -4,4 +4,5 @@
*/
// Barrel re-exports for the providers module.
// The ClaudeProvider line appeared twice (duplicate export, a merge/diff
// artifact); a duplicate re-export of the same bindings is a compile error.
export { BaseAIProvider } from './base.js';
export { ClaudeProvider, type ClaudeConfig } from './claude.js';
export { OpenAIProvider, type OpenAIConfig } from './openai.js';

316
src/providers/openai.ts Normal file
View File

@ -0,0 +1,316 @@
/**
* OpenAI Provider implementation using OpenAI's API
* Provides integration with GPT models through a standardized interface
*/
import OpenAI from 'openai';
import type {
AIProviderConfig,
CompletionParams,
CompletionResponse,
CompletionChunk,
ProviderInfo,
AIMessage
} from '../types/index.js';
import { BaseAIProvider } from './base.js';
import { AIProviderError, AIErrorType } from '../types/index.js';
/**
 * Configuration specific to the OpenAI provider.
 * Extends the shared AIProviderConfig (apiKey, baseUrl, timeout, maxRetries)
 * with OpenAI-only options that are forwarded to the OpenAI client.
 */
export interface OpenAIConfig extends AIProviderConfig {
  /** Default model to use if not specified in requests (default: gpt-4) */
  defaultModel?: string;
  /** OpenAI organization ID, passed through to the OpenAI client (optional) */
  organization?: string;
  /** OpenAI project ID, passed through to the OpenAI client (optional) */
  project?: string;
}
/**
 * OpenAI provider implementation.
 * Wraps the official `openai` SDK behind the package's BaseAIProvider
 * contract: initialize(), complete(), stream(), getInfo(), with all SDK
 * errors normalized to AIProviderError.
 */
export class OpenAIProvider extends BaseAIProvider {
  /** SDK client; null until doInitialize() succeeds. */
  private client: OpenAI | null = null;
  private readonly defaultModel: string;
  private readonly organization?: string;
  private readonly project?: string;

  constructor(config: OpenAIConfig) {
    super(config);
    this.defaultModel = config.defaultModel || 'gpt-4';
    this.organization = config.organization;
    this.project = config.project;
  }

  /**
   * Initialize the provider: build the OpenAI client from config and verify
   * credentials with a minimal request.
   * @throws AIProviderError (AUTHENTICATION) when setup or validation fails
   */
  protected async doInitialize(): Promise<void> {
    try {
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl,
        timeout: this.config.timeout,
        maxRetries: this.config.maxRetries,
        organization: this.organization,
        project: this.project
      });
      // Test the connection by making a simple request
      await this.validateConnection();
    } catch (error) {
      throw new AIProviderError(
        `Failed to initialize OpenAI provider: ${(error as Error).message}`,
        AIErrorType.AUTHENTICATION,
        undefined,
        error as Error
      );
    }
  }

  /**
   * Generate a (non-streaming) completion using OpenAI.
   * @param params - unified completion parameters (model, messages, sampling)
   * @returns the normalized CompletionResponse
   * @throws AIProviderError mapped from the underlying API error
   */
  protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const response = await this.client.chat.completions.create({
        model: params.model || this.defaultModel,
        messages: this.convertMessages(params.messages),
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop: params.stopSequences,
        stream: false
      });
      return this.formatCompletionResponse(response);
    } catch (error) {
      throw this.handleOpenAIError(error as Error);
    }
  }

  /**
   * Generate a streaming completion using OpenAI.
   * Yields incremental content chunks, then one terminal chunk with
   * isComplete === true carrying the real token usage.
   *
   * Fix over the original: the previous implementation declared local
   * token counters that were never updated, so the terminal chunk always
   * reported zero usage. We now request `stream_options.include_usage`,
   * which makes the API send a final chunk (with an empty `choices` array)
   * that contains the actual usage numbers.
   */
  protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
    if (!this.client) {
      throw new AIProviderError('Client not initialized', AIErrorType.INVALID_REQUEST);
    }
    try {
      const stream = await this.client.chat.completions.create({
        model: params.model || this.defaultModel,
        messages: this.convertMessages(params.messages),
        max_tokens: params.maxTokens || 1000,
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        stop: params.stopSequences,
        stream: true,
        // Ask the API to append a final usage-bearing chunk to the stream
        stream_options: { include_usage: true }
      });

      let messageId = '';
      let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };

      for await (const chunk of stream) {
        if (chunk.id && !messageId) {
          messageId = chunk.id;
        }
        // The usage chunk arrives last and has no choices, so check it
        // before indexing into choices[0].
        if (chunk.usage) {
          usage = {
            promptTokens: chunk.usage.prompt_tokens,
            completionTokens: chunk.usage.completion_tokens,
            totalTokens: chunk.usage.total_tokens
          };
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield {
            content: delta.content,
            isComplete: false,
            id: messageId || chunk.id
          };
        }
      }

      // Emit the terminal chunk after the stream closes: the usage chunk is
      // delivered AFTER the finish_reason chunk, so yielding inside the loop
      // on finish_reason (as before) would miss it.
      yield {
        content: '',
        isComplete: true,
        id: messageId,
        usage
      };
    } catch (error) {
      throw this.handleOpenAIError(error as Error);
    }
  }

  /**
   * Get static information about the OpenAI provider: supported models,
   * maximum context length, and capability flags.
   */
  public getInfo(): ProviderInfo {
    return {
      name: 'OpenAI',
      version: '1.0.0',
      models: [
        'gpt-4',
        'gpt-4-turbo',
        'gpt-4-turbo-preview',
        'gpt-4-0125-preview',
        'gpt-4-1106-preview',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0125',
        'gpt-3.5-turbo-1106'
      ],
      maxContextLength: 128000, // GPT-4 Turbo context length
      supportsStreaming: true,
      capabilities: {
        vision: true,
        functionCalling: true,
        jsonMode: true,
        systemMessages: true
      }
    };
  }

  /**
   * Validate the connection by making a minimal (1-token) request.
   * Only 401/403 are treated as fatal here; other failures are assumed
   * transient and allowed through so initialization can proceed.
   */
  private async validateConnection(): Promise<void> {
    if (!this.client) {
      throw new Error('Client not initialized');
    }
    try {
      // Make a minimal request to validate credentials
      await this.client.chat.completions.create({
        model: this.defaultModel,
        messages: [{ role: 'user', content: 'Hi' }],
        max_tokens: 1
      });
    } catch (error: any) {
      if (error.status === 401 || error.status === 403) {
        throw new AIProviderError(
          'Invalid API key. Please check your OpenAI API key.',
          AIErrorType.AUTHENTICATION,
          error.status
        );
      }
      // For other errors during validation, we'll let initialization proceed
      // as they might be temporary issues
    }
  }

  /**
   * Convert our generic message format to OpenAI's format.
   * OpenAI accepts system messages inline in the messages array, so this is
   * a direct per-message mapping with a role cast.
   */
  private convertMessages(messages: AIMessage[]): OpenAI.Chat.Completions.ChatCompletionMessageParam[] {
    return messages.map(message => ({
      role: message.role as 'system' | 'user' | 'assistant',
      content: message.content
    }));
  }

  /**
   * Format OpenAI's response into the package's standard CompletionResponse.
   * @throws AIProviderError (UNKNOWN) when the first choice has no content
   */
  private formatCompletionResponse(response: OpenAI.Chat.Completions.ChatCompletion): CompletionResponse {
    const choice = response.choices[0];
    if (!choice || !choice.message.content) {
      throw new AIProviderError(
        'No content in OpenAI response',
        AIErrorType.UNKNOWN
      );
    }
    return {
      content: choice.message.content,
      model: response.model,
      usage: {
        promptTokens: response.usage?.prompt_tokens || 0,
        completionTokens: response.usage?.completion_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      },
      id: response.id,
      metadata: {
        finishReason: choice.finish_reason,
        systemFingerprint: response.system_fingerprint
      }
    };
  }

  /**
   * Map OpenAI HTTP errors onto the package's categorized AIProviderError.
   * Already-wrapped errors pass through unchanged.
   */
  private handleOpenAIError(error: any): AIProviderError {
    if (error instanceof AIProviderError) {
      return error;
    }
    const status = error.status || error.statusCode;
    const message = error.message || 'Unknown OpenAI API error';
    switch (status) {
      case 400:
        return new AIProviderError(
          `Invalid request: ${message}`,
          AIErrorType.INVALID_REQUEST,
          status,
          error
        );
      case 401:
        return new AIProviderError(
          'Authentication failed. Please check your OpenAI API key.',
          AIErrorType.AUTHENTICATION,
          status,
          error
        );
      case 403:
        return new AIProviderError(
          'Access forbidden. Please check your API key permissions.',
          AIErrorType.AUTHENTICATION,
          status,
          error
        );
      case 404:
        return new AIProviderError(
          'Model not found. Please check the model name.',
          AIErrorType.MODEL_NOT_FOUND,
          status,
          error
        );
      case 429:
        return new AIProviderError(
          'Rate limit exceeded. Please slow down your requests.',
          AIErrorType.RATE_LIMIT,
          status,
          error
        );
      case 500:
      case 502:
      case 503:
      case 504:
        return new AIProviderError(
          'OpenAI service temporarily unavailable. Please try again later.',
          AIErrorType.NETWORK,
          status,
          error
        );
      default:
        return new AIProviderError(
          `OpenAI API error: ${message}`,
          AIErrorType.UNKNOWN,
          status,
          error
        );
    }
  }
}

View File

@ -5,18 +5,20 @@
import type { AIProviderConfig } from '../types/index.js';
import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
import { OpenAIProvider, type OpenAIConfig } from '../providers/openai.js';
import { BaseAIProvider } from '../providers/base.js';
/**
 * Supported AI provider types
 */
export type ProviderType = 'claude' | 'openai';

/**
 * Configuration map for different provider types
 */
export interface ProviderConfigMap {
  claude: ClaudeConfig;
  openai: OpenAIConfig;
}
/**
@ -32,6 +34,8 @@ export function createProvider<T extends ProviderType>(
switch (type) {
case 'claude':
return new ClaudeProvider(config as ClaudeConfig);
case 'openai':
return new OpenAIProvider(config as OpenAIConfig);
default:
throw new Error(`Unsupported provider type: ${type}`);
}
@ -53,6 +57,22 @@ export function createClaudeProvider(
});
}
/**
 * Create an OpenAI provider with simplified configuration
 * @param apiKey - OpenAI API key
 * @param options - Optional additional configuration
 * @returns Configured OpenAI provider instance
 */
export function createOpenAIProvider(
  apiKey: string,
  options: Partial<Omit<OpenAIConfig, 'apiKey'>> = {}
): OpenAIProvider {
  // Merge the key with any caller-supplied options into a full config.
  const config: OpenAIConfig = { apiKey, ...options };
  return new OpenAIProvider(config);
}
/**
* Provider registry for dynamic provider creation
*/
@ -101,4 +121,5 @@ export class ProviderRegistry {
}
// Pre-register built-in providers so they can be created by name at runtime.
ProviderRegistry.register('claude', ClaudeProvider);
ProviderRegistry.register('openai', OpenAIProvider);

261
tests/openai.test.ts Normal file
View File

@ -0,0 +1,261 @@
/**
* Tests for OpenAI Provider
*/
import { describe, it, expect, beforeEach } from 'bun:test';
import { OpenAIProvider, AIProviderError, AIErrorType } from '../src/index.js';
describe('OpenAIProvider', () => {
let provider: OpenAIProvider;
beforeEach(() => {
provider = new OpenAIProvider({
apiKey: 'test-api-key',
defaultModel: 'gpt-3.5-turbo'
});
});
describe('constructor', () => {
  it('should create provider with valid config', () => {
    expect(provider).toBeInstanceOf(OpenAIProvider);
    expect(provider.isInitialized()).toBe(false);
  });

  it('should throw error for missing API key', () => {
    // An empty key must be rejected at construction time.
    const build = () => new OpenAIProvider({ apiKey: '' });
    expect(build).toThrow(AIProviderError);
  });

  it('should set default model', () => {
    const gpt4Provider = new OpenAIProvider({
      apiKey: 'test-key',
      defaultModel: 'gpt-4'
    });
    expect(gpt4Provider).toBeInstanceOf(OpenAIProvider);
  });

  it('should handle organization and project options', () => {
    const scopedProvider = new OpenAIProvider({
      apiKey: 'test-key',
      organization: 'org-123',
      project: 'proj-456'
    });
    expect(scopedProvider).toBeInstanceOf(OpenAIProvider);
  });
});
describe('getInfo', () => {
  it('should return provider information', () => {
    const info = provider.getInfo();

    expect(info.name).toBe('OpenAI');
    expect(info.version).toBe('1.0.0');
    expect(info.supportsStreaming).toBe(true);
    expect(info.maxContextLength).toBe(128000);

    // The advertised model catalogue must include the common chat models.
    for (const model of ['gpt-4', 'gpt-3.5-turbo']) {
      expect(info.models).toContain(model);
    }

    // Every capability flag is expected to be enabled for OpenAI.
    for (const capability of ['vision', 'functionCalling', 'jsonMode', 'systemMessages']) {
      expect(info.capabilities).toHaveProperty(capability, true);
    }
  });
});
describe('validation', () => {
  // Marks the provider as initialized with a stub client so that request
  // validation can be exercised without making a real API call.
  const primeProvider = () => {
    (provider as any).initialized = true;
    (provider as any).client = {};
  };

  it('should validate temperature range', async () => {
    primeProvider();
    await expect(
      provider.complete({
        messages: [{ role: 'user', content: 'test' }],
        temperature: 1.5
      })
    ).rejects.toThrow('Temperature must be between 0.0 and 1.0');
  });

  it('should validate top_p range', async () => {
    primeProvider();
    await expect(
      provider.complete({
        messages: [{ role: 'user', content: 'test' }],
        topP: 1.5
      })
    ).rejects.toThrow('Top-p must be between 0.0 and 1.0');
  });

  it('should validate message format', async () => {
    primeProvider();
    await expect(
      provider.complete({
        messages: [{ role: 'invalid' as any, content: 'test' }]
      })
    ).rejects.toThrow('Each message must have a valid role');
  });

  it('should validate empty content', async () => {
    primeProvider();
    await expect(
      provider.complete({
        messages: [{ role: 'user', content: '' }]
      })
    ).rejects.toThrow('Each message must have non-empty string content');
  });

  it('should require initialization before use', async () => {
    // Intentionally NOT primed: an uninitialized provider must reject.
    await expect(
      provider.complete({
        messages: [{ role: 'user', content: 'test' }]
      })
    ).rejects.toThrow('Provider must be initialized before use');
  });
});
describe('error handling', () => {
  // Builds an Error decorated with an HTTP status code, mimicking the
  // shape of errors thrown by the OpenAI SDK.
  const makeStatusError = (message: string, status?: number): Error => {
    const error = new Error(message);
    if (status !== undefined) {
      (error as any).status = status;
    }
    return error;
  };

  it('should handle authentication errors', () => {
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Unauthorized', 401)
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.AUTHENTICATION);
    expect(providerError.message).toContain('Authentication failed');
  });

  it('should handle rate limit errors', () => {
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Rate limited', 429)
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.RATE_LIMIT);
    expect(providerError.message).toContain('Rate limit exceeded');
  });

  it('should handle model not found errors', () => {
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Model not found', 404)
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.MODEL_NOT_FOUND);
    expect(providerError.message).toContain('Model not found');
  });

  it('should handle invalid request errors', () => {
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Bad request', 400)
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.INVALID_REQUEST);
  });

  it('should handle server errors', () => {
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Internal server error', 500)
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.NETWORK);
  });

  it('should handle unknown errors', () => {
    // No status attached: must fall through to the UNKNOWN category.
    const providerError = (provider as any).handleOpenAIError(
      makeStatusError('Unknown error')
    );
    expect(providerError).toBeInstanceOf(AIProviderError);
    expect(providerError.type).toBe(AIErrorType.UNKNOWN);
  });
});
describe('message conversion', () => {
  it('should convert messages to OpenAI format', () => {
    const input = [
      { role: 'system' as const, content: 'You are helpful' },
      { role: 'user' as const, content: 'Hello' },
      { role: 'assistant' as const, content: 'Hi there' }
    ];

    const converted = (provider as any).convertMessages(input);

    expect(converted).toHaveLength(3);
    expect(converted[0]).toEqual({ role: 'system', content: 'You are helpful' });
    expect(converted[1]).toEqual({ role: 'user', content: 'Hello' });
    expect(converted[2]).toEqual({ role: 'assistant', content: 'Hi there' });
  });

  it('should handle messages with metadata', () => {
    const input = [
      {
        role: 'user' as const,
        content: 'Hello',
        metadata: { timestamp: '2024-01-01' }
      }
    ];

    const converted = (provider as any).convertMessages(input);

    // Metadata must be stripped: OpenAI's wire format carries only role/content.
    expect(converted).toHaveLength(1);
    expect(converted[0]).toEqual({ role: 'user', content: 'Hello' });
  });
});
describe('response formatting', () => {
  it('should format completion response correctly', () => {
    const mockResponse = {
      id: 'chatcmpl-123',
      model: 'gpt-3.5-turbo',
      choices: [
        { message: { content: 'Hello there!' }, finish_reason: 'stop' }
      ],
      usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
      system_fingerprint: 'fp_123'
    };

    const formatted = (provider as any).formatCompletionResponse(mockResponse);

    expect(formatted.content).toBe('Hello there!');
    expect(formatted.model).toBe('gpt-3.5-turbo');
    expect(formatted.id).toBe('chatcmpl-123');
    // snake_case token counts must be mapped to camelCase usage fields.
    expect(formatted.usage.promptTokens).toBe(10);
    expect(formatted.usage.completionTokens).toBe(20);
    expect(formatted.usage.totalTokens).toBe(30);
    expect(formatted.metadata.finishReason).toBe('stop');
    expect(formatted.metadata.systemFingerprint).toBe('fp_123');
  });

  it('should throw error for empty response', () => {
    const emptyResponse = {
      id: 'chatcmpl-123',
      model: 'gpt-3.5-turbo',
      choices: [],
      usage: { prompt_tokens: 10, completion_tokens: 0, total_tokens: 10 }
    };

    const format = () => (provider as any).formatCompletionResponse(emptyResponse);
    expect(format).toThrow('No content in OpenAI response');
  });
});
});