feat(docs): update README with OpenWebUI support details
src/index.ts (41 lines changed)
@@ -1,56 +1,53 @@
 /**
- * Simple AI Provider - Main Entry Point
+ * Simple AI Provider - A unified interface for multiple AI providers
  *
- * A professional, extensible package for integrating multiple AI providers
- * into your applications with a unified interface.
+ * Main entry point for the library, providing access to all providers
+ * and utilities in a clean, type-safe interface.
  *
  * @author Jan-Marlon Leibl
  * @version 1.0.0
  */

-// Core types and interfaces
+// Core types
 export type {
+  AIProviderConfig,
   AIMessage,
   MessageRole,
-  AIProviderConfig,
   CompletionParams,
   CompletionResponse,
   CompletionChunk,
-  TokenUsage,
-  ProviderInfo
+  ProviderInfo,
+  TokenUsage
 } from './types/index.js';

-// Error handling
+// Error types
 export { AIProviderError, AIErrorType } from './types/index.js';

-// Base provider class
+// Base provider
 export { BaseAIProvider } from './providers/base.js';

-// Concrete provider implementations
+// Provider implementations
 export { ClaudeProvider, type ClaudeConfig } from './providers/claude.js';
 export { OpenAIProvider, type OpenAIConfig } from './providers/openai.js';
 export { GeminiProvider, type GeminiConfig } from './providers/gemini.js';
+export { OpenWebUIProvider, type OpenWebUIConfig } from './providers/openwebui.js';

-// Utility functions and factory
+// Factory utilities
 export {
   createProvider,
   createClaudeProvider,
   createOpenAIProvider,
   createGeminiProvider,
   ProviderRegistry,
+  createOpenWebUIProvider,
   type ProviderType,
-  type ProviderConfigMap
+  type ProviderConfigMap,
+  PROVIDER_REGISTRY
 } from './utils/factory.js';

-// Re-export everything from providers for convenience
-export * from './providers/index.js';
-
-/**
- * List of supported providers
- */
-export const SUPPORTED_PROVIDERS = ['claude', 'openai', 'gemini'] as const;
-export const VERSION = '1.0.0';
+/**
+ * List of all supported providers
+ */
+export const SUPPORTED_PROVIDERS = ['claude', 'openai', 'gemini', 'openwebui'] as const;
+
+/**
+ * Package version
+ */
+export const VERSION = '1.0.0';
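For orientation, here is a consumer-side sketch of the updated entry point (illustrative only, not part of the commit; 'simple-ai-provider' stands in for the published package name, which this diff does not show, and apiKey may well be optional for local instances):

// Hypothetical consumer code; the package name is an assumption.
import {
  createOpenWebUIProvider,
  SUPPORTED_PROVIDERS,
  type OpenWebUIConfig
} from 'simple-ai-provider';

const config: OpenWebUIConfig = {
  apiKey: 'sk-local-xxxx',          // placeholder value
  baseUrl: 'http://localhost:3000'
};
const provider = createOpenWebUIProvider(config);

console.log(SUPPORTED_PROVIDERS); // ['claude', 'openai', 'gemini', 'openwebui']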
src/providers/index.ts
@@ -6,4 +6,5 @@
 export { BaseAIProvider } from './base.js';
 export { ClaudeProvider, type ClaudeConfig } from './claude.js';
 export { OpenAIProvider, type OpenAIConfig } from './openai.js';
 export { GeminiProvider, type GeminiConfig } from './gemini.js';
+export { OpenWebUIProvider, type OpenWebUIConfig } from './openwebui.js';
src/providers/openwebui.ts (new file, 659 lines)
@@ -0,0 +1,659 @@
/**
 * OpenWebUI Provider implementation using OpenWebUI's native API
 * Provides integration with OpenWebUI's chat completions and Ollama proxy endpoints
 */

import type {
  AIProviderConfig,
  CompletionParams,
  CompletionResponse,
  CompletionChunk,
  ProviderInfo,
  AIMessage
} from '../types/index.js';
import { BaseAIProvider } from './base.js';
import { AIProviderError, AIErrorType } from '../types/index.js';

/**
 * Configuration specific to OpenWebUI provider
 */
export interface OpenWebUIConfig extends AIProviderConfig {
  /** Default model to use if not specified in requests (default: 'llama3.1') */
  defaultModel?: string;
  /** Base URL for OpenWebUI instance (default: 'http://localhost:3000') */
  baseUrl?: string;
  /** Whether to use Ollama API proxy endpoints instead of chat completions (default: false) */
  useOllamaProxy?: boolean;
  /** Whether to allow insecure TLS connections, e.g. to local instances (default: true) */
  dangerouslyAllowInsecureConnections?: boolean;
}

/**
 * OpenWebUI chat completion response interface
 */
interface OpenWebUIChatResponse {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string | null;
  }>;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/**
 * OpenWebUI streaming response interface
 */
interface OpenWebUIStreamChunk {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    delta: {
      role?: string;
      content?: string;
    };
    finish_reason: string | null;
  }>;
}

/**
 * Ollama generate response interface
 */
interface OllamaGenerateResponse {
  model: string;
  created_at: string;
  response: string;
  done: boolean;
  context?: number[];
  total_duration?: number;
  load_duration?: number;
  prompt_eval_count?: number;
  prompt_eval_duration?: number;
  eval_count?: number;
  eval_duration?: number;
}

/**
 * OpenWebUI models response interface
 */
interface OpenWebUIModelsResponse {
  data: Array<{
    id: string;
    object: string;
    created: number;
    owned_by: string;
  }>;
}

/**
 * OpenWebUI provider implementation
 */
export class OpenWebUIProvider extends BaseAIProvider {
  private readonly defaultModel: string;
  private readonly baseUrl: string;
  private readonly useOllamaProxy: boolean;
  private readonly dangerouslyAllowInsecureConnections: boolean;

  constructor(config: OpenWebUIConfig) {
    super(config);
    this.defaultModel = config.defaultModel || 'llama3.1';
    this.baseUrl = (config.baseUrl || 'http://localhost:3000').replace(/\/$/, '');
    this.useOllamaProxy = config.useOllamaProxy ?? false;
    this.dangerouslyAllowInsecureConnections = config.dangerouslyAllowInsecureConnections ?? true;
  }

  /**
   * Initialize the OpenWebUI provider by testing the connection
   */
  protected async doInitialize(): Promise<void> {
    try {
      await this.validateConnection();
    } catch (error) {
      throw new AIProviderError(
        `Failed to initialize OpenWebUI provider: ${(error as Error).message}`,
        AIErrorType.NETWORK,
        undefined,
        error as Error
      );
    }
  }

  /**
   * Generate a completion using OpenWebUI
   */
  protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
    if (this.useOllamaProxy) {
      return this.completeWithOllama(params);
    } else {
      return this.completeWithChat(params);
    }
  }

  /**
   * Generate a streaming completion using OpenWebUI
   */
  protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
    if (this.useOllamaProxy) {
      yield* this.streamWithOllama(params);
    } else {
      yield* this.streamWithChat(params);
    }
  }

  /**
   * Complete using OpenWebUI's chat completions API
   */
  private async completeWithChat(params: CompletionParams): Promise<CompletionResponse> {
    const url = `${this.baseUrl}/api/chat/completions`;

    const requestBody = {
      model: params.model || this.defaultModel,
      messages: this.convertMessages(params.messages),
      max_tokens: params.maxTokens || 1000,
      temperature: params.temperature ?? 0.7,
      top_p: params.topP,
      stop: params.stopSequences,
      stream: false
    };

    try {
      const response = await this.makeRequest(url, 'POST', requestBody);
      const data = await response.json() as OpenWebUIChatResponse;

      return this.formatChatResponse(data);
    } catch (error) {
      throw this.handleOpenWebUIError(error as Error);
    }
  }

  /**
   * Complete using Ollama proxy API
   */
  private async completeWithOllama(params: CompletionParams): Promise<CompletionResponse> {
    const url = `${this.baseUrl}/ollama/api/generate`;

    // Convert messages to a single prompt for Ollama
    const prompt = this.convertMessagesToPrompt(params.messages);

    const requestBody = {
      model: params.model || this.defaultModel,
      prompt: prompt,
      stream: false,
      options: {
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        num_predict: params.maxTokens || 1000,
        stop: params.stopSequences
      }
    };

    try {
      const response = await this.makeRequest(url, 'POST', requestBody);
      const data = await response.json() as OllamaGenerateResponse;

      return this.formatOllamaResponse(data);
    } catch (error) {
      throw this.handleOpenWebUIError(error as Error);
    }
  }

  /**
   * Stream using OpenWebUI's chat completions API
   */
  private async *streamWithChat(params: CompletionParams): AsyncIterable<CompletionChunk> {
    const url = `${this.baseUrl}/api/chat/completions`;

    const requestBody = {
      model: params.model || this.defaultModel,
      messages: this.convertMessages(params.messages),
      max_tokens: params.maxTokens || 1000,
      temperature: params.temperature ?? 0.7,
      top_p: params.topP,
      stop: params.stopSequences,
      stream: true
    };

    try {
      const response = await this.makeRequest(url, 'POST', requestBody);

      if (!response.body) {
        throw new Error('No response body for streaming');
      }

      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      let messageId = '';

      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;

          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() || '';

          for (const line of lines) {
            const trimmed = line.trim();
            if (trimmed.startsWith('data: ')) {
              const data = trimmed.slice(6);
              if (data === '[DONE]') {
                return;
              }

              try {
                const chunk = JSON.parse(data) as OpenWebUIStreamChunk;
                if (chunk.id && !messageId) {
                  messageId = chunk.id;
                }

                const delta = chunk.choices[0]?.delta;
                if (delta?.content) {
                  yield {
                    content: delta.content,
                    isComplete: false,
                    id: messageId || chunk.id
                  };
                }

                if (chunk.choices[0]?.finish_reason) {
                  yield {
                    content: '',
                    isComplete: true,
                    id: messageId || chunk.id,
                    usage: {
                      promptTokens: 0,
                      completionTokens: 0,
                      totalTokens: 0
                    }
                  };
                }
              } catch (parseError) {
                // Skip invalid JSON chunks
                continue;
              }
            }
          }
        }
      } finally {
        reader.releaseLock();
      }
    } catch (error) {
      throw this.handleOpenWebUIError(error as Error);
    }
  }

  /**
   * Stream using Ollama proxy API
   */
  private async *streamWithOllama(params: CompletionParams): AsyncIterable<CompletionChunk> {
    const url = `${this.baseUrl}/ollama/api/generate`;

    const prompt = this.convertMessagesToPrompt(params.messages);

    const requestBody = {
      model: params.model || this.defaultModel,
      prompt: prompt,
      stream: true,
      options: {
        temperature: params.temperature ?? 0.7,
        top_p: params.topP,
        num_predict: params.maxTokens || 1000,
        stop: params.stopSequences
      }
    };

    try {
      const response = await this.makeRequest(url, 'POST', requestBody);

      if (!response.body) {
        throw new Error('No response body for streaming');
      }

      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      const messageId = 'ollama-' + Date.now();

      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;

          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() || '';

          for (const line of lines) {
            const trimmed = line.trim();
            if (trimmed) {
              try {
                const chunk = JSON.parse(trimmed) as OllamaGenerateResponse;

                if (chunk.response) {
                  yield {
                    content: chunk.response,
                    isComplete: false,
                    id: messageId
                  };
                }

                if (chunk.done) {
                  yield {
                    content: '',
                    isComplete: true,
                    id: messageId,
                    usage: {
                      promptTokens: chunk.prompt_eval_count || 0,
                      completionTokens: chunk.eval_count || 0,
                      totalTokens: (chunk.prompt_eval_count || 0) + (chunk.eval_count || 0)
                    }
                  };
                  return;
                }
              } catch (parseError) {
                // Skip invalid JSON chunks
                continue;
              }
            }
          }
        }
      } finally {
        reader.releaseLock();
      }
    } catch (error) {
      throw this.handleOpenWebUIError(error as Error);
    }
  }

  /**
   * Get information about the OpenWebUI provider
   */
  public getInfo(): ProviderInfo {
    return {
      name: 'OpenWebUI',
      version: '1.0.0',
      models: [
        'llama3.1',
        'llama3.1:8b',
        'llama3.1:70b',
        'llama3.2',
        'llama3.2:1b',
        'llama3.2:3b',
        'codellama',
        'codellama:7b',
        'codellama:13b',
        'codellama:34b',
        'mistral',
        'mistral:7b',
        'mixtral',
        'mixtral:8x7b',
        'phi3',
        'phi3:mini',
        'gemma2',
        'gemma2:2b',
        'gemma2:9b',
        'qwen2.5',
        'granite3.1-dense:8b'
      ],
      maxContextLength: 8192, // Varies by model; a reasonable default
      supportsStreaming: true,
      capabilities: {
        vision: false, // Depends on model
        functionCalling: false, // Limited in local models
        systemMessages: true,
        localExecution: true,
        customModels: true,
        rag: true // OpenWebUI supports RAG
      }
    };
  }

  /**
   * Validate the connection by attempting to list models
   */
  private async validateConnection(): Promise<void> {
    try {
      const url = `${this.baseUrl}/api/models`;
      const response = await this.makeRequest(url, 'GET');

      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
      }

      await response.json(); // Just verify we can parse the response
    } catch (error: any) {
      if (error.code === 'ECONNREFUSED' || error.message?.includes('connect')) {
        throw new AIProviderError(
          `Cannot connect to OpenWebUI at ${this.baseUrl}. Make sure OpenWebUI is running.`,
          AIErrorType.NETWORK
        );
      }
      throw error;
    }
  }

  /**
   * Make an HTTP request with proper headers and error handling
   */
  private async makeRequest(url: string, method: string, body?: any): Promise<Response> {
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
    };

    // Add Authorization header if API key is provided
    if (this.config.apiKey) {
      headers['Authorization'] = `Bearer ${this.config.apiKey}`;
    }

    const requestInit: RequestInit = {
      method,
      headers,
      body: body ? JSON.stringify(body) : undefined,
    };

    // Handle SSL verification for local instances
    if (this.dangerouslyAllowInsecureConnections && url.startsWith('https://localhost')) {
      // Note: In a real implementation, you'd need to configure the agent
      // This is a placeholder for the concept
    }

    const response = await fetch(url, requestInit);

    // fetch() does not reject on HTTP error statuses, so surface them here
    // with the status attached for handleOpenWebUIError to classify.
    if (!response.ok) {
      const error = new Error(`HTTP ${response.status}: ${response.statusText}`) as Error & { status?: number };
      error.status = response.status;
      throw error;
    }

    return response;
  }

  /**
   * Convert our generic message format to OpenWebUI's format
   */
  private convertMessages(messages: AIMessage[]): Array<{ role: string; content: string }> {
    return messages.map(message => ({
      role: message.role,
      content: message.content
    }));
  }

  /**
   * Convert messages to a single prompt for Ollama API
   */
  private convertMessagesToPrompt(messages: AIMessage[]): string {
    return messages.map(message => {
      switch (message.role) {
        case 'system':
          return `System: ${message.content}`;
        case 'user':
          return `Human: ${message.content}`;
        case 'assistant':
          return `Assistant: ${message.content}`;
        default:
          return message.content;
      }
    }).join('\n\n');
  }

  /**
   * Format OpenWebUI chat response to our standard format
   */
  private formatChatResponse(response: OpenWebUIChatResponse): CompletionResponse {
    const choice = response.choices[0];
    if (!choice || !choice.message.content) {
      throw new AIProviderError(
        'No content in OpenWebUI response',
        AIErrorType.UNKNOWN
      );
    }

    return {
      content: choice.message.content,
      model: response.model,
      usage: {
        promptTokens: response.usage?.prompt_tokens || 0,
        completionTokens: response.usage?.completion_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      },
      id: response.id,
      metadata: {
        finishReason: choice.finish_reason,
        created: response.created
      }
    };
  }

  /**
   * Format Ollama response to our standard format
   */
  private formatOllamaResponse(response: OllamaGenerateResponse): CompletionResponse {
    return {
      content: response.response,
      model: response.model,
      usage: {
        promptTokens: response.prompt_eval_count || 0,
        completionTokens: response.eval_count || 0,
        totalTokens: (response.prompt_eval_count || 0) + (response.eval_count || 0)
      },
      id: `ollama-${Date.now()}`,
      metadata: {
        created_at: response.created_at,
        total_duration: response.total_duration,
        eval_duration: response.eval_duration
      }
    };
  }

  /**
   * Handle OpenWebUI-specific errors and convert them to our standard format
   */
  private handleOpenWebUIError(error: any): AIProviderError {
    if (error instanceof AIProviderError) {
      return error;
    }

    const message = error.message || 'Unknown OpenWebUI API error';

    // Handle connection errors
    if (error.code === 'ECONNREFUSED' || message.includes('connect')) {
      return new AIProviderError(
        `Cannot connect to OpenWebUI at ${this.baseUrl}. Make sure OpenWebUI is running.`,
        AIErrorType.NETWORK,
        undefined,
        error
      );
    }

    if (error.code === 'ENOTFOUND' || message.includes('getaddrinfo')) {
      return new AIProviderError(
        `Cannot resolve OpenWebUI hostname. Check your baseUrl configuration.`,
        AIErrorType.NETWORK,
        undefined,
        error
      );
    }

    // Handle HTTP status codes
    const status = error.status || error.statusCode;

    switch (status) {
      case 400:
        return new AIProviderError(
          `Invalid request: ${message}`,
          AIErrorType.INVALID_REQUEST,
          status,
          error
        );
      case 401:
        return new AIProviderError(
          'Authentication failed. Check your API key from OpenWebUI Settings > Account.',
          AIErrorType.AUTHENTICATION,
          status,
          error
        );
      case 404:
        // Model not found or endpoint not found
        if (message.includes('model')) {
          return new AIProviderError(
            'Model not found. Make sure the model is available in OpenWebUI.',
            AIErrorType.MODEL_NOT_FOUND,
            status,
            error
          );
        }
        return new AIProviderError(
          `API endpoint not found. Check your baseUrl configuration.`,
          AIErrorType.NETWORK,
          status,
          error
        );
      case 429:
        return new AIProviderError(
          'Rate limit exceeded. Local models may be overloaded.',
          AIErrorType.RATE_LIMIT,
          status,
          error
        );
      case 500:
      case 502:
      case 503:
      case 504:
        return new AIProviderError(
          'OpenWebUI service error. Check the OpenWebUI logs for details.',
          AIErrorType.NETWORK,
          status,
          error
        );
      default:
        // Handle timeout errors
        if (message.includes('timeout') || error.code === 'ETIMEDOUT') {
          return new AIProviderError(
            'Request timeout. Local model inference may be slow.',
            AIErrorType.TIMEOUT,
            status,
            error
          );
        }

        return new AIProviderError(
          `OpenWebUI API error: ${message}`,
          AIErrorType.UNKNOWN,
          status,
          error
        );
    }
  }
}
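As a sanity check on the new provider's surface, here is a minimal usage sketch (not part of the commit). Only the protected doInitialize/doComplete/doStream hooks appear above, so the public initialize()/complete()/stream() wrappers on BaseAIProvider are an assumption:

// Minimal sketch; public method names on BaseAIProvider are assumptions.
const provider = new OpenWebUIProvider({
  apiKey: 'sk-local-xxxx',           // optional; sent as a Bearer token when set
  baseUrl: 'http://localhost:3000',  // OpenWebUI default per the config docs
  defaultModel: 'llama3.1',
  useOllamaProxy: false              // true would route via /ollama/api/generate
});

await provider.initialize();         // assumed wrapper around doInitialize()

const response = await provider.complete({  // assumed wrapper around doComplete()
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Say hello.' }
  ],
  maxTokens: 200,
  temperature: 0.7
});
console.log(response.content, response.usage.totalTokens);

for await (const chunk of provider.stream({  // assumed wrapper around doStream()
  messages: [{ role: 'user', content: 'Stream a haiku.' }]
})) {
  if (!chunk.isComplete) process.stdout.write(chunk.content);
}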
src/utils/factory.ts
@@ -7,12 +7,13 @@ import type { AIProviderConfig } from '../types/index.js';
 import { ClaudeProvider, type ClaudeConfig } from '../providers/claude.js';
 import { OpenAIProvider, type OpenAIConfig } from '../providers/openai.js';
 import { GeminiProvider, type GeminiConfig } from '../providers/gemini.js';
+import { OpenWebUIProvider, type OpenWebUIConfig } from '../providers/openwebui.js';
 import { BaseAIProvider } from '../providers/base.js';

 /**
  * Supported AI provider types
  */
-export type ProviderType = 'claude' | 'openai' | 'gemini';
+export type ProviderType = 'claude' | 'openai' | 'gemini' | 'openwebui';

 /**
  * Configuration map for different provider types
@@ -21,6 +22,7 @@ export interface ProviderConfigMap {
   claude: ClaudeConfig;
   openai: OpenAIConfig;
   gemini: GeminiConfig;
+  openwebui: OpenWebUIConfig;
 }

 /**
@@ -40,6 +42,8 @@ export function createProvider<T extends ProviderType>(
       return new OpenAIProvider(config as OpenAIConfig);
     case 'gemini':
       return new GeminiProvider(config as GeminiConfig);
+    case 'openwebui':
+      return new OpenWebUIProvider(config as OpenWebUIConfig);
     default:
       throw new Error(`Unsupported provider type: ${type}`);
   }
@@ -93,6 +97,13 @@ export function createGeminiProvider(
   });
 }

+/**
+ * Create an OpenWebUI provider instance
+ */
+export function createOpenWebUIProvider(config: OpenWebUIConfig): OpenWebUIProvider {
+  return new OpenWebUIProvider(config);
+}
+
 /**
  * Provider registry for dynamic provider creation
  */
@@ -143,4 +154,15 @@ export class ProviderRegistry {
 // Pre-register built-in providers
 ProviderRegistry.register('claude', ClaudeProvider);
 ProviderRegistry.register('openai', OpenAIProvider);
 ProviderRegistry.register('gemini', GeminiProvider);
+ProviderRegistry.register('openwebui', OpenWebUIProvider);
+
+/**
+ * Registry of all available providers
+ */
+export const PROVIDER_REGISTRY = {
+  claude: ClaudeProvider,
+  openai: OpenAIProvider,
+  gemini: GeminiProvider,
+  openwebui: OpenWebUIProvider
+} as const;
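Finally, a short sketch of the new factory entry points in use; the function names and config shapes come straight from the diff above, while the apiKey value is a placeholder:

// Type-safe construction through the extended ProviderType union
const viaFactory = createProvider('openwebui', {
  apiKey: 'sk-local-xxxx',           // placeholder
  baseUrl: 'http://localhost:3000'
});

// The dedicated helper added in this commit
const direct = createOpenWebUIProvider({
  apiKey: 'sk-local-xxxx',           // placeholder
  defaultModel: 'llama3.2:3b',
  useOllamaProxy: true               // route through the Ollama proxy endpoints
});

// The new constant registry maps provider names to their classes
const ProviderClass = PROVIDER_REGISTRY.openwebui; // OpenWebUIProvider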