feat(docs): add structured output example and details

This commit is contained in:
2025-09-04 14:49:01 +02:00
parent 5da20536af
commit 18769c134d
10 changed files with 647 additions and 22 deletions

View File

@@ -16,12 +16,20 @@ export type {
CompletionResponse,
CompletionChunk,
ProviderInfo,
TokenUsage
TokenUsage,
ResponseType
} from './types/index.js';
// Error types
export { AIProviderError, AIErrorType } from './types/index.js';
// Response type utilities
export {
createResponseType,
generateResponseTypePrompt,
validateResponseType
} from './types/index.js';
// Base provider
export { BaseAIProvider } from './providers/base.js';
@@ -50,4 +58,4 @@ export const SUPPORTED_PROVIDERS = ['claude', 'openai', 'gemini', 'openwebui'] a
/**
* Package version
*/
export const VERSION = '1.0.0';
export const VERSION = '1.2.0';

View File

@@ -19,9 +19,10 @@ import type {
CompletionParams,
CompletionResponse,
CompletionChunk,
ProviderInfo
ProviderInfo,
ResponseType
} from '../types/index.js';
import { AIProviderError, AIErrorType } from '../types/index.js';
import { AIProviderError, AIErrorType, generateResponseTypePrompt } from '../types/index.js';
// ============================================================================
// ABSTRACT BASE PROVIDER CLASS
@@ -147,7 +148,7 @@ export abstract class BaseAIProvider {
* console.log(response.content);
* ```
*/
public async complete(params: CompletionParams): Promise<CompletionResponse> {
public async complete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse> {
// Ensure provider is ready for use
this.ensureInitialized();
@@ -155,8 +156,11 @@ export abstract class BaseAIProvider {
this.validateCompletionParams(params);
try {
// Process response type instructions if specified
const processedParams = this.processResponseType(params);
// Delegate to provider-specific implementation
return await this.doComplete(params);
return await this.doComplete(processedParams);
} catch (error) {
// Normalize error to our standard format
throw this.normalizeError(error as Error);
@@ -184,7 +188,7 @@ export abstract class BaseAIProvider {
* }
* ```
*/
public async *stream(params: CompletionParams): AsyncIterable<CompletionChunk> {
public async *stream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk> {
// Ensure provider is ready for use
this.ensureInitialized();
@@ -192,8 +196,11 @@ export abstract class BaseAIProvider {
this.validateCompletionParams(params);
try {
// Process response type instructions if specified
const processedParams = this.processResponseType(params);
// Delegate to provider-specific implementation
yield* this.doStream(params);
yield* this.doStream(processedParams);
} catch (error) {
// Normalize error to our standard format
throw this.normalizeError(error as Error);
@@ -242,7 +249,7 @@ export abstract class BaseAIProvider {
* @returns Promise resolving to completion response
* @throws {Error} If completion fails (will be normalized to AIProviderError)
*/
protected abstract doComplete(params: CompletionParams): Promise<CompletionResponse>;
protected abstract doComplete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse>;
/**
* Provider-specific streaming implementation.
@@ -256,7 +263,7 @@ export abstract class BaseAIProvider {
* @returns AsyncIterable yielding completion chunks
* @throws {Error} If streaming fails (will be normalized to AIProviderError)
*/
protected abstract doStream(params: CompletionParams): AsyncIterable<CompletionChunk>;
protected abstract doStream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk>;
// ========================================================================
// PROTECTED UTILITY METHODS
@@ -370,7 +377,7 @@ export abstract class BaseAIProvider {
* @param params - Parameters to validate
* @throws {AIProviderError} If any parameter is invalid
*/
protected validateCompletionParams(params: CompletionParams): void {
protected validateCompletionParams<T = any>(params: CompletionParams<T>): void {
if (!params || typeof params !== 'object') {
throw new AIProviderError(
'Completion parameters object is required',
@@ -512,6 +519,45 @@ export abstract class BaseAIProvider {
}
}
/**
* Processes completion parameters to include response type instructions.
*
* This method automatically adds system prompt instructions when a response
* type is specified, ensuring the AI understands the expected output format.
*
* @protected
* @param params - Original completion parameters
* @returns Processed parameters with response type instructions
*/
protected processResponseType<T>(params: CompletionParams<T>): CompletionParams<T> {
if (!params.responseType) {
return params;
}
// Create a copy of the parameters to avoid mutation
const processedParams = { ...params };
// Generate the response type instruction
const responseTypePrompt = generateResponseTypePrompt(params.responseType);
// Find existing system messages or create new ones
const systemMessages = params.messages.filter(msg => msg.role === 'system');
const nonSystemMessages = params.messages.filter(msg => msg.role !== 'system');
// Combine existing system messages with response type instruction
const combinedSystemContent = systemMessages.length > 0
? systemMessages.map(msg => msg.content).join('\n\n') + '\n\n' + responseTypePrompt
: responseTypePrompt;
// Create new messages array with combined system message
processedParams.messages = [
{ role: 'system', content: combinedSystemContent },
...nonSystemMessages
];
return processedParams;
}
/**
* Normalizes any error into a standardized AIProviderError.
*

View File

@@ -220,7 +220,7 @@ export class ClaudeProvider extends BaseAIProvider {
* @returns Promise resolving to formatted completion response
* @throws {Error} If API request fails
*/
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
protected async doComplete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse> {
if (!this.client) {
throw new AIProviderError('Claude client not initialized', AIErrorType.INVALID_REQUEST);
}
@@ -257,7 +257,7 @@ export class ClaudeProvider extends BaseAIProvider {
* @returns AsyncIterable yielding completion chunks
* @throws {Error} If streaming request fails
*/
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
protected async *doStream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk> {
if (!this.client) {
throw new AIProviderError('Claude client not initialized', AIErrorType.INVALID_REQUEST);
}

View File

@@ -268,7 +268,7 @@ export class GeminiProvider extends BaseAIProvider {
* @returns Promise resolving to formatted completion response
* @throws {Error} If API request fails
*/
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
protected async doComplete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse> {
if (!this.client || !this.model) {
throw new AIProviderError('Gemini client not initialized', AIErrorType.INVALID_REQUEST);
}
@@ -324,7 +324,7 @@ export class GeminiProvider extends BaseAIProvider {
* @returns AsyncIterable yielding completion chunks
* @throws {Error} If streaming request fails
*/
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
protected async *doStream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk> {
if (!this.client || !this.model) {
throw new AIProviderError('Gemini client not initialized', AIErrorType.INVALID_REQUEST);
}

View File

@@ -244,7 +244,7 @@ export class OpenAIProvider extends BaseAIProvider {
* @returns Promise resolving to formatted completion response
* @throws {Error} If API request fails
*/
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
protected async doComplete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse> {
if (!this.client) {
throw new AIProviderError('OpenAI client not initialized', AIErrorType.INVALID_REQUEST);
}
@@ -278,7 +278,7 @@ export class OpenAIProvider extends BaseAIProvider {
* @returns AsyncIterable yielding completion chunks
* @throws {Error} If streaming request fails
*/
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
protected async *doStream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk> {
if (!this.client) {
throw new AIProviderError('OpenAI client not initialized', AIErrorType.INVALID_REQUEST);
}

View File

@@ -962,7 +962,7 @@ export class OpenWebUIProvider extends BaseAIProvider {
* @returns Promise resolving to formatted completion response
* @throws {Error} If API request fails
*/
protected async doComplete(params: CompletionParams): Promise<CompletionResponse> {
protected async doComplete<T = any>(params: CompletionParams<T>): Promise<CompletionResponse> {
if (this.useOllamaProxy) {
return this.completeWithOllama(params);
} else {
@@ -978,7 +978,7 @@ export class OpenWebUIProvider extends BaseAIProvider {
* @returns AsyncIterable yielding completion chunks
* @throws {Error} If streaming request fails
*/
protected async *doStream(params: CompletionParams): AsyncIterable<CompletionChunk> {
protected async *doStream<T = any>(params: CompletionParams<T>): AsyncIterable<CompletionChunk> {
if (this.useOllamaProxy) {
yield* this.streamWithOllama(params);
} else {

View File

@@ -36,10 +36,24 @@ export interface AIMessage {
metadata?: Record<string, any>;
}
/**
 * Response type definition for structured AI outputs.
 *
 * Describes the shape an AI completion should return. The type definition and
 * description are embedded verbatim into a system prompt (see
 * generateResponseTypePrompt), so `typeDefinition` should be a valid,
 * self-contained TypeScript type expression.
 */
export interface ResponseType<T = any> {
/** The TypeScript type definition as a string (embedded into the prompt verbatim) */
typeDefinition: string;
/** Human-readable description of the expected response format */
description: string;
/** Example of the expected response structure (serialized to JSON in the prompt) */
example?: T;
/** Whether to enforce strict JSON formatting (JSON-only reply, no surrounding text or markdown) */
strictJson?: boolean;
}
/**
* Parameters for AI completion requests
*/
export interface CompletionParams {
export interface CompletionParams<T = any> {
/** Array of messages forming the conversation */
messages: AIMessage[];
/** Model to use for completion */
@@ -54,6 +68,8 @@ export interface CompletionParams {
stopSequences?: string[];
/** Whether to stream the response */
stream?: boolean;
/** Expected response type for structured outputs */
responseType?: ResponseType<T>;
/** Additional provider-specific parameters */
[key: string]: any;
}
@@ -144,4 +160,115 @@ export interface ProviderInfo {
supportsStreaming: boolean;
/** Additional capabilities */
capabilities?: Record<string, any>;
}
// ============================================================================
// RESPONSE TYPE UTILITIES
// ============================================================================
/**
 * Builds a ResponseType describing the structured output expected from a
 * completion request.
 *
 * @param typeDefinition - TypeScript type definition as a string; surrounding
 *   whitespace is stripped before use
 * @param description - Human-readable description of the expected format
 * @param example - Optional example of the expected response structure
 * @param strictJson - Whether to enforce strict JSON formatting (default: true)
 * @returns ResponseType object for use in completion requests
 *
 * @example
 * ```typescript
 * const profileType = createResponseType(
 *   `{
 *     name: string;
 *     age: number;
 *     email: string;
 *   }`,
 *   'A minimal user profile',
 *   { name: 'Ada Lovelace', age: 36, email: 'ada@example.com' }
 * );
 * ```
 */
export function createResponseType<T = any>(
  typeDefinition: string,
  description: string,
  example?: T,
  strictJson: boolean = true
): ResponseType<T> {
  // Strip incidental whitespace so indented template-literal definitions
  // embed cleanly into the generated prompt.
  const trimmedDefinition = typeDefinition.trim();

  const responseType: ResponseType<T> = {
    typeDefinition: trimmedDefinition,
    description,
    example,
    strictJson
  };
  return responseType;
}
/**
 * Generates a system prompt instruction for structured output based on a
 * response type definition.
 *
 * The instruction embeds the TypeScript type definition, the human-readable
 * description, and (when provided) a JSON-serialized example, followed by a
 * formatting directive whose strictness depends on `strictJson`.
 *
 * @param responseType - The response type definition
 * @returns Formatted system prompt instruction
 */
export function generateResponseTypePrompt(responseType: ResponseType): string {
  const { typeDefinition, description, example, strictJson } = responseType;

  let prompt = `You must respond with a JSON object that matches the following TypeScript type definition:\n\n`;
  prompt += `Type Definition:\n\`\`\`typescript\n${typeDefinition}\n\`\`\`\n\n`;
  prompt += `Description: ${description}\n\n`;

  // Compare against undefined rather than truthiness so legitimate falsy
  // examples (0, '', false, null) are still rendered into the prompt.
  if (example !== undefined) {
    prompt += `Example:\n\`\`\`json\n${JSON.stringify(example, null, 2)}\n\`\`\`\n\n`;
  }

  if (strictJson) {
    prompt += `IMPORTANT: Your response must be valid JSON only. Do not include any text before or after the JSON object. Do not use markdown formatting.`;
  } else {
    prompt += `Your response should follow the structure defined above.`;
  }

  return prompt;
}
/**
 * Validates that a response matches the expected type structure.
 *
 * Performs JSON parsing plus a basic structural check (the parsed value must
 * be a non-null object). A schema validation library such as zod or ajv would
 * be needed for field-level verification against the type definition.
 *
 * @param response - The response content to validate
 * @param responseType - The expected response type
 * @returns Object with validation result and parsed data
 */
export function validateResponseType<T = any>(
  response: string,
  responseType: ResponseType<T>
): { isValid: boolean; data?: T; error?: string } {
  let parsed: unknown;
  try {
    parsed = JSON.parse(response);
  } catch (error) {
    // Parsing failed outright — surface the parser's message.
    return {
      isValid: false,
      error: `Invalid JSON: ${(error as Error).message}`
    };
  }

  // Reject JSON primitives and null; only object-shaped payloads are accepted.
  if (parsed === null || typeof parsed !== 'object') {
    return {
      isValid: false,
      error: 'Response must be a JSON object'
    };
  }

  return {
    isValid: true,
    data: parsed as T
  };
}