2.48.0 • Published 5 months ago

@memberjunction/ai-openai v2.48.0

Weekly downloads
-
License
ISC
Repository
-
Last release
5 months ago

@memberjunction/ai-openai

A comprehensive wrapper for OpenAI's API and models that seamlessly integrates with the MemberJunction AI framework, providing a standardized interface for GPT, embedding, and text-to-speech models.

Features

  • OpenAI Integration: Full integration with OpenAI's chat completion, embedding, and TTS models
  • Standardized Interface: Follows MemberJunction's BaseLLM, BaseEmbeddings, and BaseAudioGenerator abstract classes
  • Streaming Support: Full support for streaming chat completions
  • Message Formatting: Handles conversion between MemberJunction and OpenAI message formats
  • Multi-modal Support: Supports text and image content in messages
  • Response Format Support: Support for different response formats (Text, JSON, Markdown, ModelSpecific)
  • Reasoning Models: Support for reasoning effort levels (o1 models)
  • Error Handling: Comprehensive error handling with detailed reporting
  • Token Usage Tracking: Automatic tracking of prompt and completion tokens
  • Embeddings: Text embedding generation with multiple models
  • Text-to-Speech: Generate speech from text using OpenAI's TTS models

Installation

npm install @memberjunction/ai-openai

Requirements

  • Node.js 16+
  • An OpenAI API key
  • MemberJunction Core libraries

Usage

Basic Setup

import { OpenAILLM } from '@memberjunction/ai-openai';

// Initialize with your API key
const openAI = new OpenAILLM('your-openai-api-key');

Chat Completion

import { ChatParams } from '@memberjunction/ai';

// Create chat parameters
const chatParams: ChatParams = {
  model: 'gpt-4',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'What is machine learning?' }
  ],
  temperature: 0.7,
  maxOutputTokens: 500,
  responseFormat: 'Text',
  includeLogProbs: false
};

// Get a response
try {
  const response = await openAI.ChatCompletion(chatParams);
  if (response.success) {
    console.log('Response:', response.data.choices[0].message.content);
    console.log('Token Usage:', response.data.usage);
  } else {
    console.error('Error:', response.errorMessage);
  }
} catch (error) {
  console.error('Exception:', error);
}

Streaming Chat Completion

const streamingParams: ChatParams = {
  model: 'gpt-4',
  messages: [
    { role: 'user', content: 'Tell me a story' }
  ],
  temperature: 0.8,
  maxOutputTokens: 1000
};

// Stream the response
await openAI.StreamingChatCompletion(streamingParams, {
  onStart: () => console.log('Streaming started...'),
  onContent: (content) => process.stdout.write(content),
  onComplete: (fullContent) => console.log('\n\nComplete:', fullContent),
  onError: (error) => console.error('Error:', error),
  onUsage: (usage) => console.log('Token usage:', usage)
});

Multi-modal Messages (Text + Images)

const multiModalParams: ChatParams = {
  model: 'gpt-4-vision-preview',
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', content: 'What do you see in this image?' },
        { type: 'image_url', content: 'https://example.com/image.jpg' }
      ]
    }
  ],
  maxOutputTokens: 500
};

const response = await openAI.ChatCompletion(multiModalParams);

JSON Response Format

const jsonParams: ChatParams = {
  model: 'gpt-4',
  messages: [
    { role: 'system', content: 'You are a helpful assistant that outputs JSON.' },
    { role: 'user', content: 'Generate a JSON object with name, age, and city for 3 fictional people.' }
  ],
  temperature: 0.3,
  maxOutputTokens: 500,
  responseFormat: 'JSON'
};

const jsonResponse = await openAI.ChatCompletion(jsonParams);
if (jsonResponse.success) {
  const jsonData = JSON.parse(jsonResponse.data.choices[0].message.content);
  console.log('Structured Data:', jsonData);
}

Reasoning Models (o1 series)

const reasoningParams: ChatParams = {
  model: 'o1-preview',
  messages: [
    { role: 'user', content: 'Solve this complex math problem...' }
  ],
  effortLevel: 'high', // 'low', 'medium', or 'high'
  maxOutputTokens: 2000
};

const response = await openAI.ChatCompletion(reasoningParams);

Text Summarization

import { SummarizeParams } from '@memberjunction/ai';

const text = `Long text that you want to summarize...`;

const summarizeParams: SummarizeParams = {
  text: text,
  model: 'gpt-3.5-turbo',
  temperature: 0.3,
  maxWords: 100
};

const summary = await openAI.SummarizeText(summarizeParams);
console.log('Summary:', summary.summary);

Text Embeddings

import { OpenAIEmbedding } from '@memberjunction/ai-openai';

const embedding = new OpenAIEmbedding('your-openai-api-key');

// Embed a single text
const singleResult = await embedding.EmbedText({
  text: 'The quick brown fox jumps over the lazy dog',
  model: 'text-embedding-3-small' // or 'text-embedding-3-large', 'text-embedding-ada-002'
});
console.log('Embedding vector:', singleResult.vector);

// Embed multiple texts
const multiResult = await embedding.EmbedTexts({
  texts: ['First text', 'Second text', 'Third text'],
  model: 'text-embedding-3-large'
});
console.log('Embedding vectors:', multiResult.vectors);

// Get available models
const models = await embedding.GetEmbeddingModels();
console.log('Available models:', models);

Text-to-Speech

import { OpenAIAudioGenerator } from '@memberjunction/ai-openai';
import * as fs from 'fs';

const tts = new OpenAIAudioGenerator('your-openai-api-key');

// Generate speech
const speechResult = await tts.CreateSpeech({
  text: 'Hello, this is a test of OpenAI text-to-speech.',
  model_id: 'gpt-4o-mini-tts',
  voice: 'nova', // 'alloy', 'echo', 'fable', 'onyx', 'nova', or 'shimmer'
  instructions: 'Speak in a cheerful and positive tone'
});

if (speechResult.success) {
  // speechResult.data contains the audio buffer
  // speechResult.content contains base64-encoded audio
  fs.writeFileSync('output.mp3', speechResult.data);
}

// Get available voices and models
const voices = await tts.GetVoices();
const models = await tts.GetModels();

Direct Access to OpenAI Client

// Access the underlying OpenAI client for advanced usage
const openAIClient = openAI.OpenAI;

// Use the client directly for features not wrapped
const completion = await openAIClient.completions.create({
  model: 'gpt-3.5-turbo-instruct',
  prompt: 'Say this is a test',
  max_tokens: 7
});

API Reference

OpenAILLM Class

Extends BaseLLM to provide OpenAI-specific chat and completion functionality.

Constructor

new OpenAILLM(apiKey: string)

Properties

  • OpenAI: (read-only) Returns the underlying OpenAI client instance
  • SupportsStreaming: (read-only) Returns true - OpenAI supports streaming

Methods

  • ChatCompletion(params: ChatParams): Promise<ChatResult> - Perform a chat completion
  • StreamingChatCompletion(params: ChatParams, callbacks: StreamingChatCallbacks): Promise<void> - Stream a chat completion
  • SummarizeText(params: SummarizeParams): Promise<SummarizeResult> - Summarize text
  • ClassifyText(params: ClassifyParams): Promise<ClassifyResult> - Classify text (not implemented)
  • ConvertMJToOpenAIChatMessages(messages: ChatMessage[]): ChatCompletionMessageParam[] - Convert MJ to OpenAI format
  • ConvertMJToOpenAIRole(role: string): 'system' | 'user' | 'assistant' - Convert MJ roles to OpenAI roles

OpenAIEmbedding Class

Extends BaseEmbeddings to provide OpenAI embedding functionality.

Constructor

new OpenAIEmbedding(apiKey: string)

Methods

  • EmbedText(params: EmbedTextParams): Promise<EmbedTextResult> - Generate embedding for single text
  • EmbedTexts(params: EmbedTextsParams): Promise<EmbedTextsResult> - Generate embeddings for multiple texts
  • GetEmbeddingModels(): Promise<any> - Get available embedding models

OpenAIAudioGenerator Class

Extends BaseAudioGenerator to provide OpenAI text-to-speech functionality.

Constructor

new OpenAIAudioGenerator(apiKey: string)

Methods

  • CreateSpeech(params: TextToSpeechParams): Promise<SpeechResult> - Generate speech from text
  • SpeechToText(params: SpeechToTextParams): Promise<SpeechResult> - Convert speech to text (not implemented)
  • GetVoices(): Promise<VoiceInfo[]> - Get available voices
  • GetModels(): Promise<AudioModel[]> - Get available TTS models
  • GetPronounciationDictionaries(): Promise<PronounciationDictionary[]> - Get pronunciation dictionaries (empty)
  • GetSupportedMethods(): Promise<string[]> - Get supported methods

Embedding Models

  • text-embedding-3-large: Most capable model (3,072 dimensions)
  • text-embedding-3-small: Balanced performance (1,536 dimensions)
  • text-embedding-ada-002: Legacy 2nd generation model (1,536 dimensions)

TTS Voices

  • alloy: Neutral and balanced
  • echo: Warm and conversational
  • fable: Expressive and animated
  • onyx: Deep and authoritative
  • nova: Friendly and upbeat
  • shimmer: Soft and gentle

Response Formats

The OpenAILLM class supports various response formats:

  • Text: Regular text responses (default)
  • JSON: Structured JSON responses (requires compatible model)
  • Markdown: Markdown-formatted responses
  • Any: Model decides the format
  • ModelSpecific: Custom formats with modelSpecificResponseFormat parameter

Error Handling

The wrapper provides comprehensive error information:

try {
  const response = await openAI.ChatCompletion(params);
  if (!response.success) {
    console.error('Error:', response.errorMessage);
    console.error('Status:', response.statusText);
    console.error('Time Elapsed:', response.timeElapsed, 'ms');
    console.error('Exception:', response.exception);
  }
} catch (error) {
  console.error('Exception occurred:', error);
}

Integration with MemberJunction

This package seamlessly integrates with the MemberJunction AI framework:

import { AIEngine } from '@memberjunction/ai';

// The OpenAI classes are automatically registered
const engine = new AIEngine();
const llm = engine.GetLLM('OpenAILLM', 'your-api-key');
const embeddings = engine.GetEmbedding('OpenAIEmbedding', 'your-api-key');
const tts = engine.GetAudioGenerator('OpenAIAudioGenerator', 'your-api-key');

Dependencies

  • openai: Official OpenAI Node.js SDK (v4.98.0)
  • @memberjunction/ai: MemberJunction AI core framework (v2.43.0)
  • @memberjunction/global: MemberJunction global utilities (v2.43.0)

License

ISC

2.27.1

8 months ago

2.23.2

9 months ago

2.46.0

5 months ago

2.23.1

9 months ago

2.27.0

8 months ago

2.34.0

7 months ago

2.30.0

8 months ago

2.19.4

10 months ago

2.19.5

10 months ago

2.19.2

10 months ago

2.19.3

10 months ago

2.19.0

10 months ago

2.19.1

10 months ago

2.15.2

10 months ago

2.34.2

7 months ago

2.15.0

10 months ago

2.34.1

7 months ago

2.15.1

10 months ago

2.38.0

6 months ago

2.45.0

5 months ago

2.22.1

9 months ago

2.22.0

9 months ago

2.41.0

6 months ago

2.22.2

9 months ago

2.26.1

9 months ago

2.26.0

9 months ago

2.33.0

7 months ago

2.18.3

10 months ago

2.18.1

10 months ago

2.18.2

10 months ago

2.18.0

10 months ago

2.37.1

6 months ago

2.37.0

6 months ago

2.14.0

10 months ago

2.21.0

9 months ago

2.44.0

5 months ago

2.40.0

6 months ago

2.29.0

8 months ago

2.29.2

8 months ago

2.29.1

8 months ago

2.25.0

9 months ago

2.48.0

5 months ago

2.32.0

7 months ago

2.32.2

7 months ago

2.32.1

7 months ago

2.17.0

10 months ago

2.13.4

11 months ago

2.36.0

6 months ago

2.13.2

11 months ago

2.13.3

11 months ago

2.13.0

12 months ago

2.36.1

6 months ago

2.13.1

12 months ago

2.43.0

6 months ago

2.20.2

9 months ago

2.20.3

9 months ago

2.20.0

10 months ago

2.20.1

10 months ago

2.28.0

8 months ago

2.47.0

5 months ago

2.24.1

9 months ago

2.24.0

9 months ago

2.31.0

7 months ago

2.12.0

1 year ago

2.39.0

6 months ago

2.16.1

10 months ago

2.35.1

6 months ago

2.35.0

7 months ago

2.16.0

10 months ago

2.42.1

6 months ago

2.42.0

6 months ago

2.23.0

9 months ago

2.11.0

1 year ago

2.10.0

1 year ago

2.9.0

1 year ago

2.8.0

1 year ago

2.6.1

1 year ago

2.6.0

1 year ago

2.7.0

1 year ago

2.5.2

1 year ago

2.7.1

1 year ago

1.8.1

1 year ago

1.8.0

1 year ago

1.6.1

1 year ago

1.6.0

1 year ago

1.4.1

1 year ago

1.4.0

1 year ago

2.2.1

1 year ago

2.2.0

1 year ago

2.4.1

1 year ago

2.4.0

1 year ago

2.0.0

1 year ago

1.7.1

1 year ago

1.5.3

1 year ago

1.7.0

1 year ago

1.5.2

1 year ago

1.5.1

1 year ago

1.3.3

1 year ago

1.5.0

1 year ago

1.3.2

1 year ago

1.3.1

1 year ago

1.3.0

1 year ago

2.3.0

1 year ago

2.1.2

1 year ago

2.1.1

1 year ago

2.5.0

1 year ago

2.3.2

1 year ago

2.1.4

1 year ago

2.3.1

1 year ago

2.1.3

1 year ago

2.5.1

1 year ago

2.3.3

1 year ago

2.1.5

1 year ago

2.1.0

1 year ago

1.2.2

2 years ago

1.2.1

2 years ago

1.2.0

2 years ago

1.1.1

2 years ago

1.1.0

2 years ago

1.1.3

2 years ago

1.1.2

2 years ago

1.0.11

2 years ago

1.0.9

2 years ago

1.0.8

2 years ago

1.0.7

2 years ago

1.0.8-next.6

2 years ago

1.0.8-next.5

2 years ago

1.0.8-next.4

2 years ago

1.0.8-next.3

2 years ago

1.0.8-next.2

2 years ago

1.0.8-beta.0

2 years ago

1.0.8-next.1

2 years ago

1.0.8-next.0

2 years ago

1.0.7-next.0

2 years ago

1.0.2

2 years ago

1.0.6

2 years ago

1.0.4

2 years ago

1.0.3

2 years ago

1.0.1

2 years ago

1.0.0

2 years ago

0.9.30

2 years ago

0.9.28

2 years ago

0.9.29

2 years ago

0.9.27

2 years ago

0.9.24

2 years ago

0.9.25

2 years ago

0.9.26

2 years ago

0.9.22

2 years ago

0.9.21

2 years ago

0.9.23

2 years ago

0.9.19

2 years ago

0.9.16

2 years ago

0.9.17

2 years ago

0.9.18

2 years ago

0.9.15

2 years ago

0.9.14

2 years ago

0.9.12

2 years ago

0.9.13

2 years ago

0.9.10

2 years ago

0.9.11

2 years ago

0.9.9

2 years ago