
@memberjunction/ai-mistral

A comprehensive wrapper for Mistral AI's models, enabling seamless integration with the MemberJunction AI framework for natural language processing and embedding tasks.

Features

  • Mistral AI Integration: Connect to Mistral's powerful language models and embedding models
  • Standardized Interface: Implements MemberJunction's BaseLLM and BaseEmbeddings abstract classes
  • Streaming Support: Full support for streaming chat completions
  • Token Usage Tracking: Automatic tracking of prompt and completion tokens
  • Response Format Control: Support for standard text and JSON response formats
  • Multi-Modal Support: Handles text, images, and documents in chat messages
  • Error Handling: Robust error handling with detailed reporting
  • Chat Completion: Full support for chat-based interactions with Mistral models
  • Text Embeddings: Generate vector embeddings for text using Mistral's embedding models

Installation

npm install @memberjunction/ai-mistral

Requirements

  • Node.js 16+
  • TypeScript 5.4.5+
  • A Mistral AI API key
  • MemberJunction Core libraries (@memberjunction/ai, @memberjunction/global)

Usage

Basic Setup

import { MistralLLM } from '@memberjunction/ai-mistral';

// Initialize with your Mistral API key
const mistralLLM = new MistralLLM('your-mistral-api-key');
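In practice, the API key typically comes from the environment rather than a hard-coded literal. A minimal sketch, assuming you export a MISTRAL_API_KEY variable (the variable name is this example's choice, not a package requirement):

// Read the key from an environment variable instead of hard-coding it
const apiKey = process.env.MISTRAL_API_KEY;
if (!apiKey) {
  throw new Error('MISTRAL_API_KEY environment variable is not set');
}
const mistralLLM = new MistralLLM(apiKey);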

Chat Completion

import { ChatParams, ChatMessageRole } from '@memberjunction/ai';

// Create chat parameters
const chatParams: ChatParams = {
  model: 'mistral-large-latest',  // or other models like 'open-mistral-7b', 'mistral-small-latest'
  messages: [
    { role: ChatMessageRole.system, content: 'You are a helpful assistant.' },
    { role: ChatMessageRole.user, content: 'What are the main principles of machine learning?' }
  ],
  temperature: 0.7,
  maxOutputTokens: 1000
};

// Get a response
try {
  const response = await mistralLLM.ChatCompletion(chatParams);
  if (response.success) {
    console.log('Response:', response.data.choices[0].message.content);
    console.log('Token Usage:', response.data.usage);
    console.log('Time Elapsed (ms):', response.timeElapsed);
  } else {
    console.error('Error:', response.errorMessage);
  }
} catch (error) {
  console.error('Exception:', error);
}

JSON Response Format

// Request a structured JSON response
const jsonParams: ChatParams = {
  model: 'mistral-large-latest',
  messages: [
    { role: ChatMessageRole.system, content: 'You are a helpful assistant that responds in JSON format.' },
    { role: ChatMessageRole.user, content: 'Give me data about the top 3 machine learning algorithms in JSON format' }
  ],
  maxOutputTokens: 1000,
  responseFormat: 'JSON'  // This will add the appropriate response_format parameter
};

const jsonResponse = await mistralLLM.ChatCompletion(jsonParams);

// Parse the JSON response
if (jsonResponse.success) {
  const structuredData = JSON.parse(jsonResponse.data.choices[0].message.content);
  console.log('Structured Data:', structuredData);
}
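Model output is not guaranteed to be syntactically valid JSON, so a defensive parse is worth adding. A minimal sketch building on the response shape shown above:

try {
  const structuredData = JSON.parse(jsonResponse.data.choices[0].message.content);
  console.log('Structured Data:', structuredData);
} catch (parseError) {
  // The model can occasionally return malformed JSON; handle it gracefully
  console.error('Failed to parse model output as JSON:', parseError);
}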

Streaming Chat Completion

// Mistral supports streaming responses
const streamParams: ChatParams = {
  model: 'mistral-large-latest',
  messages: [
    { role: ChatMessageRole.system, content: 'You are a helpful assistant.' },
    { role: ChatMessageRole.user, content: 'Write a short story about AI' }
  ],
  maxOutputTokens: 1000,
  stream: true,  // Enable streaming
  streamCallback: (content: string) => {
    // Handle each chunk of streamed content
    process.stdout.write(content);
  }
};

const streamResponse = await mistralLLM.ChatCompletion(streamParams);
console.log('\nStreaming complete!');
console.log('Total tokens:', streamResponse.data.usage);
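If you also need the complete text once streaming finishes, you can accumulate chunks in the callback yourself. A sketch using only the streamCallback mechanism shown above:

let fullText = '';

const accumulatingParams: ChatParams = {
  model: 'mistral-large-latest',
  messages: [
    { role: ChatMessageRole.user, content: 'Write a short story about AI' }
  ],
  maxOutputTokens: 1000,
  stream: true,
  streamCallback: (content: string) => {
    fullText += content;           // accumulate each chunk
    process.stdout.write(content); // and still echo it as it arrives
  }
};

await mistralLLM.ChatCompletion(accumulatingParams);
console.log('\nFull story length:', fullText.length);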

Multi-Modal Messages

// Mistral supports images and documents in messages
const multiModalParams: ChatParams = {
  model: 'mistral-large-latest',
  messages: [
    {
      role: ChatMessageRole.user,
      content: [
        { type: 'text', content: 'What do you see in this image?' },
        { type: 'image_url', content: 'https://example.com/image.jpg' }
      ]
    }
  ],
  maxOutputTokens: 1000
};

// For documents
const documentParams: ChatParams = {
  model: 'mistral-large-latest',
  messages: [
    {
      role: ChatMessageRole.user,
      content: [
        { type: 'text', content: 'Summarize this document' },
        { type: 'file_url', content: 'https://example.com/document.pdf' }  // Converted to document_url for Mistral
      ]
    }
  ],
  maxOutputTokens: 1000
};

Text Embeddings

import { MistralEmbedding } from '@memberjunction/ai-mistral';
import { EmbedTextParams, EmbedTextsParams } from '@memberjunction/ai';

// Initialize the embedding client
const mistralEmbedding = new MistralEmbedding('your-mistral-api-key');

// Embed a single text
const embedParams: EmbedTextParams = {
  text: 'Machine learning is a subset of artificial intelligence.',
  model: 'mistral-embed'  // Optional, defaults to 'mistral-embed'
};

const embedResult = await mistralEmbedding.EmbedText(embedParams);
console.log('Embedding vector dimensions:', embedResult.vector.length);  // 1024 dimensions
console.log('Token usage:', embedResult.ModelUsage);

// Embed multiple texts
const multiEmbedParams: EmbedTextsParams = {
  texts: [
    'Natural language processing enables computers to understand human language.',
    'Deep learning uses neural networks with multiple layers.',
    'Computer vision allows machines to interpret visual information.'
  ],
  model: 'mistral-embed'
};

const multiEmbedResult = await mistralEmbedding.EmbedTexts(multiEmbedParams);
console.log('Number of embeddings:', multiEmbedResult.vectors.length);
console.log('Total token usage:', multiEmbedResult.ModelUsage);

// Get available embedding models
const embeddingModels = await mistralEmbedding.GetEmbeddingModels();
console.log('Available models:', embeddingModels);
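A common next step is comparing embeddings, for example to rank texts by semantic similarity. Cosine similarity is plain vector math with no package-specific assumptions, so it works directly on the 1024-dimension vectors returned above:

// Cosine similarity of two equal-length vectors: dot(a, b) / (|a| * |b|)
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

const [nlp, deep, vision] = multiEmbedResult.vectors;
console.log('NLP vs deep learning:', cosineSimilarity(nlp, deep));
console.log('NLP vs computer vision:', cosineSimilarity(nlp, vision));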

Direct Access to Mistral Client

// Access the underlying Mistral client for advanced usage
const mistralClient = mistralLLM.Client;

// Use the client directly if needed
const modelList = await mistralClient.models.list();
console.log('Available models:', modelList);

Supported Models

Mistral AI offers several models with different capabilities and price points:

  • mistral-large-latest: Mistral's most powerful model (at the time of writing)
  • mistral-medium-latest: Mid-tier model balancing performance and cost
  • mistral-small-latest: Smaller, more efficient model
  • open-mistral-7b: Open-source 7B parameter model
  • open-mixtral-8x7b: Open-source mixture-of-experts model

Check the Mistral AI documentation for the latest list of supported models.

API Reference

MistralLLM Class

A class that extends BaseLLM to provide Mistral-specific functionality.

Constructor

new MistralLLM(apiKey: string)

Properties

  • Client: (read-only) Returns the underlying Mistral client instance
  • SupportsStreaming: (read-only) Returns true - Mistral supports streaming

Methods

  • ChatCompletion(params: ChatParams): Promise<ChatResult> - Perform a chat completion (supports both streaming and non-streaming)
  • SummarizeText(params: SummarizeParams): Promise<SummarizeResult> - Not implemented yet
  • ClassifyText(params: ClassifyParams): Promise<ClassifyResult> - Not implemented yet

MistralEmbedding Class

A class that extends BaseEmbeddings to provide Mistral embedding functionality.

Constructor

new MistralEmbedding(apiKey: string)

Properties

  • Client: (read-only) Returns the underlying Mistral client instance

Methods

  • EmbedText(params: EmbedTextParams): Promise<EmbedTextResult> - Generate embedding for a single text
  • EmbedTexts(params: EmbedTextsParams): Promise<EmbedTextsResult> - Generate embeddings for multiple texts
  • GetEmbeddingModels(): Promise<any> - Get list of available embedding models

Response Format Control

The wrapper supports different response formats:

// For JSON responses
const params: ChatParams = {
  // ...other parameters
  responseFormat: 'JSON'
};

// For regular text responses (default)
const textParams: ChatParams = {
  // ...other parameters
  // No need to specify responseFormat for regular text
};

Error Handling

The wrapper provides detailed error information:

try {
  const response = await mistralLLM.ChatCompletion(params);
  if (!response.success) {
    console.error('Error:', response.errorMessage);
    console.error('Status:', response.statusText);
    console.error('Exception:', response.exception);
  }
} catch (error) {
  console.error('Exception occurred:', error);
}
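For transient failures such as rate limits or network errors, a simple retry wrapper around ChatCompletion can help. This is a generic sketch, not part of the package API; the attempt count and backoff delays are arbitrary choices:

import { ChatParams, ChatResult } from '@memberjunction/ai';

// Retry with exponential backoff; retries when the call throws or
// reports failure, up to maxAttempts times.
async function chatWithRetry(params: ChatParams, maxAttempts = 3): Promise<ChatResult> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      const response = await mistralLLM.ChatCompletion(params);
      if (response.success) return response;
      lastError = response.errorMessage;
    } catch (error) {
      lastError = error;
    }
    // Wait 500ms, 1000ms, 2000ms, ... between attempts
    await new Promise(resolve => setTimeout(resolve, 500 * 2 ** (attempt - 1)));
  }
  throw new Error(`ChatCompletion failed after ${maxAttempts} attempts: ${lastError}`);
}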

Token Usage Tracking

Monitor token usage for billing and quota management:

const response = await mistralLLM.ChatCompletion(params);
if (response.success) {
  console.log('Prompt Tokens:', response.data.usage.promptTokens);
  console.log('Completion Tokens:', response.data.usage.completionTokens);
  console.log('Total Tokens:', response.data.usage.totalTokens);
}
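Token counts can also feed a rough cost estimate. The prices below are placeholders purely for illustration; consult Mistral's pricing page for current figures:

// Hypothetical prices in USD per million tokens -- NOT real figures
const PROMPT_PRICE_PER_M = 2.0;
const COMPLETION_PRICE_PER_M = 6.0;

const { promptTokens, completionTokens } = response.data.usage;
const estimatedCost =
  (promptTokens / 1_000_000) * PROMPT_PRICE_PER_M +
  (completionTokens / 1_000_000) * COMPLETION_PRICE_PER_M;
console.log(`Estimated cost: $${estimatedCost.toFixed(6)}`);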

Special Behaviors

Message Formatting

  • The wrapper automatically enforces Mistral's requirement that the last message come from 'user' or 'tool'
  • If the last message is not from a user, a placeholder user message "ok" is automatically appended (illustrated below)
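For example, a conversation that ends with an assistant message is adjusted before being sent. This sketch illustrates the documented behavior, not the wrapper's internal code:

// What you pass in -- ends with an assistant message:
const input = [
  { role: ChatMessageRole.user, content: 'Summarize our discussion.' },
  { role: ChatMessageRole.assistant, content: 'Here is the summary...' }
];

// What effectively reaches Mistral -- a placeholder user message is appended:
const adjusted = [
  ...input,
  { role: ChatMessageRole.user, content: 'ok' }
];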

Multi-Modal Content

  • Image URLs are passed through as image_url type
  • File URLs are converted to document_url type for Mistral compatibility
  • Unsupported content types are filtered out with a warning

Limitations

Currently, the wrapper implements:

  • Chat completion functionality with full streaming support
  • Text embedding functionality with single and batch processing
  • Token usage tracking for both chat and embeddings

Not yet implemented:

  • SummarizeText functionality
  • ClassifyText functionality
  • effortLevel/reasoning_effort parameter (not currently supported by Mistral API)

Integration with MemberJunction

This package is designed to work seamlessly with the MemberJunction AI framework:

Class Registration

Both MistralLLM and MistralEmbedding are automatically registered with the MemberJunction class factory using the @RegisterClass decorator:

// Classes are registered and can be instantiated via the class factory
import { ClassFactory } from '@memberjunction/global';

const mistralLLM = ClassFactory.CreateInstance<BaseLLM>(BaseLLM, 'MistralLLM', apiKey);
const mistralEmbedding = ClassFactory.CreateInstance<BaseEmbeddings>(BaseEmbeddings, 'MistralEmbedding', apiKey);
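This pattern lets an application choose the provider at runtime, for example from configuration, without importing provider classes directly. A sketch assuming a hypothetical config object with a driverClass field:

// driverClass might be 'MistralLLM' here; other class names depend on
// which provider packages your application loads and registers.
const driverClass = config.aiDriverClass; // hypothetical config shape
const llm = ClassFactory.CreateInstance<BaseLLM>(BaseLLM, driverClass, apiKey);
const result = await llm.ChatCompletion(chatParams);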

Tree-Shaking Prevention

The package exports loader functions to prevent tree-shaking:

import { LoadMistralLLM, LoadMistralEmbedding } from '@memberjunction/ai-mistral';

// Call these in your application initialization to ensure classes are registered
LoadMistralLLM();
LoadMistralEmbedding();

Dependencies

  • @mistralai/mistralai: ^1.6.0 - Official Mistral AI Node.js SDK
  • @memberjunction/ai: 2.43.0 - MemberJunction AI core framework
  • @memberjunction/global: 2.43.0 - MemberJunction global utilities
  • axios-retry: 4.3.0 - Retry mechanism for API calls

Development

Building

npm run build

Development Mode

npm start

License

ISC

Contributing

When contributing to this package:

1. Follow the MemberJunction code style guide
2. Ensure all TypeScript types are properly defined
3. Add appropriate error handling
4. Update documentation for any new features
5. Test with various Mistral models
