1.0.4 • Published 6 months ago

lightrag-js v1.0.4

Weekly downloads
-
License
AGPL-3.0-or-later
Repository
-
Last release
6 months ago

🌬️ An implementation of LightRAG in JavaScript.

⚡ A high-fidelity port of the Python version.

Install

Install from npm

npm install lightrag-js

Use in the browser:

  • download the file lightrag.min.js from here
  • use it in your html file:
<script src="lightrag.min.js"></script>

The example is in the file here.

Quick Start

Init LightRAG:

1. Nodejs

const rags = require("lightrag-js");
const lightRagConfig = require("../configs/config_node");

async function llm_model_func(prompt, options) {
    // Pull optional fields with || so null/'' also fall back to the
    // defaults (same semantics as the original optional-chaining form).
    const systemPrompt = (options && options.system_prompt) || '';
    const historyMessages = (options && options.history_messages) || [];
    const cfg = lightRagConfig.lightRagConfig;
    // Delegate to the cached OpenAI-compatible completion helper;
    // the full options object is forwarded as extra kwargs.
    return await rags.openAiCompleteIfCache(
        cfg['LLM_MODEL'],
        prompt,
        systemPrompt,
        historyMessages,
        cfg['LLM_BASE_API_URL'],
        cfg['LLM_API_KEY'],
        options
    )
}

async function embedding_func(texts) {
    // Embed `texts` using the endpoint/model taken from the shared config.
    const cfg = lightRagConfig.lightRagConfig;
    const baseUrl = cfg['EMBEDDING_MODEL_BASE_API_URL'];
    const apiKey = cfg['EMBEDDING_MODEL_API_KEY'];
    const model = cfg['EMBEDDING_MODEL'];
    return await rags.openAiEmbedding(texts, baseUrl, apiKey, model)
}

async function loadJson(filePath) {
    try{
        const fs = require('fs')
        if (!fs.existsSync(filePath)) {
            fs.writeFileSync(filePath, '{}')
            return {}
        }
        const content = await fs.readFileSync(filePath,'utf-8');
        if (!content) return {}
        return JSON.parse(content)
    }catch(e){
        console.error(e)
        return {}
    }
}

async function writeJson(jsonObject, filePath) {
    try{
        const fs = require('fs')
        const jsonString = JSON.stringify(jsonObject, null, 2)
        fs.writeFileSync(filePath, jsonString)
    } catch (e) {
        console.error(e)
    }
}

2. ES6

import {
    LightRAG,
    createLightRAG,
    embeddingFunc,
    llmModelFunc,
    openAiCompleteIfCache,
    openAiEmbedding
} from "lightrag-js"
import { lightRagConfig } from "../rag/config";
import { loadJson, writeJson } from "../rag/utils";
/**
 * Options accepted by llm_model_func. Extra keys are forwarded verbatim
 * to openAiCompleteIfCache, so an index signature keeps the old `any`
 * flexibility while restoring type checking on the known fields.
 */
type LlmModelOptions = {
    system_prompt?: string;
    history_messages?: unknown[];
    [key: string]: unknown;
};

async function llm_model_func(
    prompt: string, options: LlmModelOptions
) {
    // `||` (not ??) so null/'' also fall back — matches prior behavior.
    const system_prompt = options?.system_prompt || '';
    const history_messages = options?.history_messages || [];
    return await openAiCompleteIfCache(
        lightRagConfig['LLM_MODEL'],
        prompt,
        system_prompt,
        history_messages,
        lightRagConfig['LLM_BASE_API_URL'],
        lightRagConfig['LLM_API_KEY'],
        options
    )
}

async function embedding_func(texts: string[]) {
    // Request embeddings for `texts` from the configured endpoint.
    const baseUrl = lightRagConfig['EMBEDDING_MODEL_BASE_API_URL'];
    const apiKey = lightRagConfig['EMBEDDING_MODEL_API_KEY'];
    const model = lightRagConfig['EMBEDDING_MODEL'];
    return await openAiEmbedding(texts, baseUrl, apiKey, model)
}

async function _loadJson(filePath: string) {
    //TODO
}

async function _writeJson(jsonObject: Record<string, any>, filePath: string) {
    //TODO
}

async function getRag() {
    // Assemble the LightRAG instance from the shared config plus the
    // LLM, embedding, and JSON persistence callbacks defined above.
    const options = {
        workingDir: lightRagConfig['RAG_DIR'],
        llmModelFunc: new llmModelFunc(llm_model_func),
        embeddingFunc: new embeddingFunc(
            3072,  // embedding dimension
            lightRagConfig['EMBEDDING_MAX_TOKEN_SIZE'],
            embedding_func,
        ),
        loadJsonFunc: loadJson,
        writeJsonFunc: writeJson,
    };
    return await createLightRAG(options);
}

// Build the RAG instance and log it once initialization completes.
void (async () => {
    const rag = await getRag();
    console.log(rag);
})();

../configs/config_node.js

 const lightRagConfig = {
    "RAG_DIR": "",
    "LLM_MODEL": "deepseek-chat",
    "LLM_API_KEY": "",
    "LLM_BASE_API_URL": "https://api.deepseek.com",
    "EMBEDDING_MODEL": "text-embedding-v1",
    "EMBEDDING_MAX_TOKEN_SIZE": 8192,
    "EMBEDDING_MODEL_API_KEY": "",
    "EMBEDDING_MODEL_BASE_API_URL": "",
}

// Nodejs
module.exports = {
    lightRagConfig
}

// ES6
export { lightRagConfig }

You can add arbitrary fields to a data record, but two keys are reserved:

  • __id__: If passed, NanoVectorDB will use your id, otherwise a generated id will be used.
  • __vector__: required; the embedding must be a Float32Array.

Query Param

// Nodejs
async function testQuery(mode, content, rag) {
    // Build a QueryParam for the requested mode, then run the query.
    const queryParam = new rags.QueryParam()
    queryParam.mode = mode
    rag.query(content, queryParam)
}
//ES6
async function testQuery(mode, content, rag) {
    // Build a QueryParam for the requested mode, then run the query.
    const queryParam = new QueryParam()
    queryParam.mode = mode
    rag.query(content, queryParam)
}

// Example: run a "hybrid"-mode query (assumes `rag` from getRag() above).
const content = "What is the highest peak in the World?"
testQuery("hybrid", content, rag)

Streaming response

// Streaming example: enable isStreamResponse and consume the result as
// an async iterator, accumulating chunks into `res`.
const content = "你好"
const param = new rags.QueryParam()
param.mode = "hybrid"  // fixed: the original referenced an undefined `mode` variable
param.isStreamResponse = true
console.log(param)
let res = ""
const stream = await rag.query(content, param)
// `const chunk` — the original omitted the declaration, which throws in
// strict/ESM mode (and leaks a global otherwise).
for await (const chunk of stream) {
    console.log(chunk)
    res += chunk
}
console.log("res", res)
console.log("stream", stream)

Insert

// Insert raw text into the knowledge base.
const content = "The Himalayas are the highest peak in the world."
rag.insert(content)

Get insert progress

  // Poll the current ingestion/readiness state; returns a RagProcess.
  rag.getRagProgress()
  
  /**
   * @return RagProcess
   */
  // interface RagProcess {
  //   totalInsertChunks: number;  // total chunks to be inserted
  //   readyInsertChunks: number;  // chunks processed so far
  //   isInsertReady: boolean;     // insertion finished
  //   isQueryReady: boolean;      // ready to accept queries
  // }

Delete

// Delete by entity name — presumably removes the entity and its
// associated data; confirm exact semantics in the upstream LightRAG docs.
const content = "Himalayas"
rag.deleteByEntity(content)

Get Knowledge Graph HTML

// Render the knowledge graph as HTML and save it to disk.
const fs = require('fs')
const khtml = rag.getKnowledgeHtml()
const savePath = "knowledge.html"
fs.writeFileSync(savePath, khtml)

This generates an HTML file that you can open in your browser.

Thanks

1.0.4

6 months ago

1.0.3

6 months ago

1.0.2

6 months ago

1.0.1

6 months ago

1.0.0

6 months ago