
LocalLm Browser

Run models in the browser using Wllama

Install

npm i @locallm/browser

Usage

Vue.js example:

<template>
  <div>
    {{ output }}
  </div>
</template>

<script setup lang="ts">
import { onMounted, ref } from 'vue';
import { PromptTemplate } from 'modprompt';
import { LmBrowserProviderParams, OnLoadProgress, WllamaProvider } from '@locallm/browser';

const output = ref("");

// initialize the provider; onToken streams output into the page as it is generated
const lm = WllamaProvider.init({
  onToken: (t) => { output.value = t },
} as LmBrowserProviderParams);

// a small Qwen2 instruct model in GGUF format, fetched from Hugging Face
const model = {
  name: "Qwen2 0.5B",
  url: "https://huggingface.co/Qwen/Qwen2-0.5B-Instruct-GGUF/resolve/main/qwen2-0_5b-instruct-q5_k_m.gguf",
  ctx: 32768,
}

// log download progress while the model weights are being fetched
const onModelLoading: OnLoadProgress = (st) => {
  console.log(st.percent, "%")
}

async function init() {
  // download the model and load it into the in-browser runtime
  await lm.loadBrowsermodel(model.name, model.url, model.ctx, onModelLoading);
  // build a ChatML prompt with a custom system message
  const p = new PromptTemplate("chatml")
    .replaceSystem("You are an AI assistant. Important: always use json to respond")
    .prompt("List the planets of the solar system.")
  // run inference; the response is streamed through onToken
  const res = await lm.infer(
    p,
    { temperature: 0, min_p: 0.05 }
  );
  console.log(res.stats)
}

onMounted(() => init())
</script>
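
The same flow works outside Vue. Below is a minimal framework-agnostic sketch that accumulates the streamed output and parses the JSON answer the system prompt asks for. It assumes the inference result exposes a text field alongside stats; that field is not shown in the example above, so check the actual return shape of infer before relying on it.

import { PromptTemplate } from 'modprompt';
import { LmBrowserProviderParams, WllamaProvider } from '@locallm/browser';

// buffer for the streamed output; mirrors the Vue example above, where each
// onToken call overwrites the displayed value
let buffer = "";

const lm = WllamaProvider.init({
  onToken: (t) => { buffer = t },
} as LmBrowserProviderParams);

async function run() {
  await lm.loadBrowsermodel(
    "Qwen2 0.5B",
    "https://huggingface.co/Qwen/Qwen2-0.5B-Instruct-GGUF/resolve/main/qwen2-0_5b-instruct-q5_k_m.gguf",
    32768,
    (st) => console.log(st.percent, "%"),
  );
  const p = new PromptTemplate("chatml")
    .replaceSystem("You are an AI assistant. Important: always use json to respond")
    .prompt("List the planets of the solar system.");
  const res = await lm.infer(p, { temperature: 0, min_p: 0.05 });
  // `res.text` is an assumption about the result shape; the example above
  // only demonstrates `res.stats`
  const raw = (res as { text?: string }).text ?? buffer;
  try {
    console.log(JSON.parse(raw));
  } catch {
    console.log("model did not return valid json:", raw);
  }
}

run();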