elibobcoder/huggingface.js

Use Hugging Face with JavaScript
```ts
// Programmatically interact with the Hub

await createRepo({
  repo: { type: "model", name: "my-user/nlp-model" },
  accessToken: HF_TOKEN
});

await uploadFile({
  repo: "my-user/nlp-model",
  accessToken: HF_TOKEN,
  // Can work with native File in browsers
  file: {
    path: "pytorch_model.bin",
    content: new Blob(...)
  }
});

// Use all supported Inference Providers!

await inference.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  provider: "sambanova", // or together, fal-ai, replicate, cohere …
  messages: [
    {
      role: "user",
      content: "Hello, nice to meet you!",
    },
  ],
  max_tokens: 512,
  temperature: 0.5,
});

await inference.textToImage({
  model: "black-forest-labs/FLUX.1-dev",
  provider: "replicate",
  inputs: "a picture of a green bird",
});

// and much more…
```
This is a collection of JS libraries to interact with the Hugging Face API, with TS types included.
- @huggingface/inference: Use all supported (serverless) Inference Providers or switch to Inference Endpoints (dedicated) to make calls to 100,000+ Machine Learning models
- @huggingface/hub: Interact with huggingface.co to create or delete repos and commit / download files
- @huggingface/mcp-client: A Model Context Protocol (MCP) client, and a tiny Agent library, built on top of InferenceClient.
- @huggingface/gguf: A GGUF parser that works on remotely hosted files (see the sketch after this list).
- @huggingface/dduf: Similar package for DDUF (DDUF Diffusers Unified Format)
- @huggingface/tasks: The definition files and source-of-truth for the Hub's main primitives like pipeline tasks, model libraries, etc.
- @huggingface/jinja: A minimalistic JS implementation of the Jinja templating engine, to be used for ML chat templates.
- @huggingface/space-header: Use the Space `mini_header` outside Hugging Face
- @huggingface/ollama-utils: Various utilities for maintaining Ollama compatibility with models on the Hugging Face Hub.
- @huggingface/tiny-agents: A tiny, model-agnostic library for building AI agents that can use tools.
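Several of these packages are shown in action further down; as a quick taste, @huggingface/gguf can read a model's metadata straight from a remote file. A minimal sketch, assuming a publicly hosted .gguf file (the URL is illustrative):

```ts
import { gguf } from "@huggingface/gguf";

// Illustrative URL: any remotely hosted .gguf file should work
const url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf";

// Only the header is fetched, not the whole file
const { metadata, tensorInfos } = await gguf(url);
console.log(metadata["general.name"], tensorInfos.length);
```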
We use modern features to avoid polyfills and dependencies, so the libraries will only work on modern browsers / Node.js >= 18 / Bun / Deno.
The libraries are still very young; please help us by opening issues!
To install via NPM, you can download the libraries as needed:
```sh
npm install @huggingface/inference
npm install @huggingface/hub
npm install @huggingface/mcp-client
```
Then import the libraries in your code:
```ts
import { InferenceClient } from "@huggingface/inference";
import { createRepo, commit, deleteRepo, listFiles } from "@huggingface/hub";
import { McpClient } from "@huggingface/mcp-client";

import type { RepoId } from "@huggingface/hub";
```
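As a small illustration (the repo name is a placeholder), the `RepoId` type imported above describes a repo by type and name, matching the object form that `createRepo` accepts:

```ts
import type { RepoId } from "@huggingface/hub";

// Equivalent to the string shorthand "my-user/nlp-model" used elsewhere
const repo: RepoId = { type: "model", name: "my-user/nlp-model" };
```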
You can run our packages with vanilla JS, without any bundler, by using a CDN or static hosting. Using ES modules, i.e. `<script type="module">`, you can import the libraries in your code:
```html
<script type="module">
  import { InferenceClient } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@4.4.0/+esm';
  import { createRepo, commit, deleteRepo, listFiles } from "https://cdn.jsdelivr.net/npm/@huggingface/hub@2.4.0/+esm";
</script>
```
When using Deno, you can import from esm.sh or via `npm:` specifiers:

```ts
// esm.sh
import { InferenceClient } from "https://esm.sh/@huggingface/inference";
import { createRepo, commit, deleteRepo, listFiles } from "https://esm.sh/@huggingface/hub";
// or npm:
import { InferenceClient } from "npm:@huggingface/inference";
import { createRepo, commit, deleteRepo, listFiles } from "npm:@huggingface/hub";
```
Get your HF access token in your [account settings](https://huggingface.co/settings/tokens).
```ts
import { InferenceClient } from "@huggingface/inference";

const HF_TOKEN = "hf_...";

const client = new InferenceClient(HF_TOKEN);

// Chat completion API
const out = await client.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello, nice to meet you!" }],
  max_tokens: 512
});
console.log(out.choices[0].message);

// Streaming chat completion API
for await (const chunk of client.chatCompletionStream({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello, nice to meet you!" }],
  max_tokens: 512
})) {
  console.log(chunk.choices[0].delta.content);
}

// Using a third-party provider:
await client.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello, nice to meet you!" }],
  max_tokens: 512,
  provider: "sambanova", // or together, fal-ai, replicate, cohere …
});

await client.textToImage({
  model: "black-forest-labs/FLUX.1-dev",
  inputs: "a picture of a green bird",
  provider: "fal-ai",
});

// You can also omit "model" to use the recommended model for the task
await client.translation({
  inputs: "My name is Wolfgang and I live in Amsterdam",
  parameters: {
    src_lang: "en",
    tgt_lang: "fr",
  },
});

// Pass multimodal files or URLs as inputs
await client.imageToText({
  model: "nlpconnect/vit-gpt2-image-captioning",
  data: await (await fetch("https://picsum.photos/300/300")).blob(),
});

// Using your own dedicated inference endpoint: https://hf.co/docs/inference-endpoints/
const gpt2Client = client.endpoint("https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2");
const { generated_text } = await gpt2Client.textGeneration({ inputs: "The answer to the universe is" });

// Chat completion
const llamaEndpoint = client.endpoint(
  "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct"
);
const chatOut = await llamaEndpoint.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello, nice to meet you!" }],
  max_tokens: 512,
});
console.log(chatOut.choices[0].message);
```
Create a repo and upload or delete files with @huggingface/hub:

```ts
import { createRepo, uploadFile, deleteFiles } from "@huggingface/hub";

const HF_TOKEN = "hf_...";

await createRepo({
  repo: "my-user/nlp-model", // or { type: "model", name: "my-user/nlp-test" }
  accessToken: HF_TOKEN
});

await uploadFile({
  repo: "my-user/nlp-model",
  accessToken: HF_TOKEN,
  // Can work with native File in browsers
  file: {
    path: "pytorch_model.bin",
    content: new Blob(...)
  }
});

await deleteFiles({
  repo: { type: "space", name: "my-user/my-space" }, // or "spaces/my-user/my-space"
  accessToken: HF_TOKEN,
  paths: ["README.md", ".gitattributes"]
});
```
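`listFiles` (imported in the snippet near the top) enumerates a repo's contents as an async iterable. A minimal sketch, assuming a public repo (the name is a placeholder and the exact entry fields may differ):

```ts
import { listFiles } from "@huggingface/hub";

// accessToken is only required for private repos
for await (const file of listFiles({ repo: "my-user/nlp-model" })) {
  console.log(file.path, file.size);
}
```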
Run an agent with @huggingface/mcp-client:

```ts
import { Agent } from "@huggingface/mcp-client";

const HF_TOKEN = "hf_...";

const agent = new Agent({
  provider: "auto",
  model: "Qwen/Qwen2.5-72B-Instruct",
  apiKey: HF_TOKEN,
  servers: [
    {
      // Playwright MCP
      command: "npx",
      args: ["@playwright/mcp@latest"],
    },
  ],
});

await agent.loadTools();

for await (const chunk of agent.run("What are the top 5 trending models on Hugging Face?")) {
  if ("choices" in chunk) {
    const delta = chunk.choices[0]?.delta;
    if (delta.content) {
      console.log(delta.content);
    }
  }
}
```
There are more features, of course; check each library's README!
To contribute, install the dependencies and run the checks with pnpm:

```sh
sudo corepack enable
pnpm install

pnpm -r format:check
pnpm -r lint:check
pnpm -r test
```
Then, to build all packages:

```sh
pnpm -r build
```

This will generate ESM and CJS JavaScript files in `packages/*/dist`, e.g. `packages/inference/dist/index.mjs`.