Use LlamaIndex.TS across Node.js, Deno, Bun, Cloudflare Workers, and Vercel Edge Runtime
LlamaIndex.TS is designed to work seamlessly across all major JavaScript runtimes. This is achieved through runtime-specific entry points and the @llamaindex/env abstraction layer.
Node.js provides the complete LlamaIndex.TS feature set:
Node.js with filesystem access
import { VectorStoreIndex, SimpleDirectoryReader, Settings } from "llamaindex";import { OpenAI } from "@llamaindex/openai";Settings.llm = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });// File system access availableconst reader = new SimpleDirectoryReader();const documents = await reader.loadData({ directoryPath: "./docs" });// Full index functionalityconst index = await VectorStoreIndex.fromDocuments(documents);const queryEngine = index.asQueryEngine();const response = await queryEngine.query({ query: "What is LlamaIndex?"});
Verify Node.js runtime
import { process } from "@llamaindex/env";

// Detect Node.js by probing for the Node version string on the
// runtime-agnostic `process` shim exported by @llamaindex/env.
if (typeof process !== "undefined" && process.versions?.node) {
  console.log("Running in Node.js", process.versions.node);
}
import { OpenAI } from "@llamaindex/openai";
import { Settings } from "llamaindex";

// Force Edge Runtime
export const runtime = "edge";

Settings.llm = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

/**
 * POST handler: forwards the user's message to the configured LLM
 * and returns the chat response as JSON.
 */
export async function POST(request: Request) {
  const { message } = await request.json();
  const response = await Settings.llm.chat({
    messages: [{ role: "user", content: message }],
  });
  return Response.json(response);
}
utils/llm.ts
import "llamaindex";

// Verify Edge Runtime
// @ts-expect-error EdgeRuntime is not defined
if (typeof EdgeRuntime !== "string") {
  throw new Error("Expected to run in EdgeRuntime");
}
next.config.mjs
import { withLlamaIndex } from "llamaindex/next";

/** @type {import('next').NextConfig} */
const nextConfig = {
  // Your Next.js config
};

// withLlamaIndex wraps the config so LlamaIndex bundles correctly in Next.js.
export default withLlamaIndex(nextConfig);
import { VectorStoreIndex, Document, Settings } from "npm:llamaindex";
import { OpenAI } from "npm:@llamaindex/openai";

// Deno reads environment variables through Deno.env.
Settings.llm = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

// Build an index from an in-memory document and query it.
const documents = [
  new Document({ text: "LlamaIndex is a data framework for LLMs" }),
];
const index = await VectorStoreIndex.fromDocuments(documents);
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query({
  query: "What is LlamaIndex?",
});
console.log(response.toString());
import { VectorStoreIndex, Settings } from "llamaindex";import { OpenAI } from "@llamaindex/openai";Settings.llm = new OpenAI({ apiKey: Bun.env.OPENAI_API_KEY });// Bun has fast file I/Oconst file = Bun.file("./document.txt");const text = await file.text();const documents = [new Document({ text })];const index = await VectorStoreIndex.fromDocuments(documents);