Quick Start
// Credentials are read from environment variables
const file = Bun.s3.file("my-file.txt", {
  bucket: "my-bucket",
});

await file.write("Hello World");
const content = await file.text();
console.log(content);
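The default client is also exposed as a named export from the "bun" module, so the quick start can equivalently be written with imports (assuming credentials come from environment variables):

import { s3 } from "bun";

const file = s3.file("my-file.txt", { bucket: "my-bucket" });
await file.write("Hello World");
console.log(await file.text());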
Creating an S3 Client
Using Environment Variables
// Set in .env:
// S3_BUCKET=my-bucket
// S3_REGION=us-east-1
// S3_ACCESS_KEY_ID=...
// S3_SECRET_ACCESS_KEY=...
const client = new Bun.S3Client();
With Options
const client = new Bun.S3Client({
  bucket: "my-bucket",
  region: "us-east-1",
  endpoint: "https://s3.us-east-1.amazonaws.com",
  accessKeyId: "...",
  secretAccessKey: "...",
  sessionToken: "...", // Optional: only needed for temporary credentials
});
S3-Compatible Services
// Cloudflare R2
const r2 = new Bun.S3Client({
  bucket: "my-bucket",
  endpoint: "https://<account-id>.r2.cloudflarestorage.com",
  accessKeyId: "...",
  secretAccessKey: "...",
});

// DigitalOcean Spaces
const spaces = new Bun.S3Client({
  bucket: "my-space",
  endpoint: "https://nyc3.digitaloceanspaces.com",
  accessKeyId: "...",
  secretAccessKey: "...",
});

// MinIO (local development)
const minio = new Bun.S3Client({
  bucket: "mybucket",
  endpoint: "http://localhost:9000",
  accessKeyId: "minioadmin",
  secretAccessKey: "minioadmin",
});
Working with Files
Get File Reference
const client = new Bun.S3Client({ bucket: "my-bucket" });
// Instance method
const file = client.file("path/to/file.txt");
// Static method
const file2 = Bun.S3Client.file("file.txt", {
  bucket: "my-bucket",
  accessKeyId: "...",
  secretAccessKey: "...",
});
// Or use global s3
const file3 = Bun.s3.file("file.txt");
Reading Files
const file = client.file("data.txt");
// Read as text
const text = await file.text();
// Read as JSON
const data = await file.json();
// Read as ArrayBuffer
const buffer = await file.arrayBuffer();
// Read as stream
const stream = file.stream();
for await (const chunk of stream) {
  console.log(chunk);
}
Writing Files
const file = client.file("output.txt");
// Write string
await file.write("Hello World");
// Write buffer
await file.write(new Uint8Array([1, 2, 3]));
// Write from Response
const response = await fetch("https://example.com/data");
await file.write(response);
// Write another file
await file.write(Bun.file("./local.txt"));
// Write with options
await file.write(data, {
  type: "text/plain",
  acl: "public-read",
  contentDisposition: 'attachment; filename="data.txt"',
});
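write() takes strings, typed arrays, Blobs, Responses, and files rather than plain objects, so stringify JSON yourself before uploading. A minimal sketch (report is a hypothetical payload):

const report = { ok: true, generatedAt: Date.now() }; // hypothetical payload
await client.file("reports/latest.json").write(JSON.stringify(report), {
  type: "application/json",
});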
Write from Client
// Instance method
// Instance method
await client.write("path/to/file.txt", "Hello World", {
  type: "text/plain",
});

// Static method
await Bun.S3Client.write("file.txt", data, {
  bucket: "my-bucket",
  accessKeyId: "...",
  secretAccessKey: "...",
});
Streaming Files
Stream Writing
const file = client.file("large-file.dat");
const writer = file.writer({
  partSize: 10 * 1024 * 1024, // 10 MB parts
  queueSize: 4, // Upload 4 parts in parallel
  retry: 3, // Retry failed parts
});

// largeDataChunks: any iterable of chunks to upload
for (const chunk of largeDataChunks) {
  writer.write(chunk);
}
await writer.end();
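A common use for the incremental writer is copying a large local file to S3 without holding it all in memory. A sketch, assuming a hypothetical local file at ./backup.tar:

const source = Bun.file("./backup.tar").stream(); // hypothetical local path
const writer = client.file("backups/backup.tar").writer({
  partSize: 8 * 1024 * 1024,
});

for await (const chunk of source) {
  writer.write(chunk);
}
await writer.end();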
Stream Reading
const file = client.file("large-file.dat");
const stream = file.stream();
for await (const chunk of stream) {
  // Process chunk
}
Pipe to Response
const file = client.file("video.mp4");
Bun.serve({
  fetch(req) {
    return new Response(file.stream(), {
      headers: {
        "Content-Type": "video/mp4",
      },
    });
  },
});
File Operations
Check Existence
const file = client.file("file.txt");
if (await file.exists()) {
  console.log("File exists");
}

// Or from client
if (await client.exists("file.txt")) {
  console.log("File exists");
}
Get File Size
const size = await file.size;
console.log(`${size} bytes`);
// Or from client
const size2 = await client.size("file.txt");
Get File Stats
const stats = await file.stat();
console.log(stats.size); // bytes
console.log(stats.lastModified); // Date
console.log(stats.etag); // ETag
console.log(stats.type); // Content-Type
// Or from client
const stats2 = await client.stat("file.txt");
Delete Files
await file.delete();
// or
await file.unlink();
// From client
await client.delete("file.txt");
await client.unlink("file.txt");
Slice Files
const file = client.file("large.dat");
// Read first 1024 bytes
const header = file.slice(0, 1024);
const data = await header.arrayBuffer();
// Read from offset
const chunk = file.slice(1024, 2048);
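Since a slice is itself an S3File, it can be streamed like any other file, letting you read large objects range by range instead of downloading the whole thing. A short sketch:

const range = client.file("large.dat").slice(1024, 2048);
for await (const chunk of range.stream()) {
  // Chunks here cover bytes 1024-2047 only
}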
Presigned URLs
Generate Download URL
const file = client.file("document.pdf");
const url = file.presign({
  expiresIn: 3600, // 1 hour
  method: "GET",
});
console.log(url); // Temporary download URL
Generate Upload URL
const file = client.file("uploads/image.jpg");
const uploadUrl = file.presign({
  method: "PUT",
  expiresIn: 3600,
  type: "image/jpeg",
  acl: "public-read",
});

// Client can now upload directly
await fetch(uploadUrl, {
  method: "PUT",
  body: imageData,
  headers: { "Content-Type": "image/jpeg" },
});
From Client
const url = client.presign("file.pdf", {
  expiresIn: 7 * 24 * 60 * 60, // 7 days
});
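A presigned GET URL works with a plain fetch, so the consuming side needs no SDK or credentials until the URL expires:

const res = await fetch(url);
if (res.ok) {
  const bytes = await res.arrayBuffer(); // the downloaded object
}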
Listing Objects
// List all objects
const result = await client.list();
console.log(result.contents); // Array of objects
console.log(result.isTruncated); // More results?
console.log(result.nextContinuationToken); // For pagination
// List with prefix
const uploads = await client.list({
  prefix: "uploads/",
  maxKeys: 100,
});

// Pagination
let continuationToken: string | undefined;
do {
  const result = await client.list({
    prefix: "uploads/",
    maxKeys: 1000,
    continuationToken,
  });
  for (const obj of result.contents || []) {
    console.log(obj.key, obj.size);
  }
  continuationToken = result.nextContinuationToken;
} while (continuationToken);
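The pagination loop can be wrapped in an async generator so callers simply iterate over every object. A sketch (listAll is a hypothetical helper, not part of Bun's API):

import { S3Client } from "bun";

async function* listAll(client: S3Client, prefix: string) {
  let continuationToken: string | undefined;
  do {
    const page = await client.list({ prefix, maxKeys: 1000, continuationToken });
    yield* page.contents ?? [];
    continuationToken = page.nextContinuationToken;
  } while (continuationToken);
}

for await (const obj of listAll(client, "uploads/")) {
  console.log(obj.key);
}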
Configuration Options
ACL (Access Control)
await file.write(data, {
  acl: "private", // Default
  // acl: "public-read",
  // acl: "public-read-write",
  // acl: "authenticated-read",
  // acl: "bucket-owner-read",
  // acl: "bucket-owner-full-control",
});
Storage Class
await file.write(data, {
  storageClass: "STANDARD", // Default
  // storageClass: "STANDARD_IA",
  // storageClass: "ONEZONE_IA",
  // storageClass: "INTELLIGENT_TIERING",
  // storageClass: "GLACIER",
  // storageClass: "GLACIER_IR",
  // storageClass: "DEEP_ARCHIVE",
});
Content Headers
await file.write(data, {
  type: "application/json",
  contentDisposition: 'attachment; filename="data.json"',
  contentEncoding: "gzip",
});
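Note that contentEncoding only labels the bytes; it does not compress them for you. A sketch that gzips a JSON payload with Bun.gzipSync before uploading (the payload is hypothetical):

const payload = JSON.stringify({ hello: "world" }); // hypothetical payload
const gzipped = Bun.gzipSync(Buffer.from(payload));

await client.file("data.json.gz").write(gzipped, {
  type: "application/json",
  contentEncoding: "gzip",
});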
Multipart Upload Configuration
const writer = file.writer({
  partSize: 5 * 1024 * 1024, // 5 MB minimum
  queueSize: 5, // Upload 5 parts in parallel (default)
  retry: 3, // Retry failed parts 3 times (default)
});
Requester Pays
const file = client.file("file.txt", {
  requestPayer: true, // Acknowledge requester-pays charges
});
Type Signatures
class S3Client {
  constructor(options?: S3Options);

  file(path: string, options?: S3Options): S3File;
  write(path: string, data: Blob | Response | BunFile | ..., options?: S3Options): Promise<number>;
  delete(path: string, options?: S3Options): Promise<void>;
  unlink(path: string, options?: S3Options): Promise<void>;
  exists(path: string, options?: S3Options): Promise<boolean>;
  size(path: string, options?: S3Options): Promise<number>;
  stat(path: string, options?: S3Options): Promise<S3Stats>;
  presign(path: string, options?: S3FilePresignOptions): string;
  list(input?: S3ListObjectsOptions, options?: S3Options): Promise<S3ListObjectsResponse>;

  static file(path: string, options: S3Options): S3File;
  static write(path: string, data: ..., options: S3Options): Promise<number>;
  // ... other static methods
}

interface S3File extends Blob {
  readonly name?: string;
  readonly bucket?: string;

  text(): Promise<string>;
  json<T>(): Promise<T>;
  arrayBuffer(): Promise<ArrayBuffer>;
  stream(): ReadableStream<Uint8Array>;
  slice(begin?: number, end?: number): S3File;
  exists(): Promise<boolean>;
  write(data: Blob | Response | BunFile | ..., options?: S3Options): Promise<number>;
  writer(options?: S3Options): NetworkSink;
  presign(options?: S3FilePresignOptions): string;
  delete(): Promise<void>;
  unlink(): Promise<void>;
  stat(): Promise<S3Stats>;
}

interface S3Options {
  bucket?: string;
  region?: string;
  endpoint?: string;
  accessKeyId?: string;
  secretAccessKey?: string;
  sessionToken?: string;
  acl?: "private" | "public-read" | ...;
  type?: string;
  contentDisposition?: string;
  contentEncoding?: string;
  storageClass?: "STANDARD" | "STANDARD_IA" | ...;
  partSize?: number;
  queueSize?: number;
  retry?: number;
  requestPayer?: boolean;
  virtualHostedStyle?: boolean;
}