Overview
TrailBase includes integrated object storage for managing files like images, documents, and user uploads. Use S3-compatible storage (AWS S3, MinIO, R2, etc.) or local filesystem storage.
Storage Backends
Local Filesystem Store files directly on the server’s filesystem
Amazon S3 Use AWS S3 buckets for scalable storage
MinIO Self-hosted S3-compatible storage
Cloudflare R2 S3-compatible with zero egress fees
Local Filesystem Storage
By default, TrailBase stores files in the uploads/ directory:
traildepot/
├── data/
│ └── main.db
├── uploads/ # File storage
│ ├── avatars/
│ ├── documents/
│ └── images/
└── config.textproto
No configuration needed - it works out of the box:
// From crates/core/src/app_state.rs
pub ( crate ) fn build_objectstore (
data_dir : & DataDir ,
config : Option < & S3StorageConfig >,
) -> Result < Box < dyn ObjectStore >, object_store :: Error > {
if let Some ( config ) = config {
// S3 configuration
}
// Default: local filesystem
return Ok ( Box :: new (
object_store :: local :: LocalFileSystem :: new_with_prefix (
data_dir . uploads_path ()
) ? ,
));
}
S3-Compatible Storage
AWS S3
Configure AWS S3 in config.textproto:
server {
s3_storage_config {
bucket_name: "my-app-uploads"
region: "us-east-1"
# Credentials from environment variables:
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
}
}
Or specify credentials explicitly:
server {
s3_storage_config {
bucket_name: "my-app-uploads"
region: "us-east-1"
access_key: "AKIAIOSFODNN7EXAMPLE"
secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
}
}
MinIO (Self-Hosted)
server {
s3_storage_config {
endpoint: "http://localhost:9000"
bucket_name: "uploads"
region: "us-east-1" # MinIO accepts any region value
access_key: "minioadmin"
secret_access_key: "minioadmin"
}
}
MinIO endpoints using http:// are automatically configured to allow HTTP (non-HTTPS) connections.
Cloudflare R2
server {
s3_storage_config {
endpoint: "https://[account-id].r2.cloudflarestorage.com"
bucket_name: "my-bucket"
region: "auto"
access_key: "your-r2-access-key"
secret_access_key: "your-r2-secret-key"
}
}
DigitalOcean Spaces
server {
s3_storage_config {
endpoint: "https://nyc3.digitaloceanspaces.com"
bucket_name: "my-space"
region: "nyc3"
access_key: "your-spaces-key"
secret_access_key: "your-spaces-secret"
}
}
Configuration Details
From crates/core/src/app_state.rs:
pub ( crate ) fn build_objectstore (
data_dir : & DataDir ,
config : Option < & S3StorageConfig >,
) -> Result < Box < dyn ObjectStore >, object_store :: Error > {
if let Some ( config ) = config {
let mut builder = object_store :: aws :: AmazonS3Builder :: from_env ();
// Custom endpoint (for MinIO, R2, etc.)
if let Some ( ref endpoint ) = config . endpoint {
builder = builder . with_endpoint ( endpoint );
// Allow HTTP for local development
if endpoint . starts_with ( "http://" ) {
builder = builder . with_client_options (
object_store :: ClientOptions :: default () . with_allow_http ( true )
)
}
}
// Region
if let Some ( ref region ) = config . region {
builder = builder . with_region ( region );
}
// Bucket name (required)
let Some ( ref bucket_name ) = config . bucket_name else {
panic! ( "S3StorageConfig missing 'bucket_name'." );
};
builder = builder . with_bucket_name ( bucket_name );
// Credentials
if let Some ( ref access_key ) = config . access_key {
builder = builder . with_access_key_id ( access_key );
}
if let Some ( ref secret_access_key ) = config . secret_access_key {
builder = builder . with_secret_access_key ( secret_access_key );
}
return Ok ( Box :: new ( builder . build () ? ));
}
// Default: local filesystem
return Ok ( Box :: new (
object_store :: local :: LocalFileSystem :: new_with_prefix (
data_dir . uploads_path ()
) ? ,
));
}
File Management
Upload Files
Files are automatically managed when using file columns in your schema:
-- Example record table. BLOB columns configured as file columns hold
-- references into the _files metadata table rather than the raw bytes.
CREATE TABLE posts (
id INTEGER PRIMARY KEY ,
title TEXT NOT NULL ,
-- File columns are automatically handled
image BLOB,
attachment BLOB
) STRICT;
TrailBase stores file metadata in a special _files table:
-- Per-file metadata maintained by TrailBase for every uploaded object.
CREATE TABLE _files (
id BLOB PRIMARY KEY , -- file id (UUID per the example API response)
name TEXT NOT NULL , -- original filename
content_type TEXT , -- MIME type, when known
size INTEGER NOT NULL , -- presumably size in bytes -- confirm
created INTEGER NOT NULL -- creation timestamp (epoch, presumably -- confirm)
);
File Deletion
Deleted files are queued for cleanup:
-- Queue of deleted file ids awaiting physical removal by the cleanup job.
CREATE TABLE _file_deletions (
id BLOB PRIMARY KEY ,
deleted INTEGER NOT NULL -- deletion timestamp; cleanup keys off this value
);
The file deletion job runs hourly to remove orphaned files:
// From crates/core/src/scheduler.rs
// Match arm from the system-job registry: declares the built-in "File
// Deletions" job, enabled by default and run on an hourly cron schedule.
SystemJobId :: FileDeletions => {
DefaultSystemJob {
name : "File Deletions" ,
default : SystemJob {
id : Some ( id as i32 ),
// "@hourly" is standard cron shorthand for "0 * * * *".
schedule : Some ( "@hourly" . into ()),
disabled : Some ( false ),
},
callback : build_callback ( move || {
// Deletes files older than 15 minutes from _file_deletions table
delete_pending_files_job ( & conn , & object_store , & db_name ) . await
}),
}
}
API Integration
Upload via Records API
# Upload a file together with regular field data as multipart/form-data.
# (Use your own file-column name in place of "file".)
curl -X POST \
  -H "Authorization: Bearer $TOKEN" \
  -F "title=My Document" \
  -F "file=@document.pdf" \
  https://api.example.com/api/records/v1/documents
Download Files
# Download a stored file by its id and write it to a local file.
curl -H "Authorization: Bearer $TOKEN" \
  https://api.example.com/api/records/v1/files/$FILE_ID \
  -o downloaded_file.pdf
File URLs
Files are served through the records API:
{
"id" : 1 ,
"title" : "My Document" ,
"_file_image" : {
"id" : "01234567-89ab-cdef-0123-456789abcdef" ,
"name" : "photo.jpg" ,
"content_type" : "image/jpeg" ,
"size" : 1048576 ,
"url" : "/api/records/v1/files/01234567-89ab-cdef-0123-456789abcdef"
}
}
Direct Object Store Access
Access the object store programmatically:
use object_store :: { ObjectStore , path :: Path };
async fn store_file (
object_store : & Arc < dyn ObjectStore >,
path : & str ,
data : Vec < u8 >,
) -> Result <(), object_store :: Error > {
let path = Path :: from ( path );
object_store . put ( & path , data . into ()) . await ? ;
Ok (())
}
async fn read_file (
object_store : & Arc < dyn ObjectStore >,
path : & str ,
) -> Result < Vec < u8 >, object_store :: Error > {
let path = Path :: from ( path );
let result = object_store . get ( & path ) . await ? ;
let bytes = result . bytes () . await ? ;
Ok ( bytes . to_vec ())
}
async fn delete_file (
object_store : & Arc < dyn ObjectStore >,
path : & str ,
) -> Result <(), object_store :: Error > {
let path = Path :: from ( path );
object_store . delete ( & path ) . await ? ;
Ok (())
}
async fn list_files (
object_store : & Arc < dyn ObjectStore >,
prefix : & str ,
) -> Result < Vec < String >, object_store :: Error > {
let prefix = Path :: from ( prefix );
let mut files = vec! [];
let list = object_store . list ( Some ( & prefix )) . await ? ;
for meta in list {
files . push ( meta . location . to_string ());
}
Ok ( files )
}
File Processing
Image Resizing
Process uploaded images:
use image :: ImageFormat ;
/// Produce a thumbnail (bounded by 300x300, aspect ratio preserved) for the
/// stored image `file_id` and write it under `thumbnails/`.
async fn resize_image(
  object_store: &Arc<dyn ObjectStore>,
  file_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
  use std::io::Cursor;

  // Read original.
  let path = Path::from(format!("images/{}", file_id));
  let data = object_store.get(&path).await?.bytes().await?;

  // Decode and resize.
  let img = image::load_from_memory(&data)?;
  let thumbnail = img.resize(300, 300, image::imageops::FilterType::Lanczos3);

  // `DynamicImage::write_to` requires a `Write + Seek` sink; a bare `Vec<u8>`
  // does not implement `Seek`, so wrap the buffer in a `Cursor`.
  let mut buffer = Vec::new();
  thumbnail.write_to(&mut Cursor::new(&mut buffer), ImageFormat::Jpeg)?;

  // Save thumbnail.
  let thumb_path = Path::from(format!("thumbnails/{}", file_id));
  object_store.put(&thumb_path, buffer.into()).await?;
  Ok(())
}
Virus Scanning
/// Scan the uploaded file `file_id`; returns `true` when the file is clean.
/// Infected files are deleted from the store before returning.
async fn scan_file(
  object_store: &Arc<dyn ObjectStore>,
  file_id: &str,
) -> Result<bool, Box<dyn std::error::Error>> {
  let path = Path::from(format!("uploads/{}", file_id));
  let data = object_store.get(&path).await?.bytes().await?;

  // Scan with ClamAV or similar.
  let is_clean = clamav_scan(&data).await?;
  if !is_clean {
    // Quarantine by deletion: drop the infected object immediately.
    object_store.delete(&path).await?;
  }
  Ok(is_clean)
}
Backup and Migration
Backup to S3
#!/bin/bash
# Backup local files to S3
# Mirrors traildepot/uploads/ into the backup bucket; *.tmp scratch files are
# skipped and objects land in the cheaper Glacier Instant Retrieval class.
aws s3 sync traildepot/uploads/ s3://my-backup-bucket/uploads/ \
--exclude "*.tmp" \
--storage-class GLACIER_IR
Migrate Between Storage
async fn migrate_storage (
source : & Arc < dyn ObjectStore >,
dest : & Arc < dyn ObjectStore >,
prefix : & str ,
) -> Result <(), object_store :: Error > {
let prefix_path = Path :: from ( prefix );
let list = source . list ( Some ( & prefix_path )) . await ? ;
for meta in list {
println! ( "Migrating: {}" , meta . location);
// Read from source
let data = source . get ( & meta . location) . await ? ;
let bytes = data . bytes () . await ? ;
// Write to destination
dest . put ( & meta . location, bytes ) . await ? ;
}
Ok (())
}
CDN Integration
Serve files through a CDN:
server {
# Use CloudFlare, Fastly, or CloudFront
cdn_url: "https://cdn.example.com"
}
Generate CDN URLs:
/// Build the public URL for a stored file, preferring the CDN when configured.
fn get_file_url(file_id: &str, cdn_url: Option<&str>) -> String {
  if let Some(cdn) = cdn_url {
    format!("{}/files/{}", cdn, file_id)
  } else {
    format!("/api/records/v1/files/{}", file_id)
  }
}
Caching
Set appropriate cache headers:
use axum :: http :: header;
// Serve a file with aggressive client-side caching (immutable-style, 1 year).
// NOTE(review): `read_file` is called here with a single argument, but the
// `read_file` helper defined earlier takes `(object_store, path)` -- confirm
// which helper is intended. The Content-Type is hard-coded to JPEG; presumably
// it should come from the stored `content_type` metadata -- confirm.
async fn serve_file ( file_id : & str ) -> Result < Response , Error > {
let data = read_file ( file_id ) . await ? ;
Ok ( Response :: builder ()
. header ( header :: CONTENT_TYPE , "image/jpeg" )
. header ( header :: CACHE_CONTROL , "public, max-age=31536000" ) // 1 year
// Using the file id as the ETag: ids are stable per file version here.
. header ( header :: ETAG , file_id )
. body ( data )
. unwrap ())
}
Multipart Uploads
For large files, use multipart uploads:
use object_store :: MultipartUpload ;
async fn upload_large_file (
object_store : & Arc < dyn ObjectStore >,
path : & str ,
data : Vec < u8 >,
) -> Result <(), object_store :: Error > {
let path = Path :: from ( path );
let mut upload = object_store . put_multipart ( & path ) . await ? ;
// Upload in 5MB chunks
const CHUNK_SIZE : usize = 5 * 1024 * 1024 ;
for chunk in data . chunks ( CHUNK_SIZE ) {
upload . put_part ( chunk . to_vec () . into ()) . await ? ;
}
upload . complete () . await ? ;
Ok (())
}
Security
Access Control
Control file access in your schema:
-- NOTE(review): CREATE POLICY is PostgreSQL row-level-security syntax; SQLite
-- (TrailBase's database) has no such statement. Confirm this example against
-- TrailBase's actual record-API access-rule configuration.
-- Only file owner can access
CREATE POLICY documents_read ON documents
FOR SELECT
USING ( _user . id = owner_id);
-- Public files
CREATE POLICY public_images_read ON images
FOR SELECT
USING (public = 1 );
Signed URLs
Generate temporary signed URLs for S3:
// Presigning is not exposed through the `object_store` crate; use the AWS SDK
// directly. (The original `use object_store::aws::AmazonS3;` was unused.)
use aws_sdk_s3::presigning::PresigningConfig;

/// Generate a presigned GET URL for `key` in `bucket`, valid for `expiration`.
async fn generate_presigned_url(
  bucket: &str,
  key: &str,
  expiration: Duration,
) -> Result<String, Box<dyn std::error::Error>> {
  // Credentials and region are resolved from the environment.
  let s3_client = aws_sdk_s3::Client::new(&aws_config::load_from_env().await);
  let presigned = s3_client
    .get_object()
    .bucket(bucket)
    .key(key)
    .presigned(PresigningConfig::expires_in(expiration)?)
    .await?;
  Ok(presigned.uri().to_string())
}
Monitoring
Storage Usage
async fn get_storage_stats (
object_store : & Arc < dyn ObjectStore >,
) -> Result < StorageStats , object_store :: Error > {
let list = object_store . list ( None ) . await ? ;
let mut total_size = 0 u64 ;
let mut file_count = 0 u64 ;
for meta in list {
total_size += meta . size as u64 ;
file_count += 1 ;
}
Ok ( StorageStats {
total_size_bytes : total_size ,
file_count ,
average_size : total_size / file_count . max ( 1 ),
})
}
Audit Logging
-- Application-level audit trail of file operations.
CREATE TABLE file_audit_log (
id INTEGER PRIMARY KEY AUTOINCREMENT,
file_id TEXT NOT NULL ,
-- Restrict actions to the three supported operations.
action TEXT NOT NULL CHECK ( action IN ( 'upload' , 'download' , 'delete' )),
user_id BLOB, -- acting user, when authenticated
ip_address TEXT ,
created_at INTEGER NOT NULL DEFAULT (unixepoch())
);
-- Indexes for the common lookup patterns (by file and by user).
CREATE INDEX idx_file_audit_log_file_id ON file_audit_log(file_id);
CREATE INDEX idx_file_audit_log_user_id ON file_audit_log(user_id);
Best Practices
Use S3 for production
Local filesystem is fine for development, but use S3-compatible storage for production scalability and durability.
Set lifecycle policies
Configure automatic deletion of old files with an S3 lifecycle rule:
{
"Rules" : [{
"Id" : "DeleteOldUploads" ,
"Status" : "Enabled" ,
"Expiration" : { "Days" : 90 },
"Filter" : { "Prefix" : "temp/" }
}]
}
Validate file types
Check MIME types and file extensions before storing.
Implement virus scanning
Scan uploaded files for malware before serving them.
Use CDN
Serve static files through a CDN for better performance.
Monitor storage costs
Track usage and set up alerts for unexpected growth.
Next Steps
Custom Endpoints Build file upload APIs
Jobs Scheduler Process files in background
OAuth Providers Store user avatars
Server-Side Rendering Display file galleries