Borg UI provides fast archive browsing with intelligent Redis caching that dramatically improves performance for large repositories. Navigate through backup archives, preview files, and restore individual files or entire directories.
Archive browsing uses Redis caching to accelerate file listings:
# From browse.py:23-255MAX_ITEMS_IN_MEMORY = 1_000_000 # Maximum items to loadMAX_ESTIMATED_MEMORY_MB = 1024 # 1GB memory limitITEM_SIZE_ESTIMATE = 200 # Average bytes per item@router.get("/{repository_id}/{archive_name}")async def browse_archive_contents( repository_id: int, archive_name: str, path: str = Query("", description="Path within archive"), current_user: User = Depends(get_current_user), db: Session = Depends(get_db)): # Check cache first all_items = await archive_cache.get(repository_id, archive_name) if all_items is not None: logger.info("Using cached archive contents", archive=archive_name, items_count=len(all_items))
Performance Improvement: First browse loads from Borg (slow), subsequent browses use Redis cache (600x faster). Cache is automatically invalidated when archives change.
Borg UI protects against out-of-memory errors when browsing large archives:
# From browse.py:52-75result = await borg.list_archive_contents( repository.path, archive_name, path="", remote_path=repository.remote_path, passphrase=repository.passphrase, max_lines=max_items, # Kill borg process if limit exceeded bypass_lock=repository.bypass_lock)# Check if line limit was exceededif result.get("line_count_exceeded"): lines_read = result.get("lines_read", 0) raise HTTPException( status_code=413, detail=f"Archive is too large to browse (>{lines_read:,} files). " f"Maximum supported: {max_items:,} files. " f"You can increase this limit in Settings > System." )
Configurable Limits:
browse_max_items: Maximum files to load (default: 1,000,000)
browse_max_memory_mb: Maximum memory usage (default: 1024 MB)
For archives with millions of files, consider using command-line tools or increasing memory limits in Settings > System.
# From browse.py:133-161
def calculate_directory_size(dir_path: str, items=None) -> int:
    """Return the total size in bytes of all files under ``dir_path``.

    Scans the flat archive listing and sums the sizes of every entry
    nested below the directory. Directory entries (type ``"d"``) and
    entries that report no size are skipped.

    Args:
        dir_path: Path of the directory within the archive. An empty
            string means the archive root, in which case every file is
            counted (the previous implementation silently returned 0).
        items: Optional listing to scan; defaults to the enclosing
            scope's cached ``all_items``.

    Returns:
        Total size in bytes of the matching files.
    """
    if items is None:
        items = all_items  # cached flat listing from the enclosing handler
    search_prefix = f"{dir_path}/" if dir_path else ""
    total_size = 0
    for item in items:
        item_path = item["path"]
        # An item belongs to the directory when it is nested under it
        # (prefix match), it is the entry at dir_path itself, or we are
        # summing the archive root (empty prefix matches everything).
        in_dir = (
            not search_prefix
            or item_path.startswith(search_prefix)
            or item_path == dir_path
        )
        if not in_dir:
            continue
        # Only count files, not directories; some entries have no size.
        if item.get("type") != "d" and item.get("size") is not None:
            total_size += item.get("size", 0)
    return total_size
# From browse.py:122-131# Store in cachecache_success = await archive_cache.set(repository_id, archive_name, all_items)if cache_success: logger.info("Cached archive contents", archive=archive_name, items_count=len(all_items))else: logger.warning("Failed to cache archive (too large or cache full)", archive=archive_name, items_count=len(all_items))