Skip to main content

Memory formatting

format_memory

Format memory size in human-readable format.
def format_memory(bytes_value: Optional[Union[int, float]]) -> str
bytes_value
Optional[Union[int, float]]
Memory size in bytes
str
str
Formatted string (e.g., "2.50 GB", "512.00 MB"), or "N/A" when the input is None
Example:
from tfmemprof.utils import format_memory

# Demonstrate formatting across unit boundaries, including the None fallback.
for size in (1024, 1024 * 1024, 2.5 * 1024**3, None):
    print(format_memory(size))  # "1.00 KB", "1.00 MB", "2.50 GB", "N/A"

System information

get_gpu_info

Get detailed GPU information for TensorFlow.
def get_gpu_info() -> Dict[str, Any]
Dict[str, Any]
Dict[str, Any]
available
bool
Whether GPU is available
count
int
Number of GPUs detected
devices
List[Dict]
List of GPU device information
driver_version
str
GPU driver version
cuda_version
str
CUDA version
total_memory
float
Total GPU memory across all devices in MB
Example:
from tfmemprof.utils import get_gpu_info

# Summarize GPU availability and aggregate memory for this machine.
gpu = get_gpu_info()
for line in (
    f"GPU available: {gpu['available']}",
    f"GPU count: {gpu['count']}",
    f"Total memory: {gpu['total_memory']:.2f} MB",
):
    print(line)

# Per-device current memory usage.
for dev in gpu['devices']:
    print(f"GPU {dev['id']}: {dev['current_memory_mb']:.2f} MB")

get_system_info

Get system and TensorFlow environment information.
def get_system_info() -> Dict[str, Any]
Dict[str, Any]
Dict[str, Any]
platform
str
System platform
python_version
str
Python version
tensorflow_version
str
TensorFlow version
cpu_count
int
Number of CPU cores
total_memory_gb
float
Total system RAM in GB
gpu
Dict
GPU information from get_gpu_info()
backend
BackendInfo
Backend detection information

get_backend_info

Return backend diagnostics for TensorFlow.
def get_backend_info() -> BackendInfo
BackendInfo
BackendInfo
is_apple_silicon
bool
Running on Apple Silicon (M1/M2)
hardware_gpu_detected
bool
GPU hardware detected
runtime_gpu_count
int
Number of GPUs visible to TensorFlow
runtime_backend
str
Backend type: 'cuda', 'rocm', 'metal', 'gpu', or 'cpu'
is_cuda_build
bool
TensorFlow built with CUDA
is_rocm_build
bool
TensorFlow built with ROCm
is_tensorrt_build
bool
TensorFlow built with TensorRT
tensorflow_metal_installed
bool
tensorflow-metal package installed

Analysis utilities

analyze_fragmentation

Analyze memory fragmentation from snapshots.
def analyze_fragmentation(snapshots: List[MemorySnapshot]) -> Dict[str, float]
snapshots
List[MemorySnapshot]
List of memory snapshots
Dict[str, float]
Dict[str, float]
fragmentation_score
float
Average fragmentation (0.0-1.0)
fragmentation_trend
float
Trend (positive = increasing fragmentation)
max_fragmentation
float
Maximum fragmentation observed
min_fragmentation
float
Minimum fragmentation observed

suggest_optimizations

Generate TensorFlow-specific optimization suggestions.
def suggest_optimizations(profile_result: ProfileResult) -> List[str]
profile_result
ProfileResult
Profiling results
List[str]
List[str]
List of optimization suggestions (up to 10)
Example:
from tfmemprof.utils import suggest_optimizations

# Print suggestions as a numbered list (assumes `results` from a prior profiling run).
for rank, tip in enumerate(suggest_optimizations(results), start=1):
    print(f"{rank}. {tip}")

generate_summary_report

Generate a comprehensive summary report.
def generate_summary_report(profile_result: ProfileResult) -> str
profile_result
ProfileResult
Profiling results
str
str
Formatted text report with statistics, analysis, and recommendations
Example:
from tfmemprof.utils import generate_summary_report

# Build the formatted text report (assumes `results` from a prior profiling run).
report = generate_summary_report(results)
print(report)

# Persist the same report to disk for later review.
with open('memory_report.txt', 'w') as out:
    out.write(report)

TensorFlow utilities

get_tensorflow_memory_usage

Get current TensorFlow memory usage.
def get_tensorflow_memory_usage() -> Dict[str, float]
Dict[str, float]
Dict[str, float]
gpu_current_mb
float
Current GPU memory usage in MB
gpu_peak_mb
float
Peak GPU memory usage in MB
cpu_mb
float
CPU memory usage in MB

optimize_tensorflow_memory

Apply TensorFlow memory optimizations.
def optimize_tensorflow_memory() -> List[str]
List[str]
List[str]
List of optimizations applied
Applies:
  • Memory growth for all GPUs
  • Mixed precision training (if supported)
  • XLA compilation (if supported)
Example:
from tfmemprof.utils import optimize_tensorflow_memory

# Apply memory optimizations and report each one that took effect.
for applied_opt in optimize_tensorflow_memory():
    print(f"✓ {applied_opt}")

clear_tensorflow_session

Clear TensorFlow session and free memory.
def clear_tensorflow_session() -> None

validate_tensorflow_environment

Validate TensorFlow environment for memory profiling.
def validate_tensorflow_environment() -> Dict[str, Any]
Dict[str, Any]
Dict[str, Any]
tensorflow_available
bool
TensorFlow installed
gpu_available
bool
GPU available
memory_growth_enabled
bool
Memory growth successfully enabled
version_compatible
bool
TensorFlow version compatible (2.4+)
issues
List[str]
List of validation issues
Example:
from tfmemprof.utils import validate_tensorflow_environment

# Run the environment checks and report what is available.
checks = validate_tensorflow_environment()

if checks['tensorflow_available']:
    print("✓ TensorFlow available")
if checks['gpu_available']:
    print("✓ GPU available")

# Surface any problems the validator detected.
if checks['issues']:
    print("\nIssues found:")
    for problem in checks['issues']:
        print(f"  - {problem}")

Complete example

"""End-to-end tfmemprof workflow: validate, inspect, optimize, profile, analyze, report."""
from tfmemprof.profiler import TFMemoryProfiler
from tfmemprof.utils import (
    format_memory,
    get_system_info,
    get_gpu_info,
    analyze_fragmentation,
    suggest_optimizations,
    generate_summary_report,
    optimize_tensorflow_memory,
    validate_tensorflow_environment
)

# Validate environment before doing any work; bail out if TF is missing.
validation = validate_tensorflow_environment()
if not validation['tensorflow_available']:
    print("Error: TensorFlow not available")
    exit(1)

# Get system info
sys_info = get_system_info()
print(f"TensorFlow {sys_info['tensorflow_version']}")
print(f"Python {sys_info['python_version']}")

gpu_info = get_gpu_info()
if gpu_info['available']:
    print(f"GPUs: {gpu_info['count']}")
    # total_memory is reported in MB (per the docs above); convert to bytes for format_memory
    print(f"Total GPU memory: {format_memory(gpu_info['total_memory'] * 1024 * 1024)}")

# Apply optimizations (returns a list describing each optimization applied)
print("\nApplying optimizations...")
optimizations = optimize_tensorflow_memory()
for opt in optimizations:
    print(f"  ✓ {opt}")

# Profile your code under a named context; snapshots are collected here.
profiler = TFMemoryProfiler()

with profiler.profile_context("training"):
    # Your training code
    pass

results = profiler.get_results()

# Analyze fragmentation from the collected snapshots (score in 0.0-1.0)
frag = analyze_fragmentation(results.snapshots)
print(f"\nFragmentation score: {frag['fragmentation_score']:.3f}")

# Get suggestions; show only the top five of up to 10 returned
suggestions = suggest_optimizations(results)
print("\nOptimization suggestions:")
for i, suggestion in enumerate(suggestions[:5], 1):
    print(f"  {i}. {suggestion}")

# Generate full report
report = generate_summary_report(results)
print("\n" + "="*50)
print(report)

# Save report
with open('memory_analysis_report.txt', 'w') as f:
    f.write(report)

Build docs developers (and LLMs) love