Format a memory size in bytes as a human-readable string.
def format_memory(bytes_value: Optional[Union[int, float]]) -> str
Formatted string (e.g., "2.50 GB", "512.00 MB")
Example:
from tfmemprof.utils import format_memory
print(format_memory(1024)) # "1.00 KB"
print(format_memory(1024 * 1024)) # "1.00 MB"
print(format_memory(2.5 * 1024**3)) # "2.50 GB"
print(format_memory(None)) # "N/A"
get_gpu_info
Get detailed GPU information for TensorFlow.
def get_gpu_info() -> Dict[str, Any]
List of GPU device information
Total GPU memory across all devices in MB
Example:
from tfmemprof.utils import get_gpu_info
info = get_gpu_info()
print(f"GPU available: {info['available']}")
print(f"GPU count: {info['count']}")
print(f"Total memory: {info['total_memory']:.2f} MB")
for device in info['devices']:
print(f"GPU {device['id']}: {device['current_memory_mb']:.2f} MB")
get_system_info
Get system and TensorFlow environment information.
def get_system_info() -> Dict[str, Any]
GPU information from get_gpu_info()
Backend detection information
get_backend_info
Return backend diagnostics for TensorFlow.
def get_backend_info() -> BackendInfo
Running on Apple Silicon (M1/M2)
Number of GPUs visible to TensorFlow
Backend type: 'cuda', 'rocm', 'metal', 'gpu', or 'cpu'
TensorFlow built with CUDA
TensorFlow built with ROCm
TensorFlow built with TensorRT
tensorflow_metal_installed
tensorflow-metal package installed
Analysis utilities
analyze_fragmentation
Analyze memory fragmentation from snapshots.
def analyze_fragmentation(snapshots: List[MemorySnapshot]) -> Dict[str, float]
Average fragmentation (0.0-1.0)
Trend (positive = increasing fragmentation)
Maximum fragmentation observed
Minimum fragmentation observed
suggest_optimizations
Generate TensorFlow-specific optimization suggestions.
def suggest_optimizations(profile_result: ProfileResult) -> List[str]
List of optimization suggestions (up to 10)
Example:
from tfmemprof.utils import suggest_optimizations
suggestions = suggest_optimizations(results)
for i, suggestion in enumerate(suggestions, 1):
print(f"{i}. {suggestion}")
generate_summary_report
Generate a comprehensive summary report.
def generate_summary_report(profile_result: ProfileResult) -> str
Formatted text report with statistics, analysis, and recommendations
Example:
from tfmemprof.utils import generate_summary_report
report = generate_summary_report(results)
print(report)
# Or save to file
with open('memory_report.txt', 'w') as f:
f.write(report)
TensorFlow utilities
get_tensorflow_memory_usage
Get current TensorFlow memory usage.
def get_tensorflow_memory_usage() -> Dict[str, float]
Current GPU memory usage in MB
Peak GPU memory usage in MB
optimize_tensorflow_memory
Apply TensorFlow memory optimizations.
def optimize_tensorflow_memory() -> List[str]
List of optimizations applied
Applies:
- Memory growth for all GPUs
- Mixed precision training (if supported)
- XLA compilation (if supported)
Example:
from tfmemprof.utils import optimize_tensorflow_memory
applied = optimize_tensorflow_memory()
for opt in applied:
print(f"✓ {opt}")
clear_tensorflow_session
Clear TensorFlow session and free memory.
def clear_tensorflow_session() -> None
validate_tensorflow_environment
Validate TensorFlow environment for memory profiling.
def validate_tensorflow_environment() -> Dict[str, Any]
Memory growth successfully enabled
TensorFlow version compatible (2.4+)
List of validation issues
Example:
from tfmemprof.utils import validate_tensorflow_environment
validation = validate_tensorflow_environment()
if validation['tensorflow_available']:
print("✓ TensorFlow available")
if validation['gpu_available']:
print("✓ GPU available")
if validation['issues']:
print("\nIssues found:")
for issue in validation['issues']:
print(f" - {issue}")
Complete example
from tfmemprof.profiler import TFMemoryProfiler
from tfmemprof.utils import (
format_memory,
get_system_info,
get_gpu_info,
analyze_fragmentation,
suggest_optimizations,
generate_summary_report,
optimize_tensorflow_memory,
validate_tensorflow_environment
)
# Validate environment
validation = validate_tensorflow_environment()
if not validation['tensorflow_available']:
print("Error: TensorFlow not available")
exit(1)
# Get system info
sys_info = get_system_info()
print(f"TensorFlow {sys_info['tensorflow_version']}")
print(f"Python {sys_info['python_version']}")
gpu_info = get_gpu_info()
if gpu_info['available']:
print(f"GPUs: {gpu_info['count']}")
print(f"Total GPU memory: {format_memory(gpu_info['total_memory'] * 1024 * 1024)}")
# Apply optimizations
print("\nApplying optimizations...")
optimizations = optimize_tensorflow_memory()
for opt in optimizations:
print(f" ✓ {opt}")
# Profile your code
profiler = TFMemoryProfiler()
with profiler.profile_context("training"):
# Your training code
pass
results = profiler.get_results()
# Analyze fragmentation
frag = analyze_fragmentation(results.snapshots)
print(f"\nFragmentation score: {frag['fragmentation_score']:.3f}")
# Get suggestions
suggestions = suggest_optimizations(results)
print("\nOptimization suggestions:")
for i, suggestion in enumerate(suggestions[:5], 1):
print(f" {i}. {suggestion}")
# Generate full report
report = generate_summary_report(results)
print("\n" + "="*50)
print(report)
# Save report
with open('memory_analysis_report.txt', 'w') as f:
f.write(report)