Backtesting evaluates model accuracy by testing predictions against historical data. CryptoView Pro provides a comprehensive backtesting framework with advanced metrics to validate model performance before live deployment.
RMSE (Root Mean Squared Error)
Square root of the average squared error; penalizes large errors more heavily than MAE.
Interpretation: Lower is better. Always >= MAE. A large gap between RMSE and MAE indicates inconsistent errors. Example: RMSE = 380, MAE = 250 suggests some large outlier errors.
MAPE (Mean Absolute Percentage Error)
Average absolute percentage error; a scale-independent metric that can be compared across assets at different price levels.
Interpretation: Percentage error regardless of price level. <5% is excellent, <10% is good. Example: MAPE = 2.3% means predictions are typically within 2.3% of actual price.
Median APE
Median absolute percentage error, robust to outliers.
Interpretation: Reflects the typical error even when a few predictions miss badly; a Median APE much lower than MAPE points to a handful of outlier errors. All four metrics are sketched below.
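In the walk-forward example at the end of this section, these metrics come from utils.backtesting.Backtester.calculate_metrics. As a minimal reference sketch of how the four metrics relate (the helper name point_forecast_metrics and the 'MAE'/'Median_APE' keys are illustrative, not the library's confirmed output), they reduce to a few lines of NumPy, assuming strictly positive prices:

import numpy as np

def point_forecast_metrics(actuals: np.ndarray, predictions: np.ndarray) -> dict:
    """Sketch of the four accuracy metrics described above."""
    errors = actuals - predictions
    ape = np.abs(errors) / np.abs(actuals) * 100  # absolute percentage errors

    return {
        'RMSE': float(np.sqrt(np.mean(errors ** 2))),  # penalizes large misses
        'MAE': float(np.mean(np.abs(errors))),
        'MAPE': float(np.mean(ape)),                   # average percentage error
        'Median_APE': float(np.median(ape)),           # robust to outliers
    }

# Example: one large miss inflates RMSE and MAPE but barely moves Median APE
metrics = point_forecast_metrics(
    np.array([100.0, 102.0, 98.0, 101.0]),
    np.array([101.0, 101.5, 97.5, 110.0]),
)
print(metrics)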
from models.xgboost_model import XGBoostCryptoPredictor, backtest_model

# Initialize model
predictor = XGBoostCryptoPredictor()

# Run backtest with custom split
results = backtest_model(df, predictor, train_size=0.75)  # 75% train, 25% test

# Analyze feature importance
feature_importance = results['feature_importance']
print("\nTop 10 Most Important Features:")
print(feature_importance.head(10))

# Features are ranked by their contribution to predictions
# High importance = model relies heavily on this feature
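Continuing from the snippet above, a quick way to inspect the ranking is a horizontal bar chart. This is a sketch that assumes feature_importance is a pandas DataFrame with 'feature' and 'importance' columns; the actual column names returned by backtest_model may differ:

import matplotlib.pyplot as plt

top = feature_importance.head(10)       # assumed columns: 'feature', 'importance'
plt.barh(top['feature'], top['importance'])
plt.gca().invert_yaxis()                # most important feature on top
plt.xlabel('Importance')
plt.title('Top 10 XGBoost Features')
plt.tight_layout()
plt.show()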
from models.hybrid_model import HybridCryptoPredictor

# Train hybrid model
predictor = HybridCryptoPredictor()
training_info = predictor.train(df)

# Compare both models
print("XGBoost Performance:")
for metric, value in training_info['xgboost'].items():
    if 'test' in metric:
        print(f"  {metric}: {value:.2f}")

print("\nProphet Performance:")
for metric, value in training_info['prophet'].items():
    print(f"  {metric}: {value:.2f}")

# Test predictions at different horizons
for hours in [24, 72, 168, 720]:
    predictions = predictor.predict_future(df, periods=hours)
    recommended = predictions.get('recommended', 'unknown')
    print(f"\n{hours}h prediction: Using {recommended.upper()} model")
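The 'recommended' key suggests the hybrid model routes each horizon to whichever component tends to perform better there. As a purely hypothetical illustration of such routing (choose_model and the 168-hour crossover are assumptions, not HybridCryptoPredictor's actual rule), one common pattern is to prefer XGBoost at short horizons and Prophet, which models trend and seasonality, at long ones:

def choose_model(periods: int, crossover_hours: int = 168) -> str:
    """Hypothetical routing rule: feature-driven model for short horizons,
    trend/seasonality model for long ones."""
    return 'xgboost' if periods <= crossover_hours else 'prophet'

for hours in [24, 72, 168, 720]:
    print(f"{hours}h -> {choose_model(hours)}")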
import pandas as pd
import numpy as np
from models.xgboost_model import XGBoostCryptoPredictor
from utils.backtesting import Backtester

def walk_forward_backtest(df: pd.DataFrame,
                          window_size: int = 1000,
                          step_size: int = 24,
                          prediction_horizon: int = 24) -> dict:
    """
    Walk-forward backtesting with rolling windows.

    Args:
        df: Historical data
        window_size: Training window size
        step_size: How many periods to move forward each iteration
        prediction_horizon: How far ahead to predict
    """
    predictions = []
    actuals = []
    n_windows = 0

    for i in range(window_size, len(df) - prediction_horizon, step_size):
        # Training window: the window_size rows ending at position i
        train_data = df.iloc[i - window_size:i]

        # Train a fresh model on this window only
        predictor = XGBoostCryptoPredictor()
        predictor.train(train_data, train_size=1.0)  # use the whole window for training

        # Predict the next prediction_horizon periods
        future = predictor.predict_future(train_data, periods=prediction_horizon)

        # Actual prices that followed the window
        actual = df['close'].iloc[i:i + prediction_horizon].values
        pred = future['predicted_price'].values

        predictions.extend(pred)
        actuals.extend(actual)
        n_windows += 1

    # Calculate metrics over all windows
    predictions = np.array(predictions)
    actuals = np.array(actuals)

    backtester = Backtester()
    metrics = backtester.calculate_metrics(actuals, predictions)

    return {
        'metrics': metrics,
        'predictions': predictions,
        'actuals': actuals,
        'n_windows': n_windows  # counted in the loop, not estimated from len(df)
    }

# Run walk-forward analysis
results = walk_forward_backtest(
    df,
    window_size=1000,        # train on 1000 hours
    step_size=24,            # move forward 24 hours each iteration
    prediction_horizon=24    # predict 24 hours ahead
)

print(f"Walk-Forward Results ({results['n_windows']} windows):")
print(f"  Average MAPE: {results['metrics']['MAPE']:.2f}%")
print(f"  Direction Accuracy: {results['metrics']['Direction_Accuracy']:.2f}%")
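Direction accuracy is often the number traders care about most, since getting the sign of the move right matters more than its magnitude for entries and exits. The sketch below shows one common definition, the share of steps where the predicted move from the previous actual price matches the realized move; this is an assumed definition for illustration and may not match utils.backtesting exactly:

import numpy as np

def direction_accuracy(actuals: np.ndarray, predictions: np.ndarray) -> float:
    """Percent of steps where the predicted price move has the same sign
    as the actual move, both measured from the previous actual price."""
    actual_moves = np.sign(actuals[1:] - actuals[:-1])
    predicted_moves = np.sign(predictions[1:] - actuals[:-1])
    return float(np.mean(actual_moves == predicted_moves) * 100)

# e.g. direction_accuracy(results['actuals'], results['predictions'])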