Overview
This guide covers deploying CryptoView Pro to production, including Docker containerization, cloud deployment options, monitoring, and best practices for running ML models in production.

Production Checklist: Docker setup ✓ · Cloud deployment ✓ · Model persistence ✓ · Monitoring ✓ · API endpoints ✓
Deployment Architecture
Docker Setup
1. Create Dockerfile
Create `Dockerfile` in your project root:
# Dockerfile
FROM python:3.10-slim

# Set working directory
WORKDIR /app

# Install system dependencies needed to build Python wheels.
# --no-install-recommends keeps the image slim; the apt cache is
# removed in the same RUN layer so it never bloats the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first (for layer caching): dependency layers are
# rebuilt only when requirements.txt changes, not on every code edit.
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY source/ ./source/

# WARNING: baking .env.production into the image embeds secrets in the
# pushed image layers. Prefer injecting configuration at runtime
# (docker run --env-file, compose `environment:`, orchestrator secrets).
COPY .env.production .env

# Create directories for models and cache
RUN mkdir -p /app/models_cache /app/data_cache

# Expose Streamlit port
EXPOSE 8501

# Health check against Streamlit's built-in health endpoint.
# Explicit interval/timeout/retries instead of Docker's defaults.
HEALTHCHECK --interval=30s --timeout=5s --retries=3 \
    CMD curl --fail http://localhost:8501/_stcore/health || exit 1

# Run the application
ENTRYPOINT ["streamlit", "run", "source/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
2. Docker Compose for Local Testing
Create `docker-compose.yml`:
# docker-compose.yml
version: '3.8'

services:
  # Main Streamlit application
  cryptoview-app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: cryptoview-pro
    ports:
      - "8501:8501"
    environment:
      - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN}
      - TELEGRAM_CHAT_ID=${TELEGRAM_CHAT_ID}
      - REDIS_URL=redis://redis:6379
      # Database credentials come from the host environment (or .env)
      # so they are never committed; defaults match local development.
      - DATABASE_URL=postgresql://${POSTGRES_USER:-user}:${POSTGRES_PASSWORD:-password}@postgres:5432/cryptodb
    volumes:
      - ./models_cache:/app/models_cache
      - ./data_cache:/app/data_cache
    depends_on:
      - redis
      - postgres
    restart: unless-stopped
    networks:
      - cryptoview-network

  # Redis for caching
  redis:
    image: redis:7-alpine
    container_name: cryptoview-redis
    ports:
      - "6379:6379"
    volumes:
      - redis-data:/data
    networks:
      - cryptoview-network
    restart: unless-stopped

  # PostgreSQL for storing predictions and alerts
  postgres:
    image: postgres:15-alpine
    container_name: cryptoview-postgres
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-user}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
      - POSTGRES_DB=cryptodb
    ports:
      - "5432:5432"
    volumes:
      - postgres-data:/var/lib/postgresql/data
    networks:
      - cryptoview-network
    restart: unless-stopped

  # Nginx reverse proxy (optional, for production)
  nginx:
    image: nginx:alpine
    container_name: cryptoview-nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
    depends_on:
      - cryptoview-app
    networks:
      - cryptoview-network
    restart: unless-stopped

volumes:
  redis-data:
  postgres-data:

networks:
  cryptoview-network:
    driver: bridge
3. Nginx Configuration
Create `nginx.conf` for SSL and load balancing:
# nginx.conf
events {
    worker_connections 1024;
}

http {
    # Single upstream; add more "server" lines here to load-balance
    # multiple app replicas.
    upstream streamlit_app {
        server cryptoview-app:8501;
    }

    # Redirect HTTP to HTTPS
    server {
        listen 80;
        server_name your-domain.com;
        return 301 https://$server_name$request_uri;
    }

    # HTTPS server
    server {
        listen 443 ssl http2;
        server_name your-domain.com;

        # SSL certificates
        ssl_certificate /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/key.pem;

        # SSL configuration
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers HIGH:!aNULL:!MD5;

        # Proxy settings for Streamlit
        location / {
            proxy_pass http://streamlit_app;
            proxy_http_version 1.1;
            # Upgrade headers let Streamlit's WebSocket traffic through
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            # Timeouts for long-running predictions
            proxy_read_timeout 300s;
            proxy_connect_timeout 300s;
        }

        # WebSocket for Streamlit. This connection is long-lived, so
        # the read timeout must be far larger than nginx's 60s default
        # or idle sessions are disconnected and the UI reloads.
        location /_stcore/stream {
            proxy_pass http://streamlit_app/_stcore/stream;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $host;
            proxy_read_timeout 86400s;
        }
    }
}
4. Build and Run
# Build the Docker image (on Docker Compose v2, "docker compose" also works)
docker-compose build
# Start all services in detached mode
docker-compose up -d
# Follow logs for the app container
docker-compose logs -f cryptoview-app
# Stop and remove containers (named volumes are preserved)
docker-compose down
# Restart a specific service
docker-compose restart cryptoview-app
Cloud Deployment Options
Option 1: AWS (Recommended)
Using AWS ECS (Elastic Container Service)
# 1. Install AWS CLI and configure credentials/region
pip install awscli
aws configure
# 2. Create ECR repository
aws ecr create-repository --repository-name cryptoview-pro
# 3. Authenticate Docker to ECR
# Replace <account-id> with your 12-digit AWS account ID; the region
# must match the repository's region.
aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin <account-id>.dkr.ecr.us-east-1.amazonaws.com
# 4. Tag and push image
docker tag cryptoview-pro:latest <account-id>.dkr.ecr.us-east-1.amazonaws.com/cryptoview-pro:latest
docker push <account-id>.dkr.ecr.us-east-1.amazonaws.com/cryptoview-pro:latest
# 5. Create ECS task definition (task-definition.json)
{
  "family": "cryptoview-task",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "2048",
  "memory": "4096",
  "containerDefinitions": [
    {
      "name": "cryptoview-container",
      "image": "<account-id>.dkr.ecr.us-east-1.amazonaws.com/cryptoview-pro:latest",
      "portMappings": [
        {
          "containerPort": 8501,
          "protocol": "tcp"
        }
      ],
      "environment": [
        {"name": "TELEGRAM_BOT_TOKEN", "value": "your-token"},
        {"name": "TELEGRAM_CHAT_ID", "value": "your-chat-id"}
      ],
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "/ecs/cryptoview",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "ecs"
        }
      }
    }
  ]
}
# 6. Register task definition
aws ecs register-task-definition --cli-input-json file://task-definition.json
# 7. Create ECS cluster
aws ecs create-cluster --cluster-name cryptoview-cluster
# 8. Create service: two Fargate tasks on the given subnets/SGs.
# Replace subnet-xxxxx / sg-xxxxx with your VPC subnet and security group.
aws ecs create-service \
--cluster cryptoview-cluster \
--service-name cryptoview-service \
--task-definition cryptoview-task \
--desired-count 2 \
--launch-type FARGATE \
--network-configuration "awsvpcConfiguration={subnets=[subnet-xxxxx],securityGroups=[sg-xxxxx],assignPublicIp=ENABLED}"
Using AWS EC2
# 1. Launch EC2 instance (t3.medium or larger)
# 2. SSH into instance
ssh -i your-key.pem ec2-user@<instance-ip>
# 3. Install Docker (Amazon Linux; use apt on Ubuntu AMIs)
sudo yum update -y
sudo yum install docker -y
sudo systemctl start docker
sudo systemctl enable docker
# Adds ec2-user to the docker group; log out and back in (or re-SSH)
# before running docker without sudo.
sudo usermod -aG docker ec2-user
# 4. Install Docker Compose (standalone binary; version pinned deliberately)
sudo curl -L "https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# 5. Clone repo and deploy
git clone https://github.com/your-repo/cryptoview-pro.git
cd cryptoview-pro
docker-compose up -d
Option 2: Google Cloud Platform (GCP)
# 1. Install gcloud CLI
curl https://sdk.cloud.google.com | bash
gcloud init
# 2. Enable required APIs
# NOTE(review): Container Registry (gcr.io) is deprecated in favor of
# Artifact Registry — confirm which your project should use.
gcloud services enable run.googleapis.com
gcloud services enable containerregistry.googleapis.com
# 3. Build and push to Google Container Registry
gcloud builds submit --tag gcr.io/<project-id>/cryptoview-pro
# 4. Deploy to Cloud Run (4 GiB RAM / 2 vCPU, 300 s request timeout)
gcloud run deploy cryptoview-pro \
--image gcr.io/<project-id>/cryptoview-pro \
--platform managed \
--region us-central1 \
--allow-unauthenticated \
--memory 4Gi \
--cpu 2 \
--timeout 300 \
--set-env-vars TELEGRAM_BOT_TOKEN=your-token,TELEGRAM_CHAT_ID=your-chat-id
Option 3: Digital Ocean
# 1. Install doctl (DigitalOcean CLI)
snap install doctl
doctl auth init
# 2. Create Kubernetes cluster (two 2-vCPU / 4-GB worker nodes)
doctl kubernetes cluster create cryptoview-cluster \
--region nyc1 \
--node-pool "name=worker-pool;size=s-2vcpu-4gb;count=2"
# 3. Create Kubernetes deployment (deployment.yaml)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cryptoview-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: cryptoview
  template:
    metadata:
      labels:
        app: cryptoview
    spec:
      containers:
        - name: cryptoview
          image: your-registry/cryptoview-pro:latest
          ports:
            - containerPort: 8501
          env:
            # Secrets are injected from the cryptoview-secrets Secret,
            # never hard-coded in the manifest.
            - name: TELEGRAM_BOT_TOKEN
              valueFrom:
                secretKeyRef:
                  name: cryptoview-secrets
                  key: telegram-token
          resources:
            requests:
              memory: "2Gi"
              cpu: "1000m"
            limits:
              memory: "4Gi"
              cpu: "2000m"
# 4. Apply deployment
kubectl apply -f deployment.yaml
# 5. Expose via a cloud load balancer: external port 80 -> container 8501
kubectl expose deployment cryptoview-deployment --type=LoadBalancer --port=80 --target-port=8501
Option 4: Streamlit Cloud (Easiest)
# 1. Push code to GitHub
# (ensure requirements.txt is committed — Streamlit Cloud installs from it)
git init
git add .
git commit -m "Initial commit"
git remote add origin https://github.com/your-username/cryptoview-pro.git
git push -u origin main
# 2. Go to https://share.streamlit.io
# 3. Connect GitHub repository
# 4. Set secrets in Streamlit Cloud dashboard:
# - TELEGRAM_BOT_TOKEN
# - TELEGRAM_CHAT_ID
# 5. Deploy!
Model Persistence and Versioning
Save and Load Models
# models/model_manager.py
import joblib
import os
from datetime import datetime
import json
class ModelManager:
    """
    Manage model persistence and versioning.

    Layout: <models_dir>/<model_name>/<timestamp>/model.pkl with an
    optional metadata.json alongside; a "latest" symlink in the model's
    directory points at the most recent version.
    """

    def __init__(self, models_dir: str = "./models_cache"):
        # Root directory for all versioned models; created if missing.
        self.models_dir = models_dir
        os.makedirs(models_dir, exist_ok=True)

    def save_model(self, model, model_name: str, metadata: dict = None):
        """
        Save `model` under a new timestamped version directory and
        repoint the "latest" symlink at it.

        Returns the version directory path.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        version_dir = os.path.join(self.models_dir, model_name, timestamp)
        os.makedirs(version_dir, exist_ok=True)

        # Serialize the model itself
        model_path = os.path.join(version_dir, "model.pkl")
        joblib.dump(model, model_path)

        # Persist metadata next to the model, if provided
        if metadata:
            metadata_path = os.path.join(version_dir, "metadata.json")
            with open(metadata_path, 'w') as f:
                json.dump(metadata, f, indent=2)

        # Repoint "latest". lexists (not exists) so a *dangling* symlink
        # left by a deleted version is still removed — otherwise
        # os.symlink below raises FileExistsError.
        # NOTE(review): creating symlinks needs extra privileges on
        # Windows — confirm target platforms.
        latest_path = os.path.join(self.models_dir, model_name, "latest")
        if os.path.lexists(latest_path):
            os.remove(latest_path)
        os.symlink(version_dir, latest_path)

        print(f"Model saved: {version_dir}")
        return version_dir

    def load_model(self, model_name: str, version: str = "latest"):
        """
        Load a model (and its metadata, if present) by name and version.

        Raises FileNotFoundError if the version does not exist.
        Returns (model, metadata); metadata is None when absent.
        """
        model_path = os.path.join(self.models_dir, model_name, version, "model.pkl")
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model not found: {model_path}")
        model = joblib.load(model_path)

        # Metadata is optional
        metadata_path = os.path.join(self.models_dir, model_name, version, "metadata.json")
        metadata = None
        if os.path.exists(metadata_path):
            with open(metadata_path, 'r') as f:
                metadata = json.load(f)
        return model, metadata

    def list_versions(self, model_name: str):
        """
        List version directories for a model, newest first (timestamp
        names sort lexicographically). The "latest" symlink is excluded.
        """
        model_dir = os.path.join(self.models_dir, model_name)
        if not os.path.exists(model_dir):
            return []
        versions = [d for d in os.listdir(model_dir)
                    if os.path.isdir(os.path.join(model_dir, d)) and d != "latest"]
        return sorted(versions, reverse=True)
# Usage in app
from models.model_manager import ModelManager

manager = ModelManager()

# Save after training
if st.button("Train and Save Model"):
    predictor = XGBoostCryptoPredictor()
    metrics = predictor.train(df)
    # Record enough context to audit / reproduce this model later.
    metadata = {
        "trained_on": datetime.now().isoformat(),
        "data_points": len(df),
        "metrics": metrics,
        "crypto": crypto_symbol,
        "timeframe": timeframe
    }
    manager.save_model(predictor, "xgboost_btc", metadata)
    st.success("Model saved!")

# Load for inference
if st.button("Load Latest Model"):
    predictor, metadata = manager.load_model("xgboost_btc")
    st.json(metadata)
Monitoring and Logging
Application Monitoring
# utils/monitoring.py
# Shared logging setup; other modules import `logger` from here.
import time
import psutil
import logging
from functools import wraps
import streamlit as st
# Setup logging: INFO level, timestamped format, written both to
# cryptoview.log and to the console via StreamHandler.
# NOTE(review): basicConfig is a no-op if logging was already configured
# elsewhere in the process — confirm import order in the app.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('cryptoview.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
def monitor_performance(func):
    """
    Decorator that logs execution time and resident-memory delta of the
    wrapped function, tagging the log line SUCCESS or ERROR.

    Exceptions are logged and re-raised; metrics are emitted either way
    via the finally block.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Snapshot metrics before the call
        start_time = time.time()
        start_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB
        try:
            result = func(*args, **kwargs)
            status = "SUCCESS"
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {str(e)}")
            status = "ERROR"
            raise
        finally:
            # Runs on both success and failure paths.
            # NOTE(review): the RSS delta can be negative or noisy
            # (GC, allocator behavior) — treat it as indicative only.
            end_time = time.time()
            end_memory = psutil.Process().memory_info().rss / 1024 / 1024
            duration = end_time - start_time
            memory_used = end_memory - start_memory
            logger.info(f"{func.__name__} - Status: {status}, Duration: {duration:.2f}s, Memory: {memory_used:.2f}MB")
        return result
    return wrapper
# Usage
@monitor_performance
def train_model(df):
    """Train an XGBoost predictor on df, with timing/memory logging."""
    predictor = XGBoostCryptoPredictor()
    return predictor.train(df)
# System health metrics
def display_system_health():
    """
    Render CPU / memory / disk usage metrics in the Streamlit sidebar,
    warning above 80% CPU or memory.

    NOTE(review): psutil.cpu_percent(interval=1) blocks for ~1 second
    on every rerun; lower the interval if page latency matters.
    """
    with st.sidebar:
        st.markdown("### 🔧 System Health")
        cpu_percent = psutil.cpu_percent(interval=1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage('/')
        st.metric("CPU Usage", f"{cpu_percent}%")
        st.metric("Memory Usage", f"{memory.percent}%")
        st.metric("Disk Usage", f"{disk.percent}%")
        if cpu_percent > 80:
            st.warning("⚠️ High CPU usage")
        if memory.percent > 80:
            st.warning("⚠️ High memory usage")
Prediction Logging
# utils/prediction_logger.py
import pandas as pd
from datetime import datetime
import sqlite3
import json


class PredictionLogger:
    """
    Log predictions to SQLite for monitoring and retraining.

    Every call opens a short-lived connection (closed in finally, so no
    connection leaks on errors). Timestamps are stored as ISO-8601 text
    ('YYYY-MM-DD HH:MM:SS.ffffff') so SQLite's datetime() functions can
    compare them; metadata is stored as JSON text so it round-trips.
    """

    def __init__(self, db_path: str = "predictions.db"):
        self.db_path = db_path
        self._init_db()

    def _connect(self):
        """Open a new connection to the backing database file."""
        return sqlite3.connect(self.db_path)

    def _init_db(self):
        """Create the predictions table if it does not already exist."""
        conn = self._connect()
        try:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS predictions (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp DATETIME,
                    crypto_symbol TEXT,
                    model_name TEXT,
                    predicted_price REAL,
                    actual_price REAL,
                    prediction_horizon INT,
                    confidence REAL,
                    metadata TEXT
                )
            ''')
            conn.commit()
        finally:
            conn.close()

    def log_prediction(self, crypto_symbol: str, model_name: str,
                       predicted_price: float, prediction_horizon: int,
                       confidence: float = None, metadata: dict = None):
        """Insert a prediction row; returns its rowid for later scoring."""
        conn = self._connect()
        try:
            cursor = conn.execute('''
                INSERT INTO predictions
                (timestamp, crypto_symbol, model_name, predicted_price,
                prediction_horizon, confidence, metadata)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            ''', (
                # Explicit ISO string: relying on the implicit datetime
                # adapter is deprecated since Python 3.12.
                datetime.now().isoformat(sep=' '),
                crypto_symbol,
                model_name,
                predicted_price,
                prediction_horizon,
                confidence,
                # json.dumps (not str()) so metadata can be parsed back;
                # default=str tolerates datetimes and similar values.
                None if metadata is None else json.dumps(metadata, default=str)
            ))
            conn.commit()
            return cursor.lastrowid
        finally:
            conn.close()

    def update_actual_price(self, prediction_id: int, actual_price: float):
        """Fill in the realized price so the prediction can be evaluated."""
        conn = self._connect()
        try:
            conn.execute('''
                UPDATE predictions
                SET actual_price = ?
                WHERE id = ?
            ''', (actual_price, prediction_id))
            conn.commit()
        finally:
            conn.close()

    def get_model_performance(self, model_name: str, days: int = 7):
        """
        Absolute-percentage-error stats for a model over the last
        `days` days, or None when no scored predictions exist.

        NOTE(review): datetime('now') in SQLite is UTC while rows are
        stamped with local datetime.now() — near the window edge some
        rows may be mis-included/excluded; confirm desired timezone.
        """
        query = '''
            SELECT
                predicted_price,
                actual_price,
                ABS(predicted_price - actual_price) / actual_price * 100 as mape
            FROM predictions
            WHERE model_name = ?
            AND actual_price IS NOT NULL
            AND timestamp > datetime('now', '-' || ? || ' days')
        '''
        conn = self._connect()
        try:
            df = pd.read_sql_query(query, conn, params=(model_name, days))
        finally:
            conn.close()
        if len(df) == 0:
            return None
        return {
            "predictions_count": len(df),
            "mean_mape": df['mape'].mean(),
            "median_mape": df['mape'].median(),
            "max_error": df['mape'].max()
        }
API Endpoints (Optional)
Create a FastAPI wrapper for REST API access (`api/main.py`):
# api/main.py
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import pandas as pd
from datetime import datetime  # fix: health_check used datetime without importing it
from models.xgboost_model import XGBoostCryptoPredictor
from models.model_manager import ModelManager

app = FastAPI(title="CryptoView Pro API")
manager = ModelManager()


class PredictionRequest(BaseModel):
    """Request body for POST /predict."""
    crypto_symbol: str
    model_name: str = "xgboost"
    periods: int = 24


class PredictionResponse(BaseModel):
    """Response body for POST /predict."""
    predictions: list
    confidence: float
    model_version: str


@app.get("/health")
def health_check():
    """Liveness probe endpoint."""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}


@app.post("/predict", response_model=PredictionResponse)
def predict(request: PredictionRequest):
    """Load the latest saved model and forecast the requested horizon."""
    try:
        # Load latest model
        predictor, metadata = manager.load_model(request.model_name)
        # Get data (implement your data fetching)
        df = fetch_crypto_data(request.crypto_symbol)
        # Predict
        predictions = predictor.predict_future(df, periods=request.periods)
        return PredictionResponse(
            predictions=predictions['predicted_price'].tolist(),
            confidence=0.85,  # Calculate based on model metrics
            # metadata may be None when no metadata.json was saved
            model_version=(metadata or {}).get('trained_on', 'unknown')
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/models")
def list_models():
    """Enumerate the model families this API can serve."""
    return {"models": ["xgboost", "prophet", "hybrid"]}

# Run with: uvicorn api.main:app --reload
requirements.txt:
fastapi
uvicorn[standard]
Automated Retraining
# scripts/retrain_models.py
import schedule
import time
from datetime import datetime
from models.xgboost_model import XGBoostCryptoPredictor
from models.model_manager import ModelManager
from data.collectors import CryptoDataCollector


def retrain_job():
    """
    Daily retraining job: refresh data, retrain each symbol, and save
    only models beating the MAPE threshold. A failure on one symbol
    does not stop the others.
    """
    print(f"Starting retraining at {datetime.now()}")
    manager = ModelManager()
    collector = CryptoDataCollector('kraken')
    cryptos = ['BTC/USDT', 'ETH/USDT', 'SOL/USDT']
    for crypto in cryptos:
        try:
            # Fetch latest data
            df = collector.fetch_ohlcv(crypto, '1h', 2000)
            # Train new model
            predictor = XGBoostCryptoPredictor()
            metrics = predictor.train(df)
            # Save only if performance is good enough to deploy
            if metrics['test_mape'] < 5.0:  # MAPE threshold (%)
                metadata = {
                    "crypto": crypto,
                    "trained_on": datetime.now().isoformat(),
                    "metrics": metrics,
                    "data_points": len(df)
                }
                # '/' is not filesystem-safe, hence the replace()
                manager.save_model(predictor, f"xgboost_{crypto.replace('/', '_')}", metadata)
                print(f"✅ {crypto} model retrained successfully")
            else:
                print(f"⚠️ {crypto} model performance below threshold")
        except Exception as e:
            print(f"❌ Error retraining {crypto}: {str(e)}")


# Schedule daily at 2 AM (server local time)
schedule.every().day.at("02:00").do(retrain_job)

if __name__ == "__main__":
    print("Retraining scheduler started")
    while True:
        schedule.run_pending()
        time.sleep(60)
# Using systemd (Linux)
# Unit file path: /etc/systemd/system/cryptoview-retrain.service
sudo nano /etc/systemd/system/cryptoview-retrain.service
[Unit]
Description=CryptoView Pro Model Retraining
After=network.target
[Service]
Type=simple
# Run as a dedicated unprivileged user
User=cryptoview
WorkingDirectory=/app
ExecStart=/usr/bin/python3 /app/scripts/retrain_models.py
# Restart the scheduler automatically if it crashes
Restart=always
[Install]
WantedBy=multi-user.target
# Enable at boot, then start now
sudo systemctl enable cryptoview-retrain
sudo systemctl start cryptoview-retrain
Environment Variables
Create `.env.production`:
# .env.production
# WARNING: contains secrets — keep out of version control (.gitignore).
# Application
ENVIRONMENT=production
DEBUG=False
# Telegram
TELEGRAM_BOT_TOKEN=your_production_token
TELEGRAM_CHAT_ID=your_chat_id
# Database
DATABASE_URL=postgresql://user:password@postgres:5432/cryptodb
REDIS_URL=redis://redis:6379
# Exchange APIs
KRAKEN_API_KEY=your_api_key
KRAKEN_API_SECRET=your_api_secret
# Model settings (cache entries expire after DATA_CACHE_TTL seconds)
MODEL_CACHE_DIR=/app/models_cache
DATA_CACHE_TTL=300
# Monitoring
SENTRY_DSN=your_sentry_dsn
LOG_LEVEL=INFO
Security Best Practices
Never commit secrets to Git! Use environment variables or secrets management.
-
Use secrets management:
- AWS Secrets Manager
- Google Cloud Secret Manager
- HashiCorp Vault
-
Enable HTTPS:
# Get free SSL with Let's Encrypt
sudo apt install certbot
sudo certbot --nginx -d your-domain.com
Implement rate limiting:
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address

limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter

@app.post("/predict")
@limiter.limit("10/minute")
def predict(request: Request):
    ...
Add authentication:
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

security = HTTPBearer()

@app.post("/predict")
def predict(credentials: HTTPAuthorizationCredentials = Depends(security)):
    token = credentials.credentials
    if not verify_token(token):
        raise HTTPException(status_code=401, detail="Invalid token")
    ...
Cost Optimization
- Use spot instances (AWS EC2, GCP) for non-critical workloads - save 70%
- Auto-scaling: Scale down during low traffic
- Caching: Redis for API responses and predictions
- Model compression: Use `joblib` compression for smaller model files
- Serverless: Use Cloud Run/Lambda for sporadic usage
Next Steps
Custom Models
Deploy your custom prediction models
Monitoring
Advanced monitoring and alerting
API Documentation
Full API reference for integrations
Troubleshooting
Common deployment issues and solutions