Regularization penalties are added to loss functions to prevent overfitting by constraining model weights.
L1 penalty
Computes the L1 (Lasso) regularization penalty, which is the sum of absolute values of weights. L1 regularization encourages sparsity in the model.
template<Arithmetic T>
T l1_penalty(const std::vector<T>& weights)
weights: Vector of model weights to penalize
The L1 penalty (sum of absolute values of weights)
Template constraints
T must satisfy the Arithmetic concept (any arithmetic type)
Example
#include "Losses/loss_functions.hpp"
#include <vector>
#include <iostream>
int main() {
    std::vector<double> weights = {0.5, -1.2, 0.8, -0.3};

    // L1 penalty = |0.5| + |-1.2| + |0.8| + |-0.3| = 2.8
    double penalty = mlpp::losses::l1_penalty(weights);
    std::cout << "L1 penalty: " << penalty << std::endl;
    // Output: L1 penalty: 2.8

    // Add to a loss function. base_loss is a placeholder here;
    // in real training it comes from a data loss such as mse().
    double base_loss = 0.0;
    double lambda = 0.01;
    double total_loss = base_loss + lambda * penalty;
    std::cout << "Total loss: " << total_loss << std::endl;
    return 0;
}
L2 penalty
Computes the L2 (Ridge) regularization penalty, which is the sum of squared weights. L2 regularization encourages small weights and smooth models.
template<Arithmetic T>
T l2_penalty(const std::vector<T>& weights)
weights: Vector of model weights to penalize
The L2 penalty (sum of squared weights)
Template constraints
T must satisfy the Arithmetic concept (any arithmetic type)
Example
#include "Losses/loss_functions.hpp"
#include <vector>
#include <iostream>
int main() {
    std::vector<double> weights = {0.5, -1.2, 0.8, -0.3};

    // L2 penalty = 0.5^2 + 1.2^2 + 0.8^2 + 0.3^2
    //            = 0.25 + 1.44 + 0.64 + 0.09 = 2.42
    double penalty = mlpp::losses::l2_penalty(weights);
    std::cout << "L2 penalty: " << penalty << std::endl;
    // Output: L2 penalty: 2.42

    // Add to a loss function. base_loss is a placeholder here;
    // in real training it comes from a data loss such as mse().
    double base_loss = 0.0;
    double lambda = 0.01;
    double total_loss = base_loss + lambda * penalty;
    std::cout << "Total loss: " << total_loss << std::endl;
    return 0;
}
Elastic net penalty
Computes the elastic net regularization penalty, which is a weighted combination of L1 and L2 penalties. This provides the benefits of both regularization methods.
template<Arithmetic T>
T elastic_net_penalty(const std::vector<T>& weights, T alpha, T l1_ratio)
weights: Vector of model weights to penalize
alpha: Overall regularization strength multiplier
l1_ratio: Mixing ratio between the L1 and L2 penalties; must be between 0 and 1.
l1_ratio = 1.0: Pure L1 penalty (Lasso)
l1_ratio = 0.0: Pure L2 penalty (Ridge)
0.0 < l1_ratio < 1.0: Combination of both
The elastic net penalty: alpha * (l1_ratio * L1 + (1 - l1_ratio) * L2)
Template constraints
T must satisfy the Arithmetic concept (any arithmetic type)
Example
#include "Losses/loss_functions.hpp"
#include <vector>
#include <iostream>
int main() {
    std::vector<double> weights = {0.5, -1.2, 0.8, -0.3};

    // Elastic net with an equal mix of L1 and L2:
    // penalty = alpha * (l1_ratio * L1 + (1 - l1_ratio) * L2)
    //         = 0.01 * (0.5 * 2.8 + 0.5 * 2.42) = 0.0261
    double alpha = 0.01;
    double l1_ratio = 0.5;
    double penalty = mlpp::losses::elastic_net_penalty(weights, alpha, l1_ratio);
    std::cout << "Elastic net penalty: " << penalty << std::endl;

    // Pure L1 (Lasso): 0.01 * 2.8 = 0.028
    double lasso = mlpp::losses::elastic_net_penalty(weights, alpha, 1.0);
    std::cout << "Pure L1: " << lasso << std::endl;

    // Pure L2 (Ridge): 0.01 * 2.42 = 0.0242
    double ridge = mlpp::losses::elastic_net_penalty(weights, alpha, 0.0);
    std::cout << "Pure L2: " << ridge << std::endl;

    // Add to a loss function. base_loss is a placeholder here;
    // in real training it comes from a data loss such as mse().
    // Note: alpha is already folded into the penalty, so no extra
    // lambda factor is applied.
    double base_loss = 0.0;
    double total_loss = base_loss + penalty;
    std::cout << "Total loss: " << total_loss << std::endl;
    return 0;
}
Usage in training
Regularization penalties are typically added to the main loss function during model training:
#include "Losses/loss_functions.hpp"
#include <vector>
#include <iostream>
int main() {
    // Training data: ground-truth targets and model predictions.
    std::vector<double> y_true = {1.0, 2.0, 3.0, 4.0};
    std::vector<double> y_pred = {1.1, 2.2, 2.9, 4.3};
    std::vector<double> weights = {0.5, -1.2, 0.8, -0.3};

    // Compute the base (data) loss.
    double base_loss = mlpp::losses::mse(y_true, y_pred);

    // Add L2 regularization scaled by lambda.
    double lambda = 0.01;
    double reg_penalty = mlpp::losses::l2_penalty(weights);
    double total_loss = base_loss + lambda * reg_penalty;
    std::cout << "Total loss (L2): " << total_loss << std::endl;

    // Or use elastic net for a combined L1/L2 penalty.
    // alpha is folded into the penalty, so it is added directly.
    double alpha = 0.01;
    double l1_ratio = 0.5;
    double elastic_penalty = mlpp::losses::elastic_net_penalty(weights, alpha, l1_ratio);
    double total_loss_elastic = base_loss + elastic_penalty;
    std::cout << "Total loss (elastic net): " << total_loss_elastic << std::endl;
    return 0;
}