openGPMP
Open Source Mathematics Package
gpmp::ml::Regularize Class Reference

#include <regularizers.hpp>

Static Public Member Functions

static double l1_regularization (const std::vector< double > &weights, double lambda)
 Computes L1 regularization penalty (Lasso regression)

static double l2_regularization (const std::vector< double > &weights, double lambda)
 Computes L2 regularization penalty (Ridge regression)

static double elastic_net_regularization (const std::vector< double > &weights, double lambda1, double lambda2)
 Computes Elastic Net regularization penalty.

static double dropout_regularization (double dropout_rate, int num_neurons)
 Computes Dropout regularization penalty.

static bool early_stopping (double current_val_loss, double &best_val_loss, int patience, int epoch)
 Performs early stopping based on validation loss.

static std::vector< double > ensemble_predictions (const std::vector< std::vector< double >> &predictions)
 Combines predictions from multiple models using ensembling.

static void max_norm_regularization (std::vector< double > &weights, double max_norm)
 Applies max norm regularization to the weights.

static void weight_decay_regularization (std::vector< double > &weights, double lambda)
 Applies weight decay regularization to the weights.

static std::vector< std::vector< double > > batch_normalization (const std::vector< std::vector< double >> &input_data, double epsilon=1e-5, double scale=1.0, double shift=0.0)
 Applies batch normalization to the input data.

static std::vector< std::vector< double > > data_augmentation (const std::vector< std::vector< double >> &input_data, int augmentation_factor)
 Applies data augmentation to the input data.
 

Detailed Description

Definition at line 43 of file regularizers.hpp.

Member Function Documentation

◆ batch_normalization()

static std::vector<std::vector<double>>
gpmp::ml::Regularize::batch_normalization(const std::vector<std::vector<double>> &input_data,
                                          double epsilon = 1e-5,
                                          double scale = 1.0,
                                          double shift = 0.0)

Applies batch normalization to the input data.

Parameters
    input_data  The input data matrix
    epsilon     A small value added to variance to avoid division by zero
    scale       The scale parameter
    shift       The shift parameter

Returns
    The normalized input data matrix

Definition at line 127 of file regularizers.cpp.

{
    std::vector<std::vector<double>> normalized_data;
    for (const auto &instance : input_data) {
        // mean of the current instance
        double mean = 0.0;
        for (double val : instance) {
            mean += val;
        }
        mean /= instance.size();

        // variance of the current instance
        double variance = 0.0;
        for (double val : instance) {
            variance += (val - mean) * (val - mean);
        }
        variance /= instance.size();

        double std_dev = sqrt(variance + epsilon);

        // normalize, then apply the scale and shift parameters
        std::vector<double> normalized_instance;
        for (double val : instance) {
            double normalized_val = scale * ((val - mean) / std_dev) + shift;
            normalized_instance.push_back(normalized_val);
        }
        normalized_data.push_back(normalized_instance);
    }
    return normalized_data;
}
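
A minimal usage sketch follows (assuming <regularizers.hpp> is on the include path; the input values are purely illustrative). Note that, as the loop above shows, each instance (row) is normalized with its own mean and variance:

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<double>> batch = {{1.0, 2.0, 3.0}, {4.0, 6.0, 8.0}};
    // Normalize each instance with the default epsilon, scale, and shift
    auto normalized = gpmp::ml::Regularize::batch_normalization(batch);
    for (const auto &row : normalized) {
        for (double v : row)
            std::cout << v << ' ';
        std::cout << '\n';
    }
    return 0;
}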

◆ data_augmentation()

static std::vector<std::vector<double>>
gpmp::ml::Regularize::data_augmentation(const std::vector<std::vector<double>> &input_data,
                                        int augmentation_factor)

Applies data augmentation to the input data.

Parameters
    input_data           The input data matrix
    augmentation_factor  The factor by which to augment the data (e.g., 2 for doubling the data)

Returns
    The augmented input data matrix

Definition at line 158 of file regularizers.cpp.

{
    std::vector<std::vector<double>> augmented_data;
    std::random_device rd;
    std::mt19937 gen(rd());

    for (const auto &instance : input_data) {
        // keep the original instance, then append shuffled copies
        augmented_data.push_back(instance);
        for (int i = 1; i < augmentation_factor; ++i) {
            std::vector<double> augmented_instance = instance;
            std::shuffle(augmented_instance.begin(),
                         augmented_instance.end(),
                         gen);
            augmented_data.push_back(augmented_instance);
        }
    }
    return augmented_data;
}
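
A short, illustrative call might look like the following (again assuming <regularizers.hpp> is available); each augmented copy is a shuffled permutation of the original instance:

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<double>> data = {{1.0, 2.0, 3.0}};
    // Factor 3: each instance is kept plus two shuffled copies
    auto augmented = gpmp::ml::Regularize::data_augmentation(data, 3);
    std::cout << "augmented size: " << augmented.size() << '\n'; // expected: 3
    return 0;
}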

◆ dropout_regularization()

static double gpmp::ml::Regularize::dropout_regularization(double dropout_rate,
                                                           int num_neurons)

Computes Dropout regularization penalty.

Parameters
    dropout_rate  The dropout rate
    num_neurons   The number of neurons in the layer

Returns
    The Dropout regularization penalty

Definition at line 67 of file regularizers.cpp.

{
    return 0.5 * dropout_rate * num_neurons;
}
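
An illustrative call with arbitrary values (assuming <regularizers.hpp> is on the include path):

#include <regularizers.hpp>
#include <iostream>

int main() {
    // 0.5 * 0.2 * 100 = 10.0
    double penalty = gpmp::ml::Regularize::dropout_regularization(0.2, 100);
    std::cout << penalty << '\n';
    return 0;
}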

◆ early_stopping()

static bool gpmp::ml::Regularize::early_stopping(double current_val_loss,
                                                 double &best_val_loss,
                                                 int patience,
                                                 int epoch)

Performs early stopping based on validation loss.

Parameters
    current_val_loss  The current validation loss
    best_val_loss     The best validation loss observed so far
    patience          The number of epochs to wait before stopping if validation loss doesn't improve
    epoch             The current epoch number

Returns
    True if the early stopping criterion is met, false otherwise

Definition at line 72 of file regularizers.cpp.

{
    if (current_val_loss < best_val_loss) {
        best_val_loss = current_val_loss;
        patience = epoch + patience; // Reset patience
    } else {
        if (epoch >= patience) {
            return true; // Stop training
        }
    }
    return false; // Continue training
}
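
A hedged sketch of how this helper might sit inside a training loop (the loss values are placeholders; because best_val_loss is updated through the reference parameter, it should start at a large value):

#include <regularizers.hpp>
#include <iostream>
#include <limits>
#include <vector>

int main() {
    std::vector<double> val_losses = {0.9, 0.7, 0.72, 0.71, 0.73, 0.74};
    double best_val_loss = std::numeric_limits<double>::max();
    int patience = 2;
    for (int epoch = 0; epoch < static_cast<int>(val_losses.size()); ++epoch) {
        if (gpmp::ml::Regularize::early_stopping(val_losses[epoch],
                                                 best_val_loss,
                                                 patience,
                                                 epoch)) {
            std::cout << "stopping at epoch " << epoch << '\n';
            break;
        }
    }
    return 0;
}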

◆ elastic_net_regularization()

static double gpmp::ml::Regularize::elastic_net_regularization(const std::vector<double> &weights,
                                                               double lambda1,
                                                               double lambda2)

Computes Elastic Net regularization penalty.

Parameters
    weights  The weights of the model
    lambda1  The L1 regularization parameter
    lambda2  The L2 regularization parameter

Returns
    The Elastic Net regularization penalty

Definition at line 58 of file regularizers.cpp.

{
    double l1_penalty = l1_regularization(weights, lambda1);
    double l2_penalty = l2_regularization(weights, lambda2);
    return l1_penalty + l2_penalty;
}
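
A minimal sketch of a call, with arbitrary weights and regularization parameters (assuming <regularizers.hpp> is available):

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<double> weights = {0.5, -1.0, 0.25};
    // Sum of the L1 penalty (lambda1) and the L2 penalty (lambda2)
    double penalty =
        gpmp::ml::Regularize::elastic_net_regularization(weights, 0.01, 0.1);
    std::cout << penalty << '\n';
    return 0;
}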

◆ ensemble_predictions()

static std::vector<double>
gpmp::ml::Regularize::ensemble_predictions(const std::vector<std::vector<double>> &predictions)

Combines predictions from multiple models using ensembling.

Parameters
    predictions  Vector of prediction vectors from individual models

Returns
    Ensemble prediction vector

Definition at line 87 of file regularizers.cpp.

{
    std::vector<double> ensemble;
    if (!predictions.empty()) {
        ensemble.resize(predictions.front().size(), 0.0);
        // sum the predictions element-wise
        for (const auto &prediction : predictions) {
            for (size_t i = 0; i < prediction.size(); ++i) {
                ensemble[i] += prediction[i];
            }
        }
        // divide by the number of models to obtain the average
        for (auto &val : ensemble) {
            val /= predictions.size();
        }
    }
    return ensemble;
}
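
An illustrative call averaging the outputs of two hypothetical models (assuming <regularizers.hpp> is on the include path):

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    // Two models, three predictions each; the result is the element-wise mean
    std::vector<std::vector<double>> predictions = {{1.0, 2.0, 3.0},
                                                    {3.0, 4.0, 5.0}};
    auto ensemble = gpmp::ml::Regularize::ensemble_predictions(predictions);
    for (double v : ensemble)
        std::cout << v << ' '; // expected: 2 3 4
    std::cout << '\n';
    return 0;
}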

◆ l1_regularization()

static double gpmp::ml::Regularize::l1_regularization(const std::vector<double> &weights,
                                                       double lambda)

Computes L1 regularization penalty (Lasso regression)

Parameters
    weights  The weights of the model
    lambda   The regularization parameter

Returns
    The L1 regularization penalty

Definition at line 39 of file regularizers.cpp.

{
    double penalty = 0.0;
    for (double weight : weights) {
        penalty += std::abs(weight);
    }
    return lambda * penalty;
}
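
A minimal sketch with illustrative values (assuming <regularizers.hpp> is on the include path):

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<double> weights = {0.5, -1.2, 0.3};
    // lambda * (|0.5| + |-1.2| + |0.3|) = 0.1 * 2.0 = 0.2
    double penalty = gpmp::ml::Regularize::l1_regularization(weights, 0.1);
    std::cout << penalty << '\n';
    return 0;
}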

◆ l2_regularization()

static double gpmp::ml::Regularize::l2_regularization(const std::vector<double> &weights,
                                                       double lambda)

Computes L2 regularization penalty (Ridge regression)

Parameters
    weights  The weights of the model
    lambda   The regularization parameter

Returns
    The L2 regularization penalty

Definition at line 49 of file regularizers.cpp.

{
    double penalty = 0.0;
    for (double weight : weights) {
        penalty += weight * weight;
    }
    return 0.5 * lambda * penalty;
}
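
A similar sketch for the L2 penalty, again with illustrative values:

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<double> weights = {1.0, -2.0, 2.0};
    // 0.5 * lambda * (1 + 4 + 4) = 0.5 * 0.1 * 9 = 0.45
    double penalty = gpmp::ml::Regularize::l2_regularization(weights, 0.1);
    std::cout << penalty << '\n';
    return 0;
}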

◆ max_norm_regularization()

static void gpmp::ml::Regularize::max_norm_regularization(std::vector<double> &weights,
                                                          double max_norm)

Applies max norm regularization to the weights.

Parameters
    weights   The weights of the model
    max_norm  The maximum norm value

Definition at line 104 of file regularizers.cpp.

{
    double norm = 0.0;
    for (double &weight : weights) {
        norm += weight * weight;
    }
    norm = sqrt(norm);
    // rescale the weights in place if their Euclidean norm exceeds max_norm
    if (norm > max_norm) {
        double factor = max_norm / norm;
        for (double &weight : weights) {
            weight *= factor;
        }
    }
}
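
An illustrative in-place rescaling (assuming <regularizers.hpp> is available):

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    // The Euclidean norm of {3, 4} is 5; with max_norm = 1 the weights are
    // rescaled in place by a factor of 1/5
    std::vector<double> weights = {3.0, 4.0};
    gpmp::ml::Regularize::max_norm_regularization(weights, 1.0);
    for (double w : weights)
        std::cout << w << ' '; // expected: 0.6 0.8
    std::cout << '\n';
    return 0;
}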

◆ weight_decay_regularization()

static void gpmp::ml::Regularize::weight_decay_regularization(std::vector<double> &weights,
                                                              double lambda)

Applies weight decay regularization to the weights.

Parameters
    weights  The weights of the model
    lambda   The regularization parameter

Definition at line 119 of file regularizers.cpp.

{
    for (double &weight : weights) {
        weight *= (1.0 - lambda);
    }
}
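
A minimal sketch of the in-place decay (assuming <regularizers.hpp> is on the include path):

#include <regularizers.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<double> weights = {1.0, -2.0, 0.5};
    // Each weight is scaled in place by (1 - lambda) = 0.99
    gpmp::ml::Regularize::weight_decay_regularization(weights, 0.01);
    for (double w : weights)
        std::cout << w << ' ';
    std::cout << '\n';
    return 0;
}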

The documentation for this class was generated from the following files:

regularizers.hpp
regularizers.cpp