openGPMP
Open Source Mathematics Package
gpmp::ml::SecondaryMLP< T > Class Template Reference

Secondary Multi-Layer Perceptron Class making use of the Linear Algebra module.

#include <mlp_net.hpp>

Public Member Functions

void log (auto &file, const auto &x, const auto &y, const auto &y_hat)
 
long double sigmoid_activ (long double x)
 Sigmoid activation function.
 
long double sigmoid_deriv (long double x)
 Sigmoid Derivative for backwards propagation.
 
 SecondaryMLP (std::vector< size_t > _layer_units, long double _lr=.001)
 
auto prop_forwards (gpmp::linalg::Matrix< T > x)
 
void prop_backwards (gpmp::linalg::Matrix< T > target)
 

Public Attributes

std::vector< size_t > layer_units
 
std::vector< gpmp::linalg::Matrix< T > > bias_vectors
 
std::vector< gpmp::linalg::Matrix< T > > wt_mtx
 
std::vector< gpmp::linalg::Matrix< T > > activations
 
long double lr
 

Detailed Description

template<typename T>
class gpmp::ml::SecondaryMLP< T >

Secondary Multi-Layer Perceptron Class making use of the Linear Algebra module.

Definition at line 151 of file mlp_net.hpp.

Constructor & Destructor Documentation

◆ SecondaryMLP()

template<typename T >
gpmp::ml::SecondaryMLP< T >::SecondaryMLP ( std::vector< size_t >  _layer_units,
long double  _lr = .001 
)
inline explicit

Secondary Multi-Layer Perceptron constructor. Initializes a set of weights and biases for each layer, drawn from random Gaussian noise.

Definition at line 194 of file mlp_net.hpp.

    : layer_units(_layer_units), wt_mtx(), bias_vectors(), lr(_lr) {
    // one activation matrix per layer
    activations.resize(layer_units.size());
    // traverse the adjacent layer pairs
    for (size_t i = 0; i < layer_units.size() - 1; ++i) {
        // size of inputs
        size_t inputs{layer_units[i]};
        // size of outputs
        size_t outputs{layer_units[i + 1]};

        // initialize to random Gaussian noise values
        // weights
        auto gauss_wt = gpmp::linalg::mtx<T>::randn(outputs, inputs);
        wt_mtx.push_back(gauss_wt);
        // biases
        auto bias_wt = gpmp::linalg::mtx<T>::randn(outputs, 1);
        bias_vectors.push_back(bias_wt);
    }
}

References gpmp::ml::SecondaryMLP< T >::activations, gpmp::ml::SecondaryMLP< T >::bias_vectors, gpmp::ml::SecondaryMLP< T >::layer_units, gpmp::linalg::mtx< T >::randn(), and gpmp::ml::SecondaryMLP< T >::wt_mtx.
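
For example, constructing a network with one input unit, two hidden layers of eight units each, and one output unit (the template argument and layer sizes are illustrative, not prescribed by the class):

    // weights (8x1, 8x8, 1x8) and biases for each adjacent layer pair
    // are drawn from gpmp::linalg::mtx<double>::randn()
    gpmp::ml::SecondaryMLP<double> model({1, 8, 8, 1}, 0.01);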

Member Function Documentation

◆ log()

template<typename T >
void gpmp::ml::SecondaryMLP< T >::log ( auto &  file,
const auto &  x,
const auto &  y,
const auto &  y_hat 
)
inline

Logging function for collecting training results: the per-sample squared error, input, target, and prediction.

Definition at line 156 of file mlp_net.hpp.

{
    // squared error for a single sample
    auto mse = (y.data[0] - y_hat.data[0]);
    mse = mse * mse;
    file << mse << " " << x.data[0] << " " << y.data[0] << " "
         << y_hat.data[0] << " \n";
}
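
A minimal usage sketch, assuming a constructed model and a 1x1 input/target pair (the variable and file names here are hypothetical):

    #include <fstream>

    std::ofstream out("mlp_train.log");
    auto y_hat = model.prop_forwards(x);
    // writes one whitespace-separated row: squared_error x y y_hat
    model.log(out, x, y, y_hat);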

◆ prop_backwards()

template<typename T >
void gpmp::ml::SecondaryMLP< T >::prop_backwards ( gpmp::linalg::Matrix< T >  target)
inline

Backwards propagation is used to optimize the network's weights, enabling it to learn how to correctly map arbitrary inputs to outputs. The goal is to update each weight of the network so that the actual output moves closer to the target output. This method takes the target output as its input parameter.

Definition at line 245 of file mlp_net.hpp.

{
    assert(std::get<0>(target.shape) == layer_units.back());
    // calculate the error, target - output
    auto y = target;
    auto y_hat = activations.back();
    auto err = target - y_hat;
    // back propagate the error calculated from output to input
    // and step the weights
    for (int64_t i = wt_mtx.size() - 1; i >= 0; --i) {
        // calculate errors for the previous layer
        auto wt = wt_mtx[i].T();
        auto prior_errs = wt.mult(err);
        auto outputs_d =
            activations[i + 1].apply_function(&SecondaryMLP::sigmoid_deriv);
        auto gradients = err.hadamard(outputs_d);
        gradients = gradients.scalar_mult(lr);
        auto trans_a = activations[i].T();
        auto gradients_wt = gradients.mult(trans_a);

        // adjust the network's weights based on the propagation
        // technique
        bias_vectors[i] = bias_vectors[i].add(gradients);
        wt_mtx[i] = wt_mtx[i].add(gradients_wt);
        err = prior_errs;
    }
}

References gpmp::ml::SecondaryMLP< T >::activations, gpmp::ml::SecondaryMLP< T >::bias_vectors, gpmp::ml::SecondaryMLP< T >::layer_units, gpmp::ml::SecondaryMLP< T >::lr, gpmp::ml::SecondaryMLP< T >::sigmoid_deriv(), and gpmp::ml::SecondaryMLP< T >::wt_mtx.
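
In symbols, each loop iteration above performs the following updates, where e is the error arriving at layer i + 1, a_i the cached activations, and \eta the learning rate lr; the error is propagated through the pre-update weights:

    \Delta = \eta \, (e \odot \sigma'(a_{i+1}))
    b_i \leftarrow b_i + \Delta
    W_i \leftarrow W_i + \Delta \, a_i^{\top}
    e \leftarrow W_i^{\top} e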

◆ prop_forwards()

template<typename T >
auto gpmp::ml::SecondaryMLP< T >::prop_forwards ( gpmp::linalg::Matrix< T >  x)
inline

Forward passes compute the activations at each layer. This method saves the result for each layer to the activations matrix and passes it forward as the input to the next layer.

Definition at line 221 of file mlp_net.hpp.

{
    // input must have layer_units[0] rows and a nonzero column count
    assert(std::get<0>(x.shape) == layer_units[0] && std::get<1>(x.shape));
    // store the input as the first layer's activations
    activations[0] = x;
    gpmp::linalg::Matrix prev(x);

    // traverse the layer units
    for (uint64_t i = 0; i < layer_units.size() - 1; ++i) {
        gpmp::linalg::Matrix y = wt_mtx[i].mult(prev);
        y = y + bias_vectors[i];
        y = y.apply_function(&SecondaryMLP::sigmoid_activ);
        activations[i + 1] = y;
        prev = y;
    }
    return prev;
}

References gpmp::ml::SecondaryMLP< T >::activations, gpmp::ml::SecondaryMLP< T >::bias_vectors, gpmp::ml::SecondaryMLP< T >::layer_units, gpmp::ml::SecondaryMLP< T >::sigmoid_activ(), and gpmp::ml::SecondaryMLP< T >::wt_mtx.
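
A minimal training-loop sketch combining both passes (n_epochs and make_sample are hypothetical placeholders for the caller's data pipeline):

    gpmp::ml::SecondaryMLP<double> model({1, 8, 8, 1}, 0.01);

    for (int epoch = 0; epoch < n_epochs; ++epoch) {
        // x and y are column-vector input/target Matrix samples
        auto [x, y] = make_sample();
        auto y_hat = model.prop_forwards(x);  // caches activations layer by layer
        model.prop_backwards(y);              // steps weights and biases toward y
    }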

◆ sigmoid_activ()

template<typename T >
long double gpmp::ml::SecondaryMLP< T >::sigmoid_activ ( long double  x)
inline

Sigmoid activation function.

Parameters
[in] x : input value (long double)

Definition at line 169 of file mlp_net.hpp.

{
    return 1.0L / (1 + exp(-x));
}

Referenced by gpmp::ml::SecondaryMLP< T >::prop_forwards().
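
The method implements the standard logistic sigmoid, mapping any real input into the open interval (0, 1):

    \sigma(x) = \frac{1}{1 + e^{-x}}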

◆ sigmoid_deriv()

template<typename T >
long double gpmp::ml::SecondaryMLP< T >::sigmoid_deriv ( long double  x)
inline

Sigmoid Derivative for backwards propagation.

Parameters
[in] x : an already-activated sigmoid output value (long double)

Definition at line 178 of file mlp_net.hpp.

{
    // x is assumed to already be a sigmoid output, so the derivative
    // sigma(z) * (1 - sigma(z)) reduces to x * (1 - x)
    return (x * (1 - x));
}

Referenced by gpmp::ml::SecondaryMLP< T >::prop_backwards().
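
The identity behind this shortcut is that the logistic sigmoid's derivative can be written in terms of its own output:

    \sigma'(z) = \sigma(z) \, (1 - \sigma(z))

Passing the cached activation x = \sigma(z) therefore reduces the computation to x(1 - x), with no further call to exp().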

Member Data Documentation

◆ activations

template<typename T >
std::vector<gpmp::linalg::Matrix<T> > gpmp::ml::SecondaryMLP< T >::activations

Definition at line 185 of file mlp_net.hpp.

◆ bias_vectors

template<typename T >
std::vector<gpmp::linalg::Matrix<T> > gpmp::ml::SecondaryMLP< T >::bias_vectors

Definition at line 183 of file mlp_net.hpp.

◆ layer_units

template<typename T >
std::vector<size_t> gpmp::ml::SecondaryMLP< T >::layer_units

Definition at line 182 of file mlp_net.hpp.

◆ lr

template<typename T >
long double gpmp::ml::SecondaryMLP< T >::lr

Definition at line 187 of file mlp_net.hpp.

Referenced by gpmp::ml::SecondaryMLP< T >::prop_backwards().

◆ wt_mtx

template<typename T >
std::vector<gpmp::linalg::Matrix<T> > gpmp::ml::SecondaryMLP< T >::wt_mtx

Definition at line 184 of file mlp_net.hpp.

The documentation for this class was generated from the following file:

mlp_net.hpp