40 #ifndef MLP_NETWORK_HPP
41 #define MLP_NETWORK_HPP
42 #include "../linalg.hpp"
43 #include "../linalg/mtx_tmpl.hpp"
91 int64_t
rand_int(int64_t low, int64_t hi);
93 long double rand_real(
long double low,
long double hi);
141 int64_t
test(
const char *fname);
144 void run(
const char *fname,
const int64_t &max_iters);
156 void log(
auto &file,
const auto &x,
const auto &y,
const auto &y_hat) {
158 auto mse = (y.data[0] - y_hat.data[0]);
160 file << mse <<
" " << x.data[0] <<
" " << y.data[0] <<
" "
161 << y_hat.data[0] <<
" \n";
170 return 1.0f / (1 + exp(-x));
179 return (x * (1 - x));
184 std::vector<gpmp::linalg::Matrix<T>>
wt_mtx;
195 long double _lr = .001)
198 for (
size_t i = 0; i <
layer_units.size() - 1; ++i) {
207 wt_mtx.push_back(gauss_wt);
222 assert(std::get<0>(x.shape) ==
layer_units[0] && std::get<1>(x.shape));
228 for (uint64_t i = 0; i <
layer_units.size() - 1; ++i) {
246 assert(std::get<0>(target.shape) ==
layer_units.back());
250 auto err = target - y_hat;
253 for (int64_t i =
wt_mtx.size() - 1; i >= 0; --i) {
256 auto prior_errs = wt.mult(err);
259 auto gradients = err.hadamard(outputs_d);
260 gradients = gradients.scalar_mult(
lr);
262 auto gradients_wt = gradients.mult(trans_a);
Matrix and Scalar operations.
Primary Multi-Layer Perceptron Class.
PrimaryMLP(int64_t nl, int64_t npl[])
long double _MSE
Mean Squared Error.
void output_err(long double *target)
void run(const char *fname, const int64_t &max_iters)
long double _AvgTestError
long double rand_real(long double low, long double hi)
int64_t rand_int(int64_t low, int64_t hi)
void get_signal_out(long double *output)
int64_t test(const char *fname)
void simulate(long double *input, long double *output, long double *target, bool training)
void set_signal_in(long double *input)
int64_t train(const char *fnames)
long double _MAE
Mean Absolute Error.
Secondary Multi-Layer Perceptron Class making use of the Linear Algebra module.
void prop_backwards(gpmp::linalg::Matrix< T > target)
std::vector< gpmp::linalg::Matrix< T > > bias_vectors
std::vector< gpmp::linalg::Matrix< T > > wt_mtx
void log(auto &file, const auto &x, const auto &y, const auto &y_hat)
std::vector< gpmp::linalg::Matrix< T > > activations
SecondaryMLP(std::vector< size_t > _layer_units, long double _lr=.001)
long double sigmoid_activ(long double x)
Sigmoid activation function.
long double sigmoid_deriv(long double x)
Sigmoid derivative for backwards propagation.
auto prop_forwards(gpmp::linalg::Matrix< T > x)
std::vector< size_t > layer_units
The core C++ namespace of the openGPMP library.
static Matrix< T > randn(size_t rows, size_t cols)