Codestyle.

Relintai 2023-02-13 00:19:16 +01:00
parent 638ae1664f
commit d40ebe1ca3
10 changed files with 488 additions and 488 deletions
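
In short: a codestyle pass that renames protected and private member variables to a leading-underscore form (n_hidden becomes _n_hidden, weights becomes _weights, and so on) across the hidden layer, MLP, multi-output layer, output layer, and WGAN classes, with no behavioral change intended. A minimal sketch of the convention, using a hypothetical ExampleLayer class rather than any class from the diff:

class ExampleLayer {
public:
	int get_n_hidden() const { return _n_hidden; }
	void set_n_hidden(const int val) {
		_n_hidden = val;
		_initialized = false; // configuration changed; buffers must be rebuilt before use
	}

protected:
	int _n_hidden; // was: n_hidden
	bool _initialized; // already underscore-prefixed before this commit
};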

View File (MLPPHiddenLayer implementation)

@@ -12,114 +12,114 @@
#include <random>
int MLPPHiddenLayer::get_n_hidden() const {
return n_hidden;
return _n_hidden;
}
void MLPPHiddenLayer::set_n_hidden(const int val) {
n_hidden = val;
_n_hidden = val;
_initialized = false;
}
MLPPActivation::ActivationFunction MLPPHiddenLayer::get_activation() const {
return activation;
return _activation;
}
void MLPPHiddenLayer::set_activation(const MLPPActivation::ActivationFunction val) {
activation = val;
_activation = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPHiddenLayer::get_input() {
return input;
return _input;
}
void MLPPHiddenLayer::set_input(const Ref<MLPPMatrix> &val) {
input = val;
_input = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPHiddenLayer::get_weights() {
return weights;
return _weights;
}
void MLPPHiddenLayer::set_weights(const Ref<MLPPMatrix> &val) {
weights = val;
_weights = val;
_initialized = false;
}
Ref<MLPPVector> MLPPHiddenLayer::get_bias() {
return bias;
return _bias;
}
void MLPPHiddenLayer::set_bias(const Ref<MLPPVector> &val) {
bias = val;
_bias = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPHiddenLayer::get_z() {
return z;
return _z;
}
void MLPPHiddenLayer::set_z(const Ref<MLPPMatrix> &val) {
z = val;
_z = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPHiddenLayer::get_a() {
return a;
return _a;
}
void MLPPHiddenLayer::set_a(const Ref<MLPPMatrix> &val) {
a = val;
_a = val;
_initialized = false;
}
Ref<MLPPVector> MLPPHiddenLayer::get_z_test() {
return z_test;
return _z_test;
}
void MLPPHiddenLayer::set_z_test(const Ref<MLPPVector> &val) {
z_test = val;
_z_test = val;
_initialized = false;
}
Ref<MLPPVector> MLPPHiddenLayer::get_a_test() {
return a_test;
return _a_test;
}
void MLPPHiddenLayer::set_a_test(const Ref<MLPPVector> &val) {
a_test = val;
_a_test = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPHiddenLayer::get_delta() {
return delta;
return _delta;
}
void MLPPHiddenLayer::set_delta(const Ref<MLPPMatrix> &val) {
delta = val;
_delta = val;
_initialized = false;
}
MLPPReg::RegularizationType MLPPHiddenLayer::get_reg() const {
return reg;
return _reg;
}
void MLPPHiddenLayer::set_reg(const MLPPReg::RegularizationType val) {
reg = val;
_reg = val;
_initialized = false;
}
real_t MLPPHiddenLayer::get_lambda() const {
return lambda;
return _lambda;
}
void MLPPHiddenLayer::set_lambda(const real_t val) {
lambda = val;
_lambda = val;
_initialized = false;
}
real_t MLPPHiddenLayer::get_alpha() const {
return alpha;
return _alpha;
}
void MLPPHiddenLayer::set_alpha(const real_t val) {
alpha = val;
_alpha = val;
_initialized = false;
}
MLPPUtilities::WeightDistributionType MLPPHiddenLayer::get_weight_init() const {
return weight_init;
return _weight_init;
}
void MLPPHiddenLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
weight_init = val;
_weight_init = val;
_initialized = false;
}
@@ -131,13 +131,13 @@ void MLPPHiddenLayer::initialize() {
return;
}
weights->resize(Size2i(n_hidden, input->size().x));
bias->resize(n_hidden);
_weights->resize(Size2i(_n_hidden, _input->size().x));
_bias->resize(_n_hidden);
MLPPUtilities utils;
utils.weight_initializationm(weights, weight_init);
utils.bias_initializationv(bias);
utils.weight_initializationm(_weights, _weight_init);
utils.bias_initializationv(_bias);
_initialized = true;
}
@@ -150,8 +150,8 @@ void MLPPHiddenLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.mat_vec_addv(alg.matmultm(input, weights), bias);
a = avn.run_activation_norm_matrix(activation, z);
_z = alg.mat_vec_addv(alg.matmultm(_input, _weights), _bias);
_a = avn.run_activation_norm_matrix(_activation, _z);
}
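
For reference, the hidden layer's forward pass implements, in the diff's own symbols ($X$ = _input, $W$ = _weights, $b$ = _bias, $\varphi$ = the configured activation; a reconstruction from the calls above, not library documentation):

$$Z = XW + b, \qquad A = \varphi(Z),$$

with mat_vec_addv broadcasting the bias vector $b$ across the rows of $XW$.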
void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
@@ -162,66 +162,66 @@ void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.additionm(alg.mat_vec_multv(alg.transposem(weights), x), bias);
a_test = avn.run_activation_norm_matrix(activation, z_test);
_z_test = alg.additionm(alg.mat_vec_multv(alg.transposem(_weights), x), _bias);
_a_test = avn.run_activation_norm_matrix(_activation, _z_test);
}
MLPPHiddenLayer::MLPPHiddenLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
n_hidden = p_n_hidden;
activation = p_activation;
_n_hidden = p_n_hidden;
_activation = p_activation;
input = p_input;
_input = p_input;
// Regularization Params
reg = p_reg;
lambda = p_lambda; /* Regularization Parameter */
alpha = p_alpha; /* This is the controlling param for Elastic Net*/
_reg = p_reg;
_lambda = p_lambda; /* Regularization Parameter */
_alpha = p_alpha; /* This is the controlling param for Elastic Net*/
weight_init = p_weight_init;
_weight_init = p_weight_init;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias.instance();
_weights.instance();
_bias.instance();
weights->resize(Size2i(n_hidden, input->size().x));
bias->resize(n_hidden);
_weights->resize(Size2i(_n_hidden, _input->size().x));
_bias->resize(_n_hidden);
MLPPUtilities utils;
utils.weight_initializationm(weights, weight_init);
utils.bias_initializationv(bias);
utils.weight_initializationm(_weights, _weight_init);
utils.bias_initializationv(_bias);
_initialized = true;
}
MLPPHiddenLayer::MLPPHiddenLayer() {
n_hidden = 0;
activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
_n_hidden = 0;
_activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
// Regularization Params
//reg = 0;
lambda = 0; /* Regularization Parameter */
alpha = 0; /* This is the controlling param for Elastic Net*/
_lambda = 0; /* Regularization Parameter */
_alpha = 0; /* This is the controlling param for Elastic Net*/
weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
_weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias.instance();
_weights.instance();
_bias.instance();
_initialized = false;
}
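
The two constructors also show the class's lazy-initialization pattern: the full constructor allocates and seeds _weights and _bias immediately, while the default constructor leaves _initialized false so that initialize() runs later, once n_hidden and input have been set. A hedged usage sketch (Ref is Godot's reference-counted smart pointer; input_matrix stands in for a previously prepared Ref<MLPPMatrix>):

Ref<MLPPHiddenLayer> layer;
layer.instance(); // default constructor: no buffers yet, _initialized == false
layer->set_n_hidden(16);
layer->set_input(input_matrix);
layer->initialize(); // resizes and seeds _weights and _bias
layer->forward_pass(); // now safe: fills _z and _a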

View File (MLPPHiddenLayer header)

@@ -84,28 +84,28 @@ public:
protected:
static void _bind_methods();
int n_hidden;
MLPPActivation::ActivationFunction activation;
int _n_hidden;
MLPPActivation::ActivationFunction _activation;
Ref<MLPPMatrix> input;
Ref<MLPPMatrix> _input;
Ref<MLPPMatrix> weights;
Ref<MLPPVector> bias;
Ref<MLPPMatrix> _weights;
Ref<MLPPVector> _bias;
Ref<MLPPMatrix> z;
Ref<MLPPMatrix> a;
Ref<MLPPMatrix> _z;
Ref<MLPPMatrix> _a;
Ref<MLPPVector> z_test;
Ref<MLPPVector> a_test;
Ref<MLPPVector> _z_test;
Ref<MLPPVector> _a_test;
Ref<MLPPMatrix> delta;
Ref<MLPPMatrix> _delta;
// Regularization Params
MLPPReg::RegularizationType reg;
real_t lambda; /* Regularization Parameter */
real_t alpha; /* This is the controlling param for Elastic Net*/
MLPPReg::RegularizationType _reg;
real_t _lambda; /* Regularization Parameter */
real_t _alpha; /* This is the controlling param for Elastic Net*/
MLPPUtilities::WeightDistributionType weight_init;
MLPPUtilities::WeightDistributionType _weight_init;
bool _initialized;
};

View File (MLPPMLP implementation)

@@ -18,55 +18,55 @@
#include <random>
Ref<MLPPMatrix> MLPPMLP::get_input_set() {
return input_set;
return _input_set;
}
void MLPPMLP::set_input_set(const Ref<MLPPMatrix> &val) {
input_set = val;
_input_set = val;
_initialized = false;
}
Ref<MLPPVector> MLPPMLP::get_output_set() {
return output_set;
return _output_set;
}
void MLPPMLP::set_output_set(const Ref<MLPPVector> &val) {
output_set = val;
_output_set = val;
_initialized = false;
}
int MLPPMLP::get_n_hidden() {
return n_hidden;
return _n_hidden;
}
void MLPPMLP::set_n_hidden(const int val) {
n_hidden = val;
_n_hidden = val;
_initialized = false;
}
real_t MLPPMLP::get_lambda() {
return lambda;
return _lambda;
}
void MLPPMLP::set_lambda(const real_t val) {
lambda = val;
_lambda = val;
_initialized = false;
}
real_t MLPPMLP::get_alpha() {
return alpha;
return _alpha;
}
void MLPPMLP::set_alpha(const real_t val) {
alpha = val;
_alpha = val;
_initialized = false;
}
MLPPReg::RegularizationType MLPPMLP::get_reg() {
return reg;
return _reg;
}
void MLPPMLP::set_reg(const MLPPReg::RegularizationType val) {
reg = val;
_reg = val;
_initialized = false;
}
@@ -88,47 +88,47 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
real_t cost_prev = 0;
int epoch = 1;
y_hat->fill(0);
_y_hat->fill(0);
forward_pass();
while (true) {
cost_prev = cost(y_hat, output_set);
cost_prev = cost(_y_hat, _output_set);
// Calculating the errors
Ref<MLPPVector> error = alg.subtractionnv(y_hat, output_set);
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight/bias gradients for layer 2
Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposem(a2), error);
Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposem(_a2), error);
// weights and bias update for layer 2
weights2->set_from_mlpp_vector(alg.subtractionnv(weights2, alg.scalar_multiplynv(learning_rate / static_cast<real_t>(n), D2_1)));
weights2->set_from_mlpp_vector(regularization.reg_weightsv(weights2, lambda, alpha, reg));
_weights2->set_from_mlpp_vector(alg.subtractionnv(_weights2, alg.scalar_multiplynv(learning_rate / static_cast<real_t>(_n), D2_1)));
_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));
bias2 -= learning_rate * alg.sum_elementsv(error) / static_cast<real_t>(n);
_bias2 -= learning_rate * alg.sum_elementsv(error) / static_cast<real_t>(_n);
// Calculating the weight/bias for layer 1
Ref<MLPPMatrix> D1_1 = alg.outer_product(error, weights2);
Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(alg.transposem(D1_1), avn.sigmoid_derivm(z2));
Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(input_set), D1_2);
Ref<MLPPMatrix> D1_1 = alg.outer_product(error, _weights2);
Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(alg.transposem(D1_1), avn.sigmoid_derivm(_z2));
Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(_input_set), D1_2);
// weight and bias update for layer 1
weights1->set_from_mlpp_matrix(alg.subtractionm(weights1, alg.scalar_multiplym(learning_rate / n, D1_3)));
weights1->set_from_mlpp_matrix(regularization.reg_weightsm(weights1, lambda, alpha, reg));
_weights1->set_from_mlpp_matrix(alg.subtractionm(_weights1, alg.scalar_multiplym(learning_rate / _n, D1_3)));
_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));
bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(bias1, alg.scalar_multiplym(learning_rate / n, D1_2)));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(_bias1, alg.scalar_multiplym(learning_rate / _n, D1_2)));
forward_pass();
// UI PORTION
if (UI) {
MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, output_set));
MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, _output_set));
PLOG_MSG("Layer 1:");
MLPPUtilities::print_ui_mb(weights1, bias1);
MLPPUtilities::print_ui_mb(_weights1, _bias1);
PLOG_MSG("Layer 2:");
MLPPUtilities::print_ui_vb(weights2, bias2);
MLPPUtilities::print_ui_vb(_weights2, _bias2);
}
epoch++;
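
For readers tracing the D2_* and D1_* temporaries: with sigmoid activations and log-loss, the error is $e = \hat{y} - y$, and the updates above correspond to (a sketch reconstructed from the calls, in the code's symbols)

$$\nabla_{w_2} = A_2^{\top} e, \qquad \nabla_{b_2} = \sum_i e_i, \qquad \Delta_1 = (e\, w_2^{\top}) \odot \sigma'(Z_2), \qquad \nabla_{W_1} = X^{\top} \Delta_1,$$

each scaled by learning_rate over $n$ before subtraction, with the regularization penalty re-applied to the weights afterwards; the bias1 update sums $\Delta_1$ over its rows via subtract_matrix_rows.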
@@ -150,11 +150,11 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
std::random_device rd;
std::default_random_engine generator(rd());
std::uniform_int_distribution<int> distribution(0, int(n - 1));
std::uniform_int_distribution<int> distribution(0, int(_n - 1));
Ref<MLPPVector> input_set_row_tmp;
input_set_row_tmp.instance();
input_set_row_tmp->resize(input_set->size().x);
input_set_row_tmp->resize(_input_set->size().x);
Ref<MLPPVector> output_set_row_tmp;
output_set_row_tmp.instance();
@@ -172,8 +172,8 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
while (true) {
int output_Index = distribution(generator);
input_set->get_row_into_mlpp_vector(output_Index, input_set_row_tmp);
real_t output_element = output_set->get_element(output_Index);
_input_set->get_row_into_mlpp_vector(output_Index, input_set_row_tmp);
real_t output_element = _output_set->get_element(output_Index);
output_set_row_tmp->set_element(0, output_element);
real_t ly_hat = evaluatev(input_set_row_tmp);
@@ -185,31 +185,31 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
// Weight update for layer 2
Ref<MLPPVector> D2_1 = alg.scalar_multiplynv(error, la2);
weights2->set_from_mlpp_vector(alg.subtractionnv(weights2, alg.scalar_multiplynv(learning_rate, D2_1)));
weights2->set_from_mlpp_vector(regularization.reg_weightsv(weights2, lambda, alpha, reg));
_weights2->set_from_mlpp_vector(alg.subtractionnv(_weights2, alg.scalar_multiplynv(learning_rate, D2_1)));
_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));
// Bias update for layer 2
bias2 -= learning_rate * error;
_bias2 -= learning_rate * error;
// Weight update for layer 1
Ref<MLPPVector> D1_1 = alg.scalar_multiplynv(error, weights2);
Ref<MLPPVector> D1_1 = alg.scalar_multiplynv(error, _weights2);
Ref<MLPPVector> D1_2 = alg.hadamard_productnv(D1_1, avn.sigmoid_derivv(lz2));
Ref<MLPPMatrix> D1_3 = alg.outer_product(input_set_row_tmp, D1_2);
weights1->set_from_mlpp_matrix(alg.subtractionm(weights1, alg.scalar_multiplym(learning_rate, D1_3)));
weights1->set_from_mlpp_matrix(regularization.reg_weightsm(weights1, lambda, alpha, reg));
_weights1->set_from_mlpp_matrix(alg.subtractionm(_weights1, alg.scalar_multiplym(learning_rate, D1_3)));
_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));
// Bias update for layer 1
bias1->set_from_mlpp_vector(alg.subtractionnv(bias1, alg.scalar_multiplynv(learning_rate, D1_2)));
_bias1->set_from_mlpp_vector(alg.subtractionnv(_bias1, alg.scalar_multiplynv(learning_rate, D1_2)));
ly_hat = evaluatev(input_set_row_tmp);
if (UI) {
MLPPUtilities::cost_info(epoch, cost_prev, cost_prev);
PLOG_MSG("Layer 1:");
MLPPUtilities::print_ui_mb(weights1, bias1);
MLPPUtilities::print_ui_mb(_weights1, _bias1);
PLOG_MSG("Layer 2:");
MLPPUtilities::print_ui_vb(weights2, bias2);
MLPPUtilities::print_ui_vb(_weights2, _bias2);
}
epoch++;
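
The SGD variant is the same update with a batch of one: each iteration draws a row index $i$ uniformly from $\{0, \dots, n-1\}$ and applies, for layer 2,

$$w_2 \leftarrow \operatorname{reg}\left(w_2 - \eta\, e_i\, a_2^{(i)}\right),$$

with no $1/n$ scaling, where $e_i$ is the single-sample error and $a_2^{(i)}$ the hidden activations of that row (notation reconstructed from the code above).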
@@ -237,9 +237,9 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
la2.instance();
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
int n_mini_batch = _n / mini_batch_size;
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(input_set, output_set, n_mini_batch);
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@@ -259,34 +259,34 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
real_t lr_d_cos = learning_rate / static_cast<real_t>(current_output->size());
// weights and bias update for layer 2
weights2->set_from_mlpp_vector(alg.subtractionnv(weights2, alg.scalar_multiplynv(lr_d_cos, D2_1)));
weights2->set_from_mlpp_vector(regularization.reg_weightsv(weights2, lambda, alpha, reg));
_weights2->set_from_mlpp_vector(alg.subtractionnv(_weights2, alg.scalar_multiplynv(lr_d_cos, D2_1)));
_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));
// Calculating the bias gradients for layer 2
real_t b_gradient = alg.sum_elementsv(error);
// Bias update for layer 2
bias2 -= learning_rate * b_gradient / current_output->size();
_bias2 -= learning_rate * b_gradient / current_output->size();
// Calculating the weight/bias gradients for layer 1
Ref<MLPPMatrix> D1_1 = alg.outer_product(error, weights2);
Ref<MLPPMatrix> D1_1 = alg.outer_product(error, _weights2);
Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(D1_1, avn.sigmoid_derivm(lz2));
Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(current_input), D1_2);
// weight and bias update for layer 1
weights1->set_from_mlpp_matrix(alg.subtractionm(weights1, alg.scalar_multiplym(lr_d_cos, D1_3)));
weights1->set_from_mlpp_matrix(regularization.reg_weightsm(weights1, lambda, alpha, reg));
_weights1->set_from_mlpp_matrix(alg.subtractionm(_weights1, alg.scalar_multiplym(lr_d_cos, D1_3)));
_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));
bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(bias1, alg.scalar_multiplym(lr_d_cos, D1_2)));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(_bias1, alg.scalar_multiplym(lr_d_cos, D1_2)));
y_hat = evaluatem(current_input);
_y_hat = evaluatem(current_input);
if (UI) {
MLPPUtilities::CostInfo(epoch, cost_prev, cost(ly_hat, current_output));
PLOG_MSG("Layer 1:");
MLPPUtilities::print_ui_mb(weights1, bias1);
MLPPUtilities::print_ui_mb(_weights1, _bias1);
PLOG_MSG("Layer 2:");
MLPPUtilities::print_ui_vb(weights2, bias2);
MLPPUtilities::print_ui_vb(_weights2, _bias2);
}
}
@@ -302,7 +302,7 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
real_t MLPPMLP::score() {
MLPPUtilities util;
return util.performance_vec(y_hat, output_set);
return util.performance_vec(_y_hat, _output_set);
}
void MLPPMLP::save(const String &fileName) {
@@ -322,54 +322,54 @@ void MLPPMLP::initialize() {
return;
}
ERR_FAIL_COND(!input_set.is_valid() || !output_set.is_valid() || n_hidden == 0);
ERR_FAIL_COND(!_input_set.is_valid() || !_output_set.is_valid() || _n_hidden == 0);
n = input_set->size().y;
k = input_set->size().x;
_n = _input_set->size().y;
_k = _input_set->size().x;
MLPPActivation avn;
y_hat->resize(n);
_y_hat->resize(_n);
MLPPUtilities util;
weights1->resize(Size2i(k, n_hidden));
weights2->resize(n_hidden);
bias1->resize(n_hidden);
_weights1->resize(Size2i(_k, _n_hidden));
_weights2->resize(_n_hidden);
_bias1->resize(_n_hidden);
util.weight_initializationm(weights1);
util.weight_initializationv(weights2);
util.bias_initializationv(bias1);
util.weight_initializationm(_weights1);
util.weight_initializationv(_weights2);
util.bias_initializationv(_bias1);
bias2 = util.bias_initializationr();
_bias2 = util.bias_initializationr();
z2.instance();
a2.instance();
_z2.instance();
_a2.instance();
_initialized = true;
}
real_t MLPPMLP::cost(const Ref<MLPPVector> &p_y_hat, const Ref<MLPPVector> &p_y) {
MLPPReg regularization;
class MLPPCost cost;
MLPPCost mlpp_cost;
return cost.log_lossv(p_y_hat, p_y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termm(weights1, lambda, alpha, reg);
return mlpp_cost.log_lossv(p_y_hat, p_y) + regularization.reg_termv(_weights2, _lambda, _alpha, _reg) + regularization.reg_termm(_weights1, _lambda, _alpha, _reg);
}
Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultm(X, _weights1), _bias1);
Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);
return avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(pa2, weights2)));
return avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multv(pa2, _weights2)));
}
void MLPPMLP::propagatem(const Ref<MLPPMatrix> &X, Ref<MLPPMatrix> z2_out, Ref<MLPPMatrix> a2_out) {
MLPPLinAlg alg;
MLPPActivation avn;
z2_out->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(X, weights1), bias1));
z2_out->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(X, _weights1), _bias1));
a2_out->set_from_mlpp_matrix(avn.sigmoid_normm(z2_out));
}
@@ -377,17 +377,17 @@ real_t MLPPMLP::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposem(weights1), x), bias1);
Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposem(_weights1), x), _bias1);
Ref<MLPPVector> pa2 = avn.sigmoid_normv(pz2);
return avn.sigmoid(alg.dotv(weights2, pa2) + bias2);
return avn.sigmoid(alg.dotv(_weights2, pa2) + _bias2);
}
void MLPPMLP::propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out) {
MLPPLinAlg alg;
MLPPActivation avn;
z2_out->set_from_mlpp_vector(alg.additionnv(alg.mat_vec_multv(alg.transposem(weights1), x), bias1));
z2_out->set_from_mlpp_vector(alg.additionnv(alg.mat_vec_multv(alg.transposem(_weights1), x), _bias1));
a2_out->set_from_mlpp_vector(avn.sigmoid_normv(z2_out));
}
@@ -395,69 +395,69 @@ void MLPPMLP::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
z2->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1));
a2->set_from_mlpp_matrix(avn.sigmoid_normm(z2));
_z2->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(_input_set, _weights1), _bias1));
_a2->set_from_mlpp_matrix(avn.sigmoid_normm(_z2));
y_hat->set_from_mlpp_vector(avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(a2, weights2))));
_y_hat->set_from_mlpp_vector(avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multv(_a2, _weights2))));
}
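
Taken together, the three statements evaluate the whole two-layer model (a reconstruction from the code; $\sigma$ is the sigmoid):

$$Z_2 = X W_1 + b_1, \qquad A_2 = \sigma(Z_2), \qquad \hat{y} = \sigma(A_2 w_2 + b_2).$$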
MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
input_set = p_input_set;
output_set = p_output_set;
_input_set = p_input_set;
_output_set = p_output_set;
y_hat.instance();
_y_hat.instance();
n_hidden = p_n_hidden;
n = input_set->size().y;
k = input_set->size().x;
reg = p_reg;
lambda = p_lambda;
alpha = p_alpha;
_n_hidden = p_n_hidden;
_n = _input_set->size().y;
_k = _input_set->size().x;
_reg = p_reg;
_lambda = p_lambda;
_alpha = p_alpha;
MLPPActivation avn;
y_hat->resize(n);
_y_hat->resize(_n);
MLPPUtilities util;
weights1.instance();
weights1->resize(Size2i(k, n_hidden));
_weights1.instance();
_weights1->resize(Size2i(_k, _n_hidden));
weights2.instance();
weights2->resize(n_hidden);
_weights2.instance();
_weights2->resize(_n_hidden);
bias1.instance();
bias1->resize(n_hidden);
_bias1.instance();
_bias1->resize(_n_hidden);
util.weight_initializationm(weights1);
util.weight_initializationv(weights2);
util.bias_initializationv(bias1);
util.weight_initializationm(_weights1);
util.weight_initializationv(_weights2);
util.bias_initializationv(_bias1);
bias2 = util.bias_initializationr();
_bias2 = util.bias_initializationr();
z2.instance();
a2.instance();
_z2.instance();
_a2.instance();
_initialized = true;
}
MLPPMLP::MLPPMLP() {
y_hat.instance();
_y_hat.instance();
n_hidden = 0;
n = 0;
k = 0;
reg = MLPPReg::REGULARIZATION_TYPE_NONE;
lambda = 0.5;
alpha = 0.5;
_n_hidden = 0;
_n = 0;
_k = 0;
_reg = MLPPReg::REGULARIZATION_TYPE_NONE;
_lambda = 0.5;
_alpha = 0.5;
weights1.instance();
weights2.instance();
bias1.instance();
_weights1.instance();
_weights2.instance();
_bias1.instance();
bias2 = 0;
_bias2 = 0;
z2.instance();
a2.instance();
_z2.instance();
_a2.instance();
_initialized = false;
}

View File (MLPPMLP header)

@@ -77,27 +77,27 @@ protected:
static void _bind_methods();
Ref<MLPPMatrix> input_set;
Ref<MLPPVector> output_set;
Ref<MLPPVector> y_hat;
Ref<MLPPMatrix> _input_set;
Ref<MLPPVector> _output_set;
Ref<MLPPVector> _y_hat;
Ref<MLPPMatrix> weights1;
Ref<MLPPVector> weights2;
Ref<MLPPMatrix> _weights1;
Ref<MLPPVector> _weights2;
Ref<MLPPVector> bias1;
real_t bias2;
Ref<MLPPVector> _bias1;
real_t _bias2;
Ref<MLPPMatrix> z2;
Ref<MLPPMatrix> a2;
Ref<MLPPMatrix> _z2;
Ref<MLPPMatrix> _a2;
int n;
int k;
int n_hidden;
int _n;
int _k;
int _n_hidden;
// Regularization Params
MLPPReg::RegularizationType reg;
real_t lambda; /* Regularization Parameter */
real_t alpha; /* This is the controlling param for Elastic Net*/
MLPPReg::RegularizationType _reg;
real_t _lambda; /* Regularization Parameter */
real_t _alpha; /* This is the controlling param for Elastic Net*/
bool _initialized;
};

View File (MLPPMultiOutputLayer implementation)

@@ -9,187 +9,187 @@
#include "../utilities/utilities.h"
int MLPPMultiOutputLayer::get_n_output() {
return n_output;
return _n_output;
}
void MLPPMultiOutputLayer::set_n_output(const int val) {
n_output = val;
_n_output = val;
}
int MLPPMultiOutputLayer::get_n_hidden() {
return n_hidden;
return _n_hidden;
}
void MLPPMultiOutputLayer::set_n_hidden(const int val) {
n_hidden = val;
_n_hidden = val;
}
MLPPActivation::ActivationFunction MLPPMultiOutputLayer::get_activation() {
return activation;
return _activation;
}
void MLPPMultiOutputLayer::set_activation(const MLPPActivation::ActivationFunction val) {
activation = val;
_activation = val;
}
MLPPCost::CostTypes MLPPMultiOutputLayer::get_cost() {
return cost;
return _cost;
}
void MLPPMultiOutputLayer::set_cost(const MLPPCost::CostTypes val) {
cost = val;
_cost = val;
}
Ref<MLPPMatrix> MLPPMultiOutputLayer::get_input() {
return input;
return _input;
}
void MLPPMultiOutputLayer::set_input(const Ref<MLPPMatrix> &val) {
input = val;
_input = val;
}
Ref<MLPPMatrix> MLPPMultiOutputLayer::get_weights() {
return weights;
return _weights;
}
void MLPPMultiOutputLayer::set_weights(const Ref<MLPPMatrix> &val) {
weights = val;
_weights = val;
}
Ref<MLPPVector> MLPPMultiOutputLayer::get_bias() {
return bias;
return _bias;
}
void MLPPMultiOutputLayer::set_bias(const Ref<MLPPVector> &val) {
bias = val;
_bias = val;
}
Ref<MLPPMatrix> MLPPMultiOutputLayer::get_z() {
return z;
return _z;
}
void MLPPMultiOutputLayer::set_z(const Ref<MLPPMatrix> &val) {
z = val;
_z = val;
}
Ref<MLPPMatrix> MLPPMultiOutputLayer::get_a() {
return a;
return _a;
}
void MLPPMultiOutputLayer::set_a(const Ref<MLPPMatrix> &val) {
a = val;
_a = val;
}
Ref<MLPPVector> MLPPMultiOutputLayer::get_z_test() {
return z_test;
return _z_test;
}
void MLPPMultiOutputLayer::set_z_test(const Ref<MLPPVector> &val) {
z_test = val;
_z_test = val;
}
Ref<MLPPVector> MLPPMultiOutputLayer::get_a_test() {
return a_test;
return _a_test;
}
void MLPPMultiOutputLayer::set_a_test(const Ref<MLPPVector> &val) {
a_test = val;
_a_test = val;
}
Ref<MLPPMatrix> MLPPMultiOutputLayer::get_delta() {
return delta;
return _delta;
}
void MLPPMultiOutputLayer::set_delta(const Ref<MLPPMatrix> &val) {
delta = val;
_delta = val;
}
MLPPReg::RegularizationType MLPPMultiOutputLayer::get_reg() {
return reg;
return _reg;
}
void MLPPMultiOutputLayer::set_reg(const MLPPReg::RegularizationType val) {
reg = val;
_reg = val;
}
real_t MLPPMultiOutputLayer::get_lambda() {
return lambda;
return _lambda;
}
void MLPPMultiOutputLayer::set_lambda(const real_t val) {
lambda = val;
_lambda = val;
}
real_t MLPPMultiOutputLayer::get_alpha() {
return alpha;
return _alpha;
}
void MLPPMultiOutputLayer::set_alpha(const real_t val) {
alpha = val;
_alpha = val;
}
MLPPUtilities::WeightDistributionType MLPPMultiOutputLayer::get_weight_init() {
return weight_init;
return _weight_init;
}
void MLPPMultiOutputLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
weight_init = val;
_weight_init = val;
}
void MLPPMultiOutputLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.mat_vec_addv(alg.matmultm(input, weights), bias);
a = avn.run_activation_norm_matrix(activation, z);
_z = alg.mat_vec_addv(alg.matmultm(_input, _weights), _bias);
_a = avn.run_activation_norm_matrix(_activation, _z);
}
void MLPPMultiOutputLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.additionm(alg.mat_vec_multv(alg.transposem(weights), x), bias);
a_test = avn.run_activation_norm_vector(activation, z_test);
_z_test = alg.additionm(alg.mat_vec_multv(alg.transposem(_weights), x), _bias);
_a_test = avn.run_activation_norm_vector(_activation, _z_test);
}
MLPPMultiOutputLayer::MLPPMultiOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
n_hidden = p_n_hidden;
activation = p_activation;
_n_hidden = p_n_hidden;
_activation = p_activation;
input = p_input;
_input = p_input;
// Regularization Params
reg = p_reg;
lambda = p_lambda; /* Regularization Parameter */
alpha = p_alpha; /* This is the controlling param for Elastic Net*/
_reg = p_reg;
_lambda = p_lambda; /* Regularization Parameter */
_alpha = p_alpha; /* This is the controlling param for Elastic Net*/
weight_init = p_weight_init;
_weight_init = p_weight_init;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias.instance();
_weights.instance();
_bias.instance();
weights->resize(Size2i(n_hidden, n_output));
bias->resize(n_output);
_weights->resize(Size2i(_n_hidden, _n_output));
_bias->resize(_n_output);
MLPPUtilities utils;
utils.weight_initializationm(weights, weight_init);
utils.bias_initializationv(bias);
utils.weight_initializationm(_weights, _weight_init);
utils.bias_initializationv(_bias);
}
MLPPMultiOutputLayer::MLPPMultiOutputLayer() {
n_hidden = 0;
activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
_n_hidden = 0;
_activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
// Regularization Params
//reg = 0;
lambda = 0; /* Regularization Parameter */
alpha = 0; /* This is the controlling param for Elastic Net*/
_lambda = 0; /* Regularization Parameter */
_alpha = 0; /* This is the controlling param for Elastic Net*/
weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
_weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias.instance();
_weights.instance();
_bias.instance();
}
MLPPMultiOutputLayer::~MLPPMultiOutputLayer() {
}

View File (MLPPMultiOutputLayer header)

@@ -84,30 +84,30 @@ public:
protected:
static void _bind_methods();
int n_output;
int n_hidden;
MLPPActivation::ActivationFunction activation;
MLPPCost::CostTypes cost;
int _n_output;
int _n_hidden;
MLPPActivation::ActivationFunction _activation;
MLPPCost::CostTypes _cost;
Ref<MLPPMatrix> input;
Ref<MLPPMatrix> _input;
Ref<MLPPMatrix> weights;
Ref<MLPPVector> bias;
Ref<MLPPMatrix> _weights;
Ref<MLPPVector> _bias;
Ref<MLPPMatrix> z;
Ref<MLPPMatrix> a;
Ref<MLPPMatrix> _z;
Ref<MLPPMatrix> _a;
Ref<MLPPVector> z_test;
Ref<MLPPVector> a_test;
Ref<MLPPVector> _z_test;
Ref<MLPPVector> _a_test;
Ref<MLPPMatrix> delta;
Ref<MLPPMatrix> _delta;
// Regularization Params
MLPPReg::RegularizationType reg;
real_t lambda; /* Regularization Parameter */
real_t alpha; /* This is the controlling param for Elastic Net*/
MLPPReg::RegularizationType _reg;
real_t _lambda; /* Regularization Parameter */
real_t _alpha; /* This is the controlling param for Elastic Net*/
MLPPUtilities::WeightDistributionType weight_init;
MLPPUtilities::WeightDistributionType _weight_init;
};
#endif /* MultiOutputLayer_hpp */

View File (MLPPOutputLayer implementation)

@@ -9,121 +9,121 @@
#include "../utilities/utilities.h"
int MLPPOutputLayer::get_n_hidden() {
return n_hidden;
return _n_hidden;
}
void MLPPOutputLayer::set_n_hidden(const int val) {
n_hidden = val;
_n_hidden = val;
_initialized = false;
}
MLPPActivation::ActivationFunction MLPPOutputLayer::get_activation() {
return activation;
return _activation;
}
void MLPPOutputLayer::set_activation(const MLPPActivation::ActivationFunction val) {
activation = val;
_activation = val;
_initialized = false;
}
MLPPCost::CostTypes MLPPOutputLayer::get_cost() {
return cost;
return _cost;
}
void MLPPOutputLayer::set_cost(const MLPPCost::CostTypes val) {
cost = val;
_cost = val;
_initialized = false;
}
Ref<MLPPMatrix> MLPPOutputLayer::get_input() {
return input;
return _input;
}
void MLPPOutputLayer::set_input(const Ref<MLPPMatrix> &val) {
input = val;
_input = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_weights() {
return weights;
return _weights;
}
void MLPPOutputLayer::set_weights(const Ref<MLPPVector> &val) {
weights = val;
_weights = val;
_initialized = false;
}
real_t MLPPOutputLayer::get_bias() {
return bias;
return _bias;
}
void MLPPOutputLayer::set_bias(const real_t val) {
bias = val;
_bias = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_z() {
return z;
return _z;
}
void MLPPOutputLayer::set_z(const Ref<MLPPVector> &val) {
z = val;
_z = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_a() {
return a;
return _a;
}
void MLPPOutputLayer::set_a(const Ref<MLPPVector> &val) {
a = val;
_a = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_z_test() {
return z_test;
return _z_test;
}
void MLPPOutputLayer::set_z_test(const Ref<MLPPVector> &val) {
z_test = val;
_z_test = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_a_test() {
return a_test;
return _a_test;
}
void MLPPOutputLayer::set_a_test(const Ref<MLPPVector> &val) {
a_test = val;
_a_test = val;
_initialized = false;
}
Ref<MLPPVector> MLPPOutputLayer::get_delta() {
return delta;
return _delta;
}
void MLPPOutputLayer::set_delta(const Ref<MLPPVector> &val) {
delta = val;
_delta = val;
_initialized = false;
}
MLPPReg::RegularizationType MLPPOutputLayer::get_reg() {
return reg;
return _reg;
}
void MLPPOutputLayer::set_reg(const MLPPReg::RegularizationType val) {
reg = val;
_reg = val;
}
real_t MLPPOutputLayer::get_lambda() {
return lambda;
return _lambda;
}
void MLPPOutputLayer::set_lambda(const real_t val) {
lambda = val;
_lambda = val;
_initialized = false;
}
real_t MLPPOutputLayer::get_alpha() {
return alpha;
return _alpha;
}
void MLPPOutputLayer::set_alpha(const real_t val) {
alpha = val;
_alpha = val;
_initialized = false;
}
MLPPUtilities::WeightDistributionType MLPPOutputLayer::get_weight_init() {
return weight_init;
return _weight_init;
}
void MLPPOutputLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
weight_init = val;
_weight_init = val;
_initialized = false;
}
@@ -135,12 +135,12 @@ void MLPPOutputLayer::initialize() {
return;
}
weights->resize(n_hidden);
_weights->resize(_n_hidden);
MLPPUtilities utils;
utils.weight_initializationv(weights, weight_init);
bias = utils.bias_initializationr();
utils.weight_initializationv(_weights, _weight_init);
_bias = utils.bias_initializationr();
_initialized = true;
}
@@ -153,8 +153,8 @@ void MLPPOutputLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
z = alg.scalar_addnv(bias, alg.mat_vec_multv(input, weights));
a = avn.run_activation_norm_vector(activation, z);
_z = alg.scalar_addnv(_bias, alg.mat_vec_multv(_input, _weights));
_a = avn.run_activation_norm_vector(_activation, _z);
}
void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
@@ -165,65 +165,65 @@ void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
z_test = alg.dotv(weights, x) + bias;
a_test = avn.run_activation_norm_vector(activation, z_test);
_z_test = alg.dotv(_weights, x) + _bias;
_a_test = avn.run_activation_norm_vector(_activation, _z_test);
}
MLPPOutputLayer::MLPPOutputLayer(int p_n_hidden, MLPPActivation::ActivationFunction p_activation, Ref<MLPPMatrix> p_input, MLPPUtilities::WeightDistributionType p_weight_init, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
n_hidden = p_n_hidden;
activation = p_activation;
_n_hidden = p_n_hidden;
_activation = p_activation;
input = p_input;
_input = p_input;
// Regularization Params
reg = p_reg;
lambda = p_lambda; /* Regularization Parameter */
alpha = p_alpha; /* This is the controlling param for Elastic Net*/
_reg = p_reg;
_lambda = p_lambda; /* Regularization Parameter */
_alpha = p_alpha; /* This is the controlling param for Elastic Net*/
weight_init = p_weight_init;
_weight_init = p_weight_init;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias = 0;
_weights.instance();
_bias = 0;
weights->resize(n_hidden);
_weights->resize(_n_hidden);
MLPPUtilities utils;
utils.weight_initializationv(weights, weight_init);
bias = utils.bias_initializationr();
utils.weight_initializationv(_weights, _weight_init);
_bias = utils.bias_initializationr();
_initialized = true;
}
MLPPOutputLayer::MLPPOutputLayer() {
n_hidden = 0;
activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
_n_hidden = 0;
_activation = MLPPActivation::ACTIVATION_FUNCTION_LINEAR;
// Regularization Params
//reg = 0;
lambda = 0; /* Regularization Parameter */
alpha = 0; /* This is the controlling param for Elastic Net*/
_lambda = 0; /* Regularization Parameter */
_alpha = 0; /* This is the controlling param for Elastic Net*/
weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
_weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT;
z.instance();
a.instance();
_z.instance();
_a.instance();
z_test.instance();
a_test.instance();
_z_test.instance();
_a_test.instance();
delta.instance();
_delta.instance();
weights.instance();
bias = 0;
_weights.instance();
_bias = 0;
_initialized = false;
}

View File (MLPPOutputLayer header)

@@ -84,29 +84,29 @@ public:
protected:
static void _bind_methods();
int n_hidden;
MLPPActivation::ActivationFunction activation;
MLPPCost::CostTypes cost;
int _n_hidden;
MLPPActivation::ActivationFunction _activation;
MLPPCost::CostTypes _cost;
Ref<MLPPMatrix> input;
Ref<MLPPMatrix> _input;
Ref<MLPPVector> weights;
real_t bias;
Ref<MLPPVector> _weights;
real_t _bias;
Ref<MLPPVector> z;
Ref<MLPPVector> a;
Ref<MLPPVector> _z;
Ref<MLPPVector> _a;
Ref<MLPPVector> z_test;
Ref<MLPPVector> a_test;
Ref<MLPPVector> _z_test;
Ref<MLPPVector> _a_test;
Ref<MLPPVector> delta;
Ref<MLPPVector> _delta;
// Regularization Params
MLPPReg::RegularizationType reg;
real_t lambda; /* Regularization Parameter */
real_t alpha; /* This is the controlling param for Elastic Net*/
MLPPReg::RegularizationType _reg;
real_t _lambda; /* Regularization Parameter */
real_t _alpha; /* This is the controlling param for Elastic Net*/
MLPPUtilities::WeightDistributionType weight_init;
MLPPUtilities::WeightDistributionType _weight_init;
bool _initialized;
};

View File (MLPPWGAN implementation)

@@ -17,29 +17,29 @@
#include "core/object/method_bind_ext.gen.inc"
Ref<MLPPMatrix> MLPPWGAN::get_output_set() {
return output_set;
return _output_set;
}
void MLPPWGAN::set_output_set(const Ref<MLPPMatrix> &val) {
output_set = val;
_output_set = val;
n = 0;
_n = 0;
if (output_set.is_valid()) {
n = output_set->size().y;
if (_output_set.is_valid()) {
_n = _output_set->size().y;
}
}
int MLPPWGAN::get_k() const {
return k;
return _k;
}
void MLPPWGAN::set_k(const int val) {
k = val;
_k = val;
}
Ref<MLPPMatrix> MLPPWGAN::generate_example(int n) {
MLPPLinAlg alg;
return model_set_test_generator(alg.gaussian_noise(n, k));
return model_set_test_generator(alg.gaussian_noise(n, _k));
}
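
A usage sketch for this generator entry point (hedged: wgan and samples are hypothetical names, and the network is assumed to have been built and trained already):

Ref<MLPPWGAN> wgan;
wgan.instance();
// ... add_layer(...) calls, add_output_layer(...), gradient_descent(...) ...
Ref<MLPPMatrix> samples = wgan->generate_example(5); // 5 rows of k-dimensional Gaussian noise pushed through the generator half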
void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
@@ -53,7 +53,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
const int CRITIC_ITERATIONS = 5; // Wasserstein GAN specific parameter.
while (true) {
cost_prev = cost(y_hat, alg.onevecv(n));
cost_prev = cost(_y_hat, alg.onevecv(_n));
Ref<MLPPMatrix> generator_input_set;
Ref<MLPPMatrix> discriminator_input_set;
@@ -64,38 +64,38 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
// Training of the discriminator.
for (int i = 0; i < CRITIC_ITERATIONS; i++) {
generator_input_set = alg.gaussian_noise(n, k);
generator_input_set = alg.gaussian_noise(_n, _k);
discriminator_input_set->set_from_mlpp_matrix(model_set_test_generator(generator_input_set));
discriminator_input_set->add_rows_mlpp_matrix(output_set); // Fake + real inputs.
discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
ly_hat = model_set_test_discriminator(discriminator_input_set);
loutput_set = alg.scalar_multiplynv(-1, alg.onevecv(n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
Ref<MLPPVector> output_set_real = alg.onevecv(n);
loutput_set = alg.scalar_multiplynv(-1, alg.onevecv(_n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
Ref<MLPPVector> output_set_real = alg.onevecv(_n);
loutput_set->add_mlpp_vector(output_set_real); // Fake + real output scores.
DiscriminatorGradientResult discriminator_gradient_results = compute_discriminator_gradients(ly_hat, loutput_set);
Vector<Ref<MLPPMatrix>> cumulative_discriminator_hidden_layer_w_grad = discriminator_gradient_results.cumulative_hidden_layer_w_grad;
Ref<MLPPVector> output_discriminator_w_grad = discriminator_gradient_results.output_w_grad;
cumulative_discriminator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / n, cumulative_discriminator_hidden_layer_w_grad);
output_discriminator_w_grad = alg.scalar_multiplynv(learning_rate / n, output_discriminator_w_grad);
cumulative_discriminator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_discriminator_hidden_layer_w_grad);
output_discriminator_w_grad = alg.scalar_multiplynv(learning_rate / _n, output_discriminator_w_grad);
update_discriminator_parameters(cumulative_discriminator_hidden_layer_w_grad, output_discriminator_w_grad, learning_rate);
}
// Training of the generator.
generator_input_set = alg.gaussian_noise(n, k);
generator_input_set = alg.gaussian_noise(_n, _k);
discriminator_input_set->set_from_mlpp_matrix(model_set_test_generator(generator_input_set));
ly_hat = model_set_test_discriminator(discriminator_input_set);
loutput_set = alg.onevecv(n);
loutput_set = alg.onevecv(_n);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(y_hat, loutput_set);
cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / n, cumulative_generator_hidden_layer_w_grad);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(_y_hat, loutput_set);
cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
update_generator_parameters(cumulative_generator_hidden_layer_w_grad, learning_rate);
forward_pass();
if (ui) {
handle_ui(epoch, cost_prev, y_hat, alg.onevecv(n));
handle_ui(epoch, cost_prev, _y_hat, alg.onevecv(_n));
}
epoch++;
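
For context on the $\pm 1$ targets: under the Wasserstein loss the critic is trained toward maximizing $\mathbb{E}[D(x_{\text{real}})] - \mathbb{E}[D(G(z))]$, which the loop encodes by scoring $n$ fake rows against targets of $-1$ and $n$ real rows against targets of $+1$ for CRITIC_ITERATIONS critic steps; the single generator step that follows reuses targets of $+1$ on fresh noise.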
@@ -109,7 +109,7 @@ real_t MLPPWGAN::score() {
MLPPLinAlg alg;
MLPPUtilities util;
forward_pass();
return util.performance_vec(y_hat, alg.onevecv(n));
return util.performance_vec(_y_hat, alg.onevecv(_n));
}
void MLPPWGAN::save(const String &file_name) {
@@ -141,81 +141,81 @@ void MLPPWGAN::add_layer(int n_hidden, MLPPActivation::ActivationFunction activa
layer->set_lambda(lambda);
layer->set_alpha(alpha);
if (network.empty()) {
layer->set_input(alg.gaussian_noise(n, k));
if (_network.empty()) {
layer->set_input(alg.gaussian_noise(_n, _k));
} else {
layer->set_input(network.write[network.size() - 1]->get_a());
layer->set_input(_network.write[_network.size() - 1]->get_a());
}
network.push_back(layer);
_network.push_back(layer);
layer->forward_pass();
}
void MLPPWGAN::add_output_layer(MLPPUtilities::WeightDistributionType weight_init, MLPPReg::RegularizationType reg, real_t lambda, real_t alpha) {
ERR_FAIL_COND(network.empty());
ERR_FAIL_COND(_network.empty());
if (!output_layer.is_valid()) {
output_layer.instance();
if (!_output_layer.is_valid()) {
_output_layer.instance();
}
output_layer->set_n_hidden(network[network.size() - 1]->get_n_hidden());
output_layer->set_activation(MLPPActivation::ACTIVATION_FUNCTION_LINEAR);
output_layer->set_cost(MLPPCost::COST_TYPE_WASSERSTEIN_LOSS);
output_layer->set_input(network.write[network.size() - 1]->get_a());
output_layer->set_weight_init(weight_init);
output_layer->set_lambda(lambda);
output_layer->set_alpha(alpha);
_output_layer->set_n_hidden(_network[_network.size() - 1]->get_n_hidden());
_output_layer->set_activation(MLPPActivation::ACTIVATION_FUNCTION_LINEAR);
_output_layer->set_cost(MLPPCost::COST_TYPE_WASSERSTEIN_LOSS);
_output_layer->set_input(_network.write[_network.size() - 1]->get_a());
_output_layer->set_weight_init(weight_init);
_output_layer->set_lambda(lambda);
_output_layer->set_alpha(alpha);
}
MLPPWGAN::MLPPWGAN(real_t p_k, const Ref<MLPPMatrix> &p_output_set) {
output_set = p_output_set;
n = p_output_set->size().y;
k = p_k;
_output_set = p_output_set;
_n = p_output_set->size().y;
_k = p_k;
y_hat.instance();
_y_hat.instance();
}
MLPPWGAN::MLPPWGAN() {
n = 0;
k = 0;
_n = 0;
_k = 0;
y_hat.instance();
_y_hat.instance();
}
MLPPWGAN::~MLPPWGAN() {
}
Ref<MLPPMatrix> MLPPWGAN::model_set_test_generator(const Ref<MLPPMatrix> &X) {
if (!network.empty()) {
network.write[0]->set_input(X);
network.write[0]->forward_pass();
if (!_network.empty()) {
_network.write[0]->set_input(X);
_network.write[0]->forward_pass();
for (int i = 1; i <= network.size() / 2; ++i) {
network.write[i]->set_input(network.write[i - 1]->get_a());
network.write[i]->forward_pass();
for (int i = 1; i <= _network.size() / 2; ++i) {
_network.write[i]->set_input(_network.write[i - 1]->get_a());
_network.write[i]->forward_pass();
}
}
return network.write[network.size() / 2]->get_a();
return _network.write[_network.size() / 2]->get_a();
}
Ref<MLPPVector> MLPPWGAN::model_set_test_discriminator(const Ref<MLPPMatrix> &X) {
if (!network.empty()) {
for (int i = network.size() / 2 + 1; i < network.size(); i++) {
if (i == network.size() / 2 + 1) {
network.write[i]->set_input(X);
if (!_network.empty()) {
for (int i = _network.size() / 2 + 1; i < _network.size(); i++) {
if (i == _network.size() / 2 + 1) {
_network.write[i]->set_input(X);
} else {
network.write[i]->set_input(network.write[i - 1]->get_a());
_network.write[i]->set_input(_network.write[i - 1]->get_a());
}
network.write[i]->forward_pass();
_network.write[i]->forward_pass();
}
output_layer->set_input(network.write[network.size() - 1]->get_a());
_output_layer->set_input(_network.write[_network.size() - 1]->get_a());
}
output_layer->forward_pass();
_output_layer->forward_pass();
return output_layer->get_a();
return _output_layer->get_a();
}
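
Both helpers rely on the same layout convention: a single _network vector holds the generator layers at indices $0$ through $\lfloor s/2 \rfloor$ and the critic layers at $\lfloor s/2 \rfloor + 1$ through $s - 1$ (with $s$ = _network.size()), and _output_layer caps the critic; model_set_test_generator walks the first half, while model_set_test_discriminator walks the second half plus the output layer.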
real_t MLPPWGAN::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
@@ -224,60 +224,60 @@ real_t MLPPWGAN::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
real_t total_reg_term = 0;
for (int i = 0; i < network.size() - 1; ++i) {
Ref<MLPPHiddenLayer> layer = network[i];
for (int i = 0; i < _network.size() - 1; ++i) {
Ref<MLPPHiddenLayer> layer = _network[i];
total_reg_term += regularization.reg_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg());
}
total_reg_term += regularization.reg_termv(output_layer->get_weights(), output_layer->get_lambda(), output_layer->get_alpha(), output_layer->get_reg());
total_reg_term += regularization.reg_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg());
return mlpp_cost.run_cost_norm_vector(output_layer->get_cost(), y_hat, y) + total_reg_term;
return mlpp_cost.run_cost_norm_vector(_output_layer->get_cost(), y_hat, y) + total_reg_term;
}
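
Written out, the accumulated cost is (a sketch in the code's notation; $C$ is the configured cost, the Wasserstein loss for the WGAN, and $R$ the per-layer regularization term; note that the loop as written covers hidden layers $0$ through $s - 2$ only):

$$J(\hat{y}, y) = C(\hat{y}, y) + \sum_{\ell = 0}^{s-2} R(W_\ell; \lambda_\ell, \alpha_\ell) + R(w_{\text{out}}; \lambda_{\text{out}}, \alpha_{\text{out}}).$$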
void MLPPWGAN::forward_pass() {
MLPPLinAlg alg;
if (!network.empty()) {
Ref<MLPPHiddenLayer> layer = network[0];
if (!_network.empty()) {
Ref<MLPPHiddenLayer> layer = _network[0];
layer->set_input(alg.gaussian_noise(n, k));
layer->set_input(alg.gaussian_noise(_n, _k));
layer->forward_pass();
for (int i = 1; i < network.size(); i++) {
layer = network[i];
for (int i = 1; i < _network.size(); i++) {
layer = _network[i];
layer->set_input(network.write[i - 1]->get_a());
layer->set_input(_network.write[i - 1]->get_a());
layer->forward_pass();
}
output_layer->set_input(network.write[network.size() - 1]->get_a());
_output_layer->set_input(_network.write[_network.size() - 1]->get_a());
} else { // Should never happen, though.
output_layer->set_input(alg.gaussian_noise(n, k));
_output_layer->set_input(alg.gaussian_noise(_n, _k));
}
output_layer->forward_pass();
_output_layer->forward_pass();
y_hat->set_from_mlpp_vector(output_layer->get_a());
_y_hat->set_from_mlpp_vector(_output_layer->get_a());
}
void MLPPWGAN::update_discriminator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, const Ref<MLPPVector> &output_layer_updation, real_t learning_rate) {
MLPPLinAlg alg;
output_layer->set_weights(alg.subtractionnv(output_layer->get_weights(), output_layer_updation));
output_layer->set_bias(output_layer->get_bias() - learning_rate * alg.sum_elementsv(output_layer->get_delta()) / n);
_output_layer->set_weights(alg.subtractionnv(_output_layer->get_weights(), output_layer_updation));
_output_layer->set_bias(_output_layer->get_bias() - learning_rate * alg.sum_elementsv(_output_layer->get_delta()) / _n);
if (!network.empty()) {
Ref<MLPPHiddenLayer> layer = network[network.size() - 1];
if (!_network.empty()) {
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[0]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / _n, layer->get_delta())));
for (int i = network.size() - 2; i > network.size() / 2; i--) {
layer = network[i];
for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
layer = _network[i];
layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / _n, layer->get_delta())));
}
}
}
@@ -285,14 +285,14 @@ void MLPPWGAN::update_discriminator_parameters(Vector<Ref<MLPPMatrix>> hidden_la
void MLPPWGAN::update_generator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, real_t learning_rate) {
MLPPLinAlg alg;
if (!network.empty()) {
for (int i = network.size() / 2; i >= 0; i--) {
Ref<MLPPHiddenLayer> layer = network[i];
if (!_network.empty()) {
for (int i = _network.size() / 2; i >= 0; i--) {
Ref<MLPPHiddenLayer> layer = _network[i];
//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / _n, layer->get_delta())));
}
}
}
@@ -305,15 +305,15 @@ MLPPWGAN::DiscriminatorGradientResult MLPPWGAN::compute_discriminator_gradients(
DiscriminatorGradientResult data;
output_layer->set_delta(alg.hadamard_productnv(mlpp_cost.run_cost_deriv_vector(output_layer->get_cost(), y_hat, output_set), avn.run_activation_deriv_vector(output_layer->get_activation(), output_layer->get_z())));
_output_layer->set_delta(alg.hadamard_productnv(mlpp_cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, output_set), avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z())));
data.output_w_grad = alg.mat_vec_multv(alg.transposem(output_layer->get_input()), output_layer->get_delta());
data.output_w_grad = alg.additionnv(data.output_w_grad, regularization.reg_deriv_termv(output_layer->get_weights(), output_layer->get_lambda(), output_layer->get_alpha(), output_layer->get_reg()));
data.output_w_grad = alg.mat_vec_multv(alg.transposem(_output_layer->get_input()), _output_layer->get_delta());
data.output_w_grad = alg.additionnv(data.output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!network.empty()) {
Ref<MLPPHiddenLayer> layer = network[network.size() - 1];
if (!_network.empty()) {
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
layer->set_delta(alg.hadamard_productm(alg.outer_product(output_layer->get_delta(), output_layer->get_weights()), avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z())));
layer->set_delta(alg.hadamard_productm(alg.outer_product(_output_layer->get_delta(), _output_layer->get_weights()), avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z())));
Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultm(alg.transposem(layer->get_input()), layer->get_delta());
@@ -322,9 +322,9 @@ MLPPWGAN::DiscriminatorGradientResult MLPPWGAN::compute_discriminator_gradients(
//std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
//std::cout << "WEIGHTS SECOND:" << layer.weights.size() << "x" << layer.weights[0].size() << std::endl;
for (int i = network.size() - 2; i > network.size() / 2; i--) {
layer = network[i];
Ref<MLPPHiddenLayer> next_layer = network[i + 1];
for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
layer = _network[i];
Ref<MLPPHiddenLayer> next_layer = _network[i + 1];
layer->set_delta(alg.hadamard_productm(alg.matmultm(next_layer->get_delta(), alg.transposem(next_layer->get_weights())), avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z())));
@@ -345,26 +345,26 @@ Vector<Ref<MLPPMatrix>> MLPPWGAN::compute_generator_gradients(const Ref<MLPPVect
Vector<Ref<MLPPMatrix>> cumulative_hidden_layer_w_grad; // Tensor containing ALL hidden grads.
Ref<MLPPVector> cost_deriv_vector = cost.run_cost_deriv_vector(output_layer->get_cost(), y_hat, output_set);
Ref<MLPPVector> activation_deriv_vector = avn.run_activation_deriv_vector(output_layer->get_activation(), output_layer->get_z());
Ref<MLPPVector> cost_deriv_vector = cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, output_set);
Ref<MLPPVector> activation_deriv_vector = avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z());
output_layer->set_delta(alg.hadamard_productnv(cost_deriv_vector, activation_deriv_vector));
_output_layer->set_delta(alg.hadamard_productnv(cost_deriv_vector, activation_deriv_vector));
Ref<MLPPVector> output_w_grad = alg.mat_vec_multv(alg.transposem(output_layer->get_input()), output_layer->get_delta());
output_w_grad = alg.additionnv(output_w_grad, regularization.reg_deriv_termv(output_layer->get_weights(), output_layer->get_lambda(), output_layer->get_alpha(), output_layer->get_reg()));
Ref<MLPPVector> output_w_grad = alg.mat_vec_multv(alg.transposem(_output_layer->get_input()), _output_layer->get_delta());
output_w_grad = alg.additionnv(output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!network.empty()) {
Ref<MLPPHiddenLayer> layer = network[network.size() - 1];
if (!_network.empty()) {
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
Ref<MLPPMatrix> activation_deriv_matrix = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());
layer->set_delta(alg.hadamard_productm(alg.outer_product(output_layer->get_delta(), output_layer->get_weights()), activation_deriv_matrix));
layer->set_delta(alg.hadamard_productm(alg.outer_product(_output_layer->get_delta(), _output_layer->get_weights()), activation_deriv_matrix));
Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultm(alg.transposem(layer->get_input()), layer->get_delta());
cumulative_hidden_layer_w_grad.push_back(alg.additionm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
for (int i = network.size() - 2; i >= 0; i--) {
layer = network[i];
Ref<MLPPHiddenLayer> next_layer = network[i + 1];
for (int i = _network.size() - 2; i >= 0; i--) {
layer = _network[i];
Ref<MLPPHiddenLayer> next_layer = _network[i + 1];
activation_deriv_matrix = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());
@@ -380,13 +380,13 @@ Vector<Ref<MLPPMatrix>> MLPPWGAN::compute_generator_gradients(const Ref<MLPPVect
void MLPPWGAN::handle_ui(int epoch, real_t cost_prev, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, output_set));
PLOG_MSG("Layer " + itos(network.size() + 1) + ":");
PLOG_MSG("Layer " + itos(_network.size() + 1) + ":");
MLPPUtilities::print_ui_vb(output_layer->get_weights(), output_layer->get_bias());
MLPPUtilities::print_ui_vb(_output_layer->get_weights(), _output_layer->get_bias());
if (!network.empty()) {
for (int i = network.size() - 1; i >= 0; i--) {
Ref<MLPPHiddenLayer> layer = network[i];
if (!_network.empty()) {
for (int i = _network.size() - 1; i >= 0; i--) {
Ref<MLPPHiddenLayer> layer = _network[i];
PLOG_MSG("Layer " + itos(i + 1) + ":");

View File (MLPPWGAN header)

@@ -70,14 +70,14 @@ protected:
static void _bind_methods();
Ref<MLPPMatrix> output_set;
Ref<MLPPVector> y_hat;
Ref<MLPPMatrix> _output_set;
Ref<MLPPVector> _y_hat;
Vector<Ref<MLPPHiddenLayer>> network;
Ref<MLPPOutputLayer> output_layer;
Vector<Ref<MLPPHiddenLayer>> _network;
Ref<MLPPOutputLayer> _output_layer;
int n;
int k;
int _n;
int _k;
};
#endif /* WGAN_hpp */