From 17486baae9fbe7d819b9a744e90e909981da9be5 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Thu, 16 Feb 2023 18:43:35 +0100
Subject: [PATCH] Now MLPPExpReg uses engine classes.

---
 mlpp/exp_reg/exp_reg.cpp | 166 ++++++++++++++++++++++++---------------
 mlpp/exp_reg/exp_reg.h   |  32 ++++----
 test/mlpp_tests.cpp      |  14 +++-
 3 files changed, 129 insertions(+), 83 deletions(-)

diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp
index 91da395..76bbe7c 100644
--- a/mlpp/exp_reg/exp_reg.cpp
+++ b/mlpp/exp_reg/exp_reg.cpp
@@ -14,11 +14,11 @@
 #include <iostream>
 #include <random>
 
-std::vector<real_t> MLPPExpReg::model_set_test(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPExpReg::model_set_test(const Ref<MLPPMatrix> &X) {
 	return evaluatem(X);
 }
 
-real_t MLPPExpReg::model_test(std::vector<real_t> x) {
+real_t MLPPExpReg::model_test(const Ref<MLPPVector> &x) {
 	return evaluatev(x);
 }
 
@@ -34,35 +34,35 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
 	while (true) {
 		cost_prev = cost(_y_hat, _output_set);
 
-		std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);
+		Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
 
 		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradient
 			real_t sum = 0;
 			for (int j = 0; j < _n; j++) {
-				sum += error[j] * _input_set[j][i] * std::pow(_weights[i], _input_set[j][i] - 1);
+				sum += error->get_element(j) * _input_set->get_element(j, i) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i) - 1);
 			}
 			real_t w_gradient = sum / _n;
 
 			// Calculating the initial gradient
 			real_t sum2 = 0;
 			for (int j = 0; j < _n; j++) {
-				sum2 += error[j] * std::pow(_weights[i], _input_set[j][i]);
+				sum2 += error->get_element(j) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i));
 			}
 
 			real_t i_gradient = sum2 / _n;
 
 			// Weight/initial updation
-			_weights[i] -= learning_rate * w_gradient;
-			_initial[i] -= learning_rate * i_gradient;
+			_weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
+			_initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
 		}
 
-		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradient
 		real_t sum = 0;
 		for (int j = 0; j < _n; j++) {
-			sum += (_y_hat[j] - _output_set[j]);
+			sum += (_y_hat->get_element(j) - _output_set->get_element(j));
 		}
 
 		real_t b_gradient = sum / _n;
@@ -72,8 +72,8 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
 		forward_pass();
 
 		if (ui) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
-			MLPPUtilities::UI(_weights, _bias);
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, _output_set));
+			MLPPUtilities::print_ui_vb(_weights, _bias);
 		}
 
 		epoch++;
@@ -94,35 +94,53 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 	std::default_random_engine generator(rd());
 	std::uniform_int_distribution<int> distribution(0, int(_n - 1));
 
+	Ref<MLPPVector> input_set_row_tmp;
+	input_set_row_tmp.instance();
+	input_set_row_tmp->resize(_input_set->size().x);
+
+	Ref<MLPPVector> y_hat_row_tmp;
+	y_hat_row_tmp.instance();
+	y_hat_row_tmp->resize(1);
+
+	Ref<MLPPVector> output_set_row_tmp;
+	output_set_row_tmp.instance();
+	output_set_row_tmp->resize(1);
+
 	while (true) {
 		int output_index = distribution(generator);
 
-		real_t y_hat = evaluatev(_input_set[output_index]);
-		cost_prev = cost({ y_hat }, { _output_set[output_index] });
+		_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+		real_t output_set_element = _output_set->get_element(output_index);
+		output_set_row_tmp->set_element(0, output_set_element);
+
+		real_t y_hat = evaluatev(input_set_row_tmp);
+		y_hat_row_tmp->set_element(0, y_hat);
+
+		cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
 
 		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradients
-			real_t w_gradient = (y_hat - _output_set[output_index]) * _input_set[output_index][i] * std::pow(_weights[i], _input_set[output_index][i] - 1);
-			real_t i_gradient = (y_hat - _output_set[output_index]) * std::pow(_weights[i], _input_set[output_index][i]);
+			real_t w_gradient = (y_hat - output_set_element) * input_set_row_tmp->get_element(i) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i) - 1);
+			real_t i_gradient = (y_hat - output_set_element) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i));
 
 			// Weight/initial updation
-			_weights[i] -= learning_rate * w_gradient;
-			_initial[i] -= learning_rate * i_gradient;
+			_weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
+			_initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
 		}
 
-		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradients
-		real_t b_gradient = (y_hat - _output_set[output_index]);
+		real_t b_gradient = (y_hat - output_set_element);
 
 		// Bias updation
 		_bias -= learning_rate * b_gradient;
 
-		y_hat = evaluatev(_input_set[output_index]);
+		y_hat = evaluatev(input_set_row_tmp);
 
 		if (ui) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { _output_set[output_index] }));
-			MLPPUtilities::UI(_weights, _bias);
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat_row_tmp, output_set_row_tmp));
+			MLPPUtilities::print_ui_vb(_weights, _bias);
 		}
 
 		epoch++;
@@ -144,51 +162,52 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
 	// Creating the mini-batches
 	int n_mini_batch = _n / mini_batch_size;
-	auto batches = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);
-	auto input_mini_batches = std::get<0>(batches);
-	auto output_mini_batches = std::get<1>(batches);
+	MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
 
 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = evaluatem(input_mini_batches[i]);
-			cost_prev = cost(y_hat, output_mini_batches[i]);
-			std::vector<real_t> error = alg.subtraction(y_hat, output_mini_batches[i]);
+			Ref<MLPPMatrix> current_input_batch = batches.input_sets[i];
+			Ref<MLPPVector> current_output_batch = batches.output_sets[i];
+
+			Ref<MLPPVector> y_hat = evaluatem(current_input_batch);
+			cost_prev = cost(y_hat, current_output_batch);
+			Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch);
 
 			for (int j = 0; j < _k; j++) {
 				// Calculating the weight gradient
 				real_t sum = 0;
-				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
-					sum += error[k] * input_mini_batches[i][k][j] * std::pow(_weights[j], input_mini_batches[i][k][j] - 1);
+				for (int k = 0; k < current_output_batch->size(); k++) {
+					sum += error->get_element(k) * current_input_batch->get_element(k, j) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j) - 1);
 				}
-				real_t w_gradient = sum / output_mini_batches[i].size();
+				real_t w_gradient = sum / current_output_batch->size();
 
 				// Calculating the initial gradient
 				real_t sum2 = 0;
-				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
-					sum2 += error[k] * std::pow(_weights[j], input_mini_batches[i][k][j]);
+				for (int k = 0; k < current_output_batch->size(); k++) {
+					sum2 += error->get_element(k) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j));
 				}
 
-				real_t i_gradient = sum2 / output_mini_batches[i].size();
+				real_t i_gradient = sum2 / current_output_batch->size();
 
 				// Weight/initial updation (note: indexed by the loop variable j, not i)
-				_weights[j] -= learning_rate * w_gradient;
-				_initial[j] -= learning_rate * i_gradient;
+				_weights->set_element(j, _weights->get_element(j) - learning_rate * w_gradient);
+				_initial->set_element(j, _initial->get_element(j) - learning_rate * i_gradient);
 			}
 
-			_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+			_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
 
 			// Calculating the bias gradient
 			real_t sum = 0;
-			for (uint32_t j = 0; j < output_mini_batches[i].size(); j++) {
-				sum += (y_hat[j] - output_mini_batches[i][j]);
+			for (int j = 0; j < current_output_batch->size(); j++) {
+				sum += (y_hat->get_element(j) - current_output_batch->get_element(j));
 			}
 
 			//real_t b_gradient = sum / output_mini_batches[i].size();
 
-			y_hat = evaluatem(input_mini_batches[i]);
+			y_hat = evaluatem(current_input_batch);
 
 			if (ui) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, output_mini_batches[i]));
-				MLPPUtilities::UI(_weights, _bias);
+				MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, current_output_batch));
+				MLPPUtilities::print_ui_vb(_weights, _bias);
 			}
 		}
@@ -205,28 +224,40 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
 
 real_t MLPPExpReg::score() {
 	MLPPUtilities util;
-	return util.performance(_y_hat, _output_set);
+	return util.performance_vec(_y_hat, _output_set);
 }
 
-void MLPPExpReg::save(std::string file_name) {
+void MLPPExpReg::save(const String &file_name) {
 	MLPPUtilities util;
-	util.saveParameters(file_name, _weights, _initial, _bias);
+	//util.saveParameters(file_name, _weights, _initial, _bias);
 }
 
-MLPPExpReg::MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+MLPPExpReg::MLPPExpReg(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
 	_input_set = p_input_set;
 	_output_set = p_output_set;
 
-	_n = p_input_set.size();
-	_k = p_input_set[0].size();
+	_n = p_input_set->size().y;
+	_k = p_input_set->size().x;
 
 	_reg = p_reg;
 	_lambda = p_lambda;
	_alpha = p_alpha;
 
-	_y_hat.resize(_n);
-	_weights = MLPPUtilities::weightInitialization(_k);
-	_initial = MLPPUtilities::weightInitialization(_k);
-	_bias = MLPPUtilities::biasInitialization();
+	_y_hat.instance();
+	_y_hat->resize(_n);
+
+	MLPPUtilities util;
+
+	_weights.instance();
+	_weights->resize(_k);
+
+	util.weight_initializationv(_weights);
+
+	_initial.instance();
+	_initial->resize(_k);
+
+	util.weight_initializationv(_initial);
+
+	_bias = util.bias_initializationr();
 }
 
 MLPPExpReg::MLPPExpReg() {
@@ -234,33 +265,38 @@ MLPPExpReg::MLPPExpReg() {
 
 MLPPExpReg::~MLPPExpReg() {
 }
 
-real_t MLPPExpReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+real_t MLPPExpReg::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
 	MLPPCost mlpp_cost;
-	return mlpp_cost.MSE(y_hat, y) + regularization.regTerm(_weights, _lambda, _alpha, _reg);
+	return mlpp_cost.msev(y_hat, y) + regularization.reg_termv(_weights, _lambda, _alpha, _reg);
 }
 
-real_t MLPPExpReg::evaluatev(std::vector<real_t> x) {
+real_t MLPPExpReg::evaluatev(const Ref<MLPPVector> &x) {
 	real_t y_hat = 0;
 
-	for (uint32_t i = 0; i < x.size(); i++) {
-		y_hat += _initial[i] * std::pow(_weights[i], x[i]);
+	for (int i = 0; i < x->size(); i++) {
+		y_hat += _initial->get_element(i) * Math::pow(_weights->get_element(i), x->get_element(i));
 	}
 
 	return y_hat + _bias;
 }
 
-std::vector<real_t> MLPPExpReg::evaluatem(std::vector<std::vector<real_t>> X) {
-	std::vector<real_t> y_hat;
-	y_hat.resize(X.size());
+Ref<MLPPVector> MLPPExpReg::evaluatem(const Ref<MLPPMatrix> &X) {
+	Ref<MLPPVector> y_hat;
+	y_hat.instance();
+	y_hat->resize(X->size().y);
 
-	for (uint32_t i = 0; i < X.size(); i++) {
-		y_hat[i] = 0;
-		for (uint32_t j = 0; j < X[i].size(); j++) {
-			y_hat[i] += _initial[j] * std::pow(_weights[j], X[i][j]);
+	for (int i = 0; i < X->size().y; i++) {
+		real_t y = 0; // accumulator must start at zero
+
+		for (int j = 0; j < X->size().x; j++) {
+			y += _initial->get_element(j) * Math::pow(_weights->get_element(j), X->get_element(i, j));
 		}
-		y_hat[i] += _bias;
+
+		y += _bias;
+
+		y_hat->set_element(i, y);
 	}
 
 	return y_hat;
diff --git a/mlpp/exp_reg/exp_reg.h b/mlpp/exp_reg/exp_reg.h
index 494b45d..3572303 100644
--- a/mlpp/exp_reg/exp_reg.h
+++ b/mlpp/exp_reg/exp_reg.h
@@ -12,15 +12,17 @@
 
 #include "core/object/reference.h"
 
-#include <string>
-#include <vector>
+#include "../regularization/reg.h"
+
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_vector.h"
 
 class MLPPExpReg : public Reference {
 	GDCLASS(MLPPExpReg, Reference);
 
 public:
-	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
-	real_t model_test(std::vector<real_t> x);
+	Ref<MLPPVector> model_set_test(const Ref<MLPPMatrix> &X);
+	real_t model_test(const Ref<MLPPVector> &x);
 
 	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
 	void sgd(real_t learning_rate, int max_epoch, bool ui = false);
@@ -28,35 +30,35 @@ public:
 
 	real_t score();
 
-	void save(std::string file_name);
+	void save(const String &file_name);
 
-	MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
+	MLPPExpReg(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, MLPPReg::RegularizationType p_reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t p_lambda = 0.5, real_t p_alpha = 0.5);
 	MLPPExpReg();
 	~MLPPExpReg();
 
 protected:
-	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+	real_t cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
 
-	real_t evaluatev(std::vector<real_t> x);
-	std::vector<real_t> evaluatem(std::vector<std::vector<real_t>> X);
+	real_t evaluatev(const Ref<MLPPVector> &x);
+	Ref<MLPPVector> evaluatem(const Ref<MLPPMatrix> &X);
 
 	void forward_pass();
 
 	static void _bind_methods();
 
-	std::vector<std::vector<real_t>> _input_set;
-	std::vector<real_t> _output_set;
-	std::vector<real_t> _y_hat;
-	std::vector<real_t> _weights;
-	std::vector<real_t> _initial;
+	Ref<MLPPMatrix> _input_set;
+	Ref<MLPPVector> _output_set;
+	Ref<MLPPVector> _y_hat;
+	Ref<MLPPVector> _weights;
+	Ref<MLPPVector> _initial;
 
 	real_t _bias;
 
 	int _n;
 	int _k;
 
 	// Regularization Params
-	std::string _reg;
+	MLPPReg::RegularizationType _reg;
 	real_t _lambda;
 	real_t _alpha; /* This is the controlling param for Elastic Net */
 };
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index 664f210..3ea5fa7 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -447,10 +447,18 @@ void MLPPTests::test_exp_reg_regression(bool ui) {
 	alg.printVector(model_old.modelSetTest(alg.transpose(inputSet)));
 	std::cout << "ACCURACY: " << 100 * model_old.score() << "%" << std::endl;
-	MLPPExpReg model(alg.transpose(inputSet), outputSet);
+	Ref<MLPPMatrix> input_set;
+	input_set.instance();
+	input_set->set_from_std_vectors(inputSet);
+
+	Ref<MLPPVector> output_set;
+	output_set.instance();
+	output_set->set_from_std_vector(outputSet);
+
+	MLPPExpReg model(alg.transposem(input_set), output_set);
 	model.sgd(0.001, 10000, ui);
-	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
-	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
+	PLOG_MSG(model.model_set_test(alg.transposem(input_set))->to_string());
+	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }
 
 void MLPPTests::test_tanh_regression(bool ui) {
 	MLPPLinAlg alg;
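--
Reviewer note (not part of the commit): the constructor now derives the sample
count from the matrix rows and the feature count from its columns
(_n = p_input_set->size().y, _k = p_input_set->size().x), so inputs must be
arranged samples-by-features; that is why the updated test transposes its
feature-major inputSet with alg.transposem(). A minimal usage sketch of the
refactored API follows. It sticks to calls that appear in this patch
(instance(), set_from_std_vectors(), set_from_std_vector(), gradient_descent(),
model_set_test(), score(), PLOG_MSG); the include paths, the example_exp_reg()
wrapper, and the sample data are illustrative assumptions.

	#include <vector>

	#include "mlpp/exp_reg/exp_reg.h" // assumed include paths
	#include "mlpp/lin_alg/mlpp_matrix.h"
	#include "mlpp/lin_alg/mlpp_vector.h"

	void example_exp_reg() { // hypothetical wrapper, for illustration only
		// One sample per row, one feature per column: _n = 5, _k = 1.
		std::vector<std::vector<real_t>> input_rows = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 } };
		std::vector<real_t> outputs = { 1, 2, 4, 8, 16 }; // roughly y = 2^x

		Ref<MLPPMatrix> input_set;
		input_set.instance();
		input_set->set_from_std_vectors(input_rows);

		Ref<MLPPVector> output_set;
		output_set.instance();
		output_set->set_from_std_vector(outputs);

		// Regularization defaults to MLPPReg::REGULARIZATION_TYPE_NONE.
		MLPPExpReg model(input_set, output_set);
		model.gradient_descent(0.001, 10000);

		Ref<MLPPVector> predictions = model.model_set_test(input_set);
		PLOG_MSG(predictions->to_string());
		PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
	}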