From 9a529c572d6e5562cd8af597fe68ccfa07893d76 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Sun, 12 Feb 2023 10:52:46 +0100
Subject: [PATCH] Initial cleanup pass on MLPPExpReg.

Rename the public API and the data members to snake_case (members gain
a leading underscore), move the constructor below the public methods,
change the UI flags to default to false, hoist the RNG setup out of the
SGD loop, and have the test exercise the old implementation alongside
the cleaned-up one.
---
 mlpp/exp_reg/exp_reg.cpp | 235 +++++++++++++++++++++------------------
 mlpp/exp_reg/exp_reg.h   |  49 ++++----
 test/mlpp_tests.cpp      |  10 +-
 3 files changed, 163 insertions(+), 131 deletions(-)

diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp
index 309fca9..44d379d 100644
--- a/mlpp/exp_reg/exp_reg.cpp
+++ b/mlpp/exp_reg/exp_reg.cpp
@@ -14,78 +14,68 @@
 #include <iostream>
 #include <random>
 
-MLPPExpReg::MLPPExpReg(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
-	inputSet = p_inputSet;
-	outputSet = p_outputSet;
-	n = p_inputSet.size();
-	k = p_inputSet[0].size();
-	reg = p_reg;
-	lambda = p_lambda;
-	alpha = p_alpha;
-
-	y_hat.resize(n);
-	weights = MLPPUtilities::weightInitialization(k);
-	initial = MLPPUtilities::weightInitialization(k);
-	bias = MLPPUtilities::biasInitialization();
+std::vector<real_t> MLPPExpReg::model_set_test(std::vector<std::vector<real_t>> X) {
+	return evaluatem(X);
 }
 
-std::vector<real_t> MLPPExpReg::modelSetTest(std::vector<std::vector<real_t>> X) {
-	return Evaluate(X);
+real_t MLPPExpReg::model_test(std::vector<real_t> x) {
+	return evaluatev(x);
 }
 
-real_t MLPPExpReg::modelTest(std::vector<real_t> x) {
-	return Evaluate(x);
-}
-
-void MLPPExpReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPLinAlg alg;
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
-	forwardPass();
+
+	forward_pass();
 
 	while (true) {
-		cost_prev = Cost(y_hat, outputSet);
+		cost_prev = cost(_y_hat, _output_set);
 
-		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
+		std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);
 
-		for (int i = 0; i < k; i++) {
+		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradient
 			real_t sum = 0;
-			for (int j = 0; j < n; j++) {
-				sum += error[j] * inputSet[j][i] * std::pow(weights[i], inputSet[j][i] - 1);
+			for (int j = 0; j < _n; j++) {
+				sum += error[j] * _input_set[j][i] * std::pow(_weights[i], _input_set[j][i] - 1);
 			}
-			real_t w_gradient = sum / n;
+			real_t w_gradient = sum / _n;
 
 			// Calculating the initial gradient
 			real_t sum2 = 0;
-			for (int j = 0; j < n; j++) {
-				sum2 += error[j] * std::pow(weights[i], inputSet[j][i]);
+			for (int j = 0; j < _n; j++) {
+				sum2 += error[j] * std::pow(_weights[i], _input_set[j][i]);
 			}
-			real_t i_gradient = sum2 / n;
+			real_t i_gradient = sum2 / _n;
 
 			// Weight/initial updation
-			weights[i] -= learning_rate * w_gradient;
-			initial[i] -= learning_rate * i_gradient;
+			_weights[i] -= learning_rate * w_gradient;
+			_initial[i] -= learning_rate * i_gradient;
 		}
-		weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradient
 		real_t sum = 0;
-		for (int j = 0; j < n; j++) {
-			sum += (y_hat[j] - outputSet[j]);
+		for (int j = 0; j < _n; j++) {
+			sum += (_y_hat[j] - _output_set[j]);
 		}
-		real_t b_gradient = sum / n;
+		real_t b_gradient = sum / _n;
 
 		// bias updation
-		bias -= learning_rate * b_gradient;
-		forwardPass();
+		_bias -= learning_rate * b_gradient;
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
-			MLPPUtilities::UI(weights, bias);
+		forward_pass();
+
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
+			MLPPUtilities::UI(_weights, _bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
@@ -94,153 +84,184 @@ void MLPPExpReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI)
 	}
 }
 
-void MLPPExpReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
 
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::uniform_int_distribution<int> distribution(0, int(_n - 1));
+
 	while (true) {
-		std::random_device rd;
-		std::default_random_engine generator(rd());
-		std::uniform_int_distribution<int> distribution(0, int(n - 1));
-		int outputIndex = distribution(generator);
+		int output_index = distribution(generator);
 
-		real_t y_hat = Evaluate(inputSet[outputIndex]);
-		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
+		real_t y_hat = evaluatev(_input_set[output_index]);
+		cost_prev = cost({ y_hat }, { _output_set[output_index] });
 
-		for (int i = 0; i < k; i++) {
+		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradients
-			real_t w_gradient = (y_hat - outputSet[outputIndex]) * inputSet[outputIndex][i] * std::pow(weights[i], inputSet[outputIndex][i] - 1);
-			real_t i_gradient = (y_hat - outputSet[outputIndex]) * std::pow(weights[i], inputSet[outputIndex][i]);
+			real_t w_gradient = (y_hat - _output_set[output_index]) * _input_set[output_index][i] * std::pow(_weights[i], _input_set[output_index][i] - 1);
+			real_t i_gradient = (y_hat - _output_set[output_index]) * std::pow(_weights[i], _input_set[output_index][i]);
 
 			// Weight/initial updation
-			weights[i] -= learning_rate * w_gradient;
-			initial[i] -= learning_rate * i_gradient;
+			_weights[i] -= learning_rate * w_gradient;
+			_initial[i] -= learning_rate * i_gradient;
 		}
-		weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradients
-		real_t b_gradient = (y_hat - outputSet[outputIndex]);
+		real_t b_gradient = (y_hat - _output_set[output_index]);
 
 		// Bias updation
-		bias -= learning_rate * b_gradient;
-		y_hat = Evaluate({ inputSet[outputIndex] });
+		_bias -= learning_rate * b_gradient;
+		y_hat = evaluatev(_input_set[output_index]);
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
-			MLPPUtilities::UI(weights, bias);
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { _output_set[output_index] }));
+			MLPPUtilities::UI(_weights, _bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
-void MLPPExpReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui) {
 	MLPPLinAlg alg;
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
 
 	// Creating the mini-batches
-	int n_mini_batch = n / mini_batch_size;
-	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
-	auto inputMiniBatches = std::get<0>(batches);
-	auto outputMiniBatches = std::get<1>(batches);
+	int n_mini_batch = _n / mini_batch_size;
+	auto batches = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);
+	auto input_mini_batches = std::get<0>(batches);
+	auto output_mini_batches = std::get<1>(batches);
 
 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
-			cost_prev = Cost(y_hat, outputMiniBatches[i]);
-			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
+			std::vector<real_t> y_hat = evaluatem(input_mini_batches[i]);
+			cost_prev = cost(y_hat, output_mini_batches[i]);
+			std::vector<real_t> error = alg.subtraction(y_hat, output_mini_batches[i]);
 
-			for (int j = 0; j < k; j++) {
+			for (int j = 0; j < _k; j++) {
 				// Calculating the weight gradient
 				real_t sum = 0;
-				for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
-					sum += error[k] * inputMiniBatches[i][k][j] * std::pow(weights[j], inputMiniBatches[i][k][j] - 1);
+				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
+					sum += error[k] * input_mini_batches[i][k][j] * std::pow(_weights[j], input_mini_batches[i][k][j] - 1);
 				}
-				real_t w_gradient = sum / outputMiniBatches[i].size();
+				real_t w_gradient = sum / output_mini_batches[i].size();
 
 				// Calculating the initial gradient
 				real_t sum2 = 0;
-				for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
-					sum2 += error[k] * std::pow(weights[j], inputMiniBatches[i][k][j]);
+				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
+					sum2 += error[k] * std::pow(_weights[j], input_mini_batches[i][k][j]);
 				}
-				real_t i_gradient = sum2 / outputMiniBatches[i].size();
+				real_t i_gradient = sum2 / output_mini_batches[i].size();
 
 				// Weight/initial updation
-				weights[j] -= learning_rate * w_gradient;
-				initial[j] -= learning_rate * i_gradient;
+				_weights[j] -= learning_rate * w_gradient;
+				_initial[j] -= learning_rate * i_gradient;
 			}
-			weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+			_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 			// Calculating the bias gradient
 			real_t sum = 0;
-			for (uint32_t j = 0; j < outputMiniBatches[i].size(); j++) {
-				sum += (y_hat[j] - outputMiniBatches[i][j]);
+			for (uint32_t j = 0; j < output_mini_batches[i].size(); j++) {
+				sum += (y_hat[j] - output_mini_batches[i][j]);
 			}
-			//real_t b_gradient = sum / outputMiniBatches[i].size();
-			y_hat = Evaluate(inputMiniBatches[i]);
+			//real_t b_gradient = sum / output_mini_batches[i].size();
+			y_hat = evaluatem(input_mini_batches[i]);
 
-			if (UI) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
-				MLPPUtilities::UI(weights, bias);
+			if (ui) {
+				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, output_mini_batches[i]));
+				MLPPUtilities::UI(_weights, _bias);
 			}
 		}
+
 		epoch++;
+
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
 real_t MLPPExpReg::score() {
 	MLPPUtilities util;
-	return util.performance(y_hat, outputSet);
+
+	return util.performance(_y_hat, _output_set);
 }
 
-void MLPPExpReg::save(std::string fileName) {
+void MLPPExpReg::save(std::string file_name) {
 	MLPPUtilities util;
-	util.saveParameters(fileName, weights, initial, bias);
+
+	util.saveParameters(file_name, _weights, _initial, _bias);
 }
 
-real_t MLPPExpReg::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+MLPPExpReg::MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+	_input_set = p_input_set;
+	_output_set = p_output_set;
+	_n = p_input_set.size();
+	_k = p_input_set[0].size();
+	_reg = p_reg;
+	_lambda = p_lambda;
+	_alpha = p_alpha;
+
+	_y_hat.resize(_n);
+	_weights = MLPPUtilities::weightInitialization(_k);
+	_initial = MLPPUtilities::weightInitialization(_k);
+	_bias = MLPPUtilities::biasInitialization();
+}
+
+real_t MLPPExpReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
 	MLPPReg regularization;
-	class MLPPCost cost;
-	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
+	MLPPCost mlpp_cost;
+
+	return mlpp_cost.MSE(y_hat, y) + regularization.regTerm(_weights, _lambda, _alpha, _reg);
 }
 
-std::vector<real_t> MLPPExpReg::Evaluate(std::vector<std::vector<real_t>> X) {
+real_t MLPPExpReg::evaluatev(std::vector<real_t> x) {
+	real_t y_hat = 0;
+
+	for (uint32_t i = 0; i < x.size(); i++) {
+		y_hat += _initial[i] * std::pow(_weights[i], x[i]);
+	}
+
+	return y_hat + _bias;
+}
+
+std::vector<real_t> MLPPExpReg::evaluatem(std::vector<std::vector<real_t>> X) {
 	std::vector<real_t> y_hat;
 	y_hat.resize(X.size());
+
 	for (uint32_t i = 0; i < X.size(); i++) {
 		y_hat[i] = 0;
 		for (uint32_t j = 0; j < X[i].size(); j++) {
-			y_hat[i] += initial[j] * std::pow(weights[j], X[i][j]);
+			y_hat[i] += _initial[j] * std::pow(_weights[j], X[i][j]);
 		}
-		y_hat[i] += bias;
+		y_hat[i] += _bias;
 	}
+
 	return y_hat;
 }
 
-real_t MLPPExpReg::Evaluate(std::vector<real_t> x) {
-	real_t y_hat = 0;
-	for (uint32_t i = 0; i < x.size(); i++) {
-		y_hat += initial[i] * std::pow(weights[i], x[i]);
-	}
-
-	return y_hat + bias;
-}
-
 // a * w^x + b
-void MLPPExpReg::forwardPass() {
-	y_hat = Evaluate(inputSet);
+void MLPPExpReg::forward_pass() {
+	_y_hat = evaluatem(_input_set);
 }
diff --git a/mlpp/exp_reg/exp_reg.h b/mlpp/exp_reg/exp_reg.h
index cd8b8bc..0b475e7 100644
--- a/mlpp/exp_reg/exp_reg.h
+++ b/mlpp/exp_reg/exp_reg.h
@@ -15,36 +15,41 @@
 
 class MLPPExpReg {
 public:
-	MLPPExpReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
-	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
+	real_t model_test(std::vector<real_t> x);
+
+	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
+	void sgd(real_t learning_rate, int max_epoch, bool ui = false);
+	void mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui = false);
+
 	real_t score();
-	void save(std::string fileName);
+
+	void save(std::string file_name);
+
+	MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
 
 private:
-	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
 
-	std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
-	real_t Evaluate(std::vector<real_t> x);
-	void forwardPass();
+	real_t evaluatev(std::vector<real_t> x);
+	std::vector<real_t> evaluatem(std::vector<std::vector<real_t>> X);
 
-	std::vector<std::vector<real_t>> inputSet;
-	std::vector<real_t> outputSet;
-	std::vector<real_t> y_hat;
-	std::vector<real_t> weights;
-	std::vector<real_t> initial;
-	real_t bias;
+	void forward_pass();
 
-	int n;
-	int k;
+	std::vector<std::vector<real_t>> _input_set;
+	std::vector<real_t> _output_set;
+	std::vector<real_t> _y_hat;
+	std::vector<real_t> _weights;
+	std::vector<real_t> _initial;
+	real_t _bias;
+
+	int _n;
+	int _k;
 
 	// Regularization Params
-	std::string reg;
-	real_t lambda;
-	real_t alpha; /* This is the controlling param for Elastic Net*/
+	std::string _reg;
+	real_t _lambda;
+	real_t _alpha; /* This is the controlling param for Elastic Net*/
 };
 
 #endif /* ExpReg_hpp */
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index a2dd788..8accb0e 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -434,9 +434,15 @@ void MLPPTests::test_exp_reg_regression(bool ui) {
 	// EXPREG REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 0, 1, 2, 3, 4 } };
 	std::vector<real_t> outputSet = { 1, 2, 4, 8, 16 };
+
+	MLPPExpRegOld model_old(alg.transpose(inputSet), outputSet);
+	model_old.SGD(0.001, 10000, ui);
+	alg.printVector(model_old.modelSetTest(alg.transpose(inputSet)));
+	std::cout << "ACCURACY: " << 100 * model_old.score() << "%" << std::endl;
+
 	MLPPExpReg model(alg.transpose(inputSet), outputSet);
-	model.SGD(0.001, 10000, ui);
-	alg.printVector(model.modelSetTest(alg.transpose(inputSet)));
+	model.sgd(0.001, 10000, ui);
+	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
 	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 }
 void MLPPTests::test_tanh_regression(bool ui) {
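
Usage sketch (reviewer aid, not part of the patch): a minimal sketch of how the renamed API is called after this cleanup, mirroring the updated test in test/mlpp_tests.cpp. The include paths, the real_t typedef, and MLPPLinAlg are assumed to come from this module as the test uses them; the hyperparameters are simply the values the test already passes, not tuned recommendations.

    // Fits y ~= a * w^x + b with stochastic gradient descent.
    #include "mlpp/exp_reg/exp_reg.h"
    #include "mlpp/lin_alg/lin_alg.h"

    #include <iostream>
    #include <vector>

    int main() {
        MLPPLinAlg alg;

        // One feature, five samples of y = 2^x; a good fit approaches
        // initial ~= 1, weight ~= 2, bias ~= 0.
        std::vector<std::vector<real_t>> input_set = { { 0, 1, 2, 3, 4 } };
        std::vector<real_t> output_set = { 1, 2, 4, 8, 16 };

        // The constructor expects one row per sample, hence the transpose.
        MLPPExpReg model(alg.transpose(input_set), output_set);

        // Renamed from SGD(); the ui flag now defaults to false.
        model.sgd(0.001, 10000, false);

        alg.printVector(model.model_set_test(alg.transpose(input_set)));
        std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;

        return 0;
    }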