Now MLPPCLogLogReg uses engine classes.

Relintai 2023-02-16 19:56:18 +01:00
parent 0a1d42f627
commit 741475a4ab
3 changed files with 121 additions and 79 deletions
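At a glance: the class now consumes the engine's reference-counted MLPPMatrix/MLPPVector types instead of std::vector. A minimal usage sketch of the ported API, mirroring the calls made in the updated test at the bottom of this diff (the sample data is made up for illustration):

	// Hypothetical data; instance(), set_from_std_vectors() and
	// set_from_std_vector() are used exactly as in the updated test below.
	Ref<MLPPMatrix> input_set;
	input_set.instance();
	input_set->set_from_std_vectors({ { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } });

	Ref<MLPPVector> output_set;
	output_set.instance();
	output_set->set_from_std_vector({ 0.0, 1.0, 1.0, 1.0 });

	MLPPCLogLogReg model(input_set, output_set);
	model.gradient_descent(0.1, 10000, false);
	PLOG_MSG(model.model_set_test(input_set)->to_string());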

View File

@@ -11,14 +11,13 @@
 #include "../regularization/reg.h"
 #include "../utilities/utilities.h"

 #include <iostream>
 #include <random>

-std::vector<real_t> MLPPCLogLogReg::model_set_test(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPCLogLogReg::model_set_test(const Ref<MLPPMatrix> &X) {
 	return evaluatem(X);
 }

-real_t MLPPCLogLogReg::model_test(std::vector<real_t> x) {
+real_t MLPPCLogLogReg::model_test(const Ref<MLPPVector> &x) {
 	return evaluatev(x);
 }
@@ -35,20 +34,20 @@ void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool
 	while (true) {
 		cost_prev = cost(_y_hat, _output_set);

-		std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);
+		Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);

 		// Calculating the weight gradients
-		_weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(_input_set), alg.hadamard_product(error, avn.cloglog(_z, true)))));
-		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+		_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposem(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
+		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 		// Calculating the bias gradients
-		bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(_z, true))) / _n;
+		bias -= learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(_z))) / _n;

 		forward_pass();

 		if (ui) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
-			MLPPUtilities::UI(_weights, bias);
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, _output_set));
+			MLPPUtilities::print_ui_vb(_weights, bias);
 		}

 		epoch++;
@@ -72,19 +71,19 @@ void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
 	while (true) {
 		cost_prev = cost(_y_hat, _output_set);

-		std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);
+		Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);

-		_weights = alg.addition(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(_input_set), alg.hadamard_product(error, avn.cloglog(_z, true)))));
-		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+		_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposem(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
+		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 		// Calculating the bias gradients
-		bias += learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(_z, true))) / _n;
+		bias += learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(_z))) / _n;

 		forward_pass();

 		if (ui) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
-			MLPPUtilities::UI(_weights, bias);
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, _output_set));
+			MLPPUtilities::print_ui_vb(_weights, bias);
 		}

 		epoch++;
@@ -102,32 +101,52 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
 	real_t cost_prev = 0;
 	int epoch = 1;

+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::uniform_int_distribution<int> distribution(0, int(_n - 1));
+
 	forward_pass();

+	Ref<MLPPVector> input_set_row_tmp;
+	input_set_row_tmp.instance();
+	input_set_row_tmp->resize(_input_set->size().x);
+
+	Ref<MLPPVector> y_hat_row_tmp;
+	y_hat_row_tmp.instance();
+	y_hat_row_tmp->resize(1);
+
+	Ref<MLPPVector> output_set_row_tmp;
+	output_set_row_tmp.instance();
+	output_set_row_tmp->resize(1);
+
 	while (true) {
-		std::random_device rd;
-		std::default_random_engine generator(rd());
-		std::uniform_int_distribution<int> distribution(0, int(_n - 1));
-		int outputIndex = distribution(generator);
+		int output_index = distribution(generator);

-		real_t y_hat = evaluatev(_input_set[outputIndex]);
-		real_t z = propagatev(_input_set[outputIndex]);
-		cost_prev = cost({ y_hat }, { _output_set[outputIndex] });
+		_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+		real_t output_set_element = _output_set->get_element(output_index);
+		output_set_row_tmp->set_element(0, output_set_element);

-		real_t error = y_hat - _output_set[outputIndex];
+		real_t y_hat = evaluatev(input_set_row_tmp);
+		y_hat_row_tmp->set_element(0, y_hat);
+		real_t z = propagatev(input_set_row_tmp);
+		cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
+
+		real_t error = y_hat - output_set_element;

 		// Weight Updation
-		_weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate * error * exp(z - exp(z)), _input_set[outputIndex]));
-		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+		_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * Math::exp(z - Math::exp(z)), input_set_row_tmp));
+		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 		// Bias updation
 		bias -= learning_rate * error * exp(z - exp(z));

-		y_hat = evaluatev(_input_set[outputIndex]);
+		y_hat = evaluatev(input_set_row_tmp);

 		if (p_) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { _output_set[outputIndex] }));
-			MLPPUtilities::UI(_weights, bias);
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat_row_tmp, output_set_row_tmp));
+			MLPPUtilities::print_ui_vb(_weights, bias);
 		}

 		epoch++;
@@ -149,32 +168,33 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
 	// Creating the mini-batches
 	int n_mini_batch = _n / mini_batch_size;
-	auto batches = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);
-	auto inputMiniBatches = std::get<0>(batches);
-	auto outputMiniBatches = std::get<1>(batches);
+	MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);

 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = evaluatem(inputMiniBatches[i]);
-			std::vector<real_t> z = propagatem(inputMiniBatches[i]);
-			cost_prev = cost(y_hat, outputMiniBatches[i]);
+			Ref<MLPPMatrix> current_input_batch = batches.input_sets[i];
+			Ref<MLPPVector> current_output_batch = batches.output_sets[i];

-			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
+			Ref<MLPPVector> y_hat = evaluatem(current_input_batch);
+			Ref<MLPPVector> z = propagatem(current_input_batch);
+			cost_prev = cost(y_hat, current_output_batch);
+
+			Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch);

 			// Calculating the weight gradients
-			_weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), alg.hadamard_product(error, avn.cloglog(z, 1)))));
-			_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposem(current_input_batch), alg.hadamard_productnv(error, avn.cloglog_derivv(z)))));
+			_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 			// Calculating the bias gradients
-			bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / _n;
+			bias -= learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(z))) / _n;

 			forward_pass();

-			y_hat = evaluatem(inputMiniBatches[i]);
+			y_hat = evaluatem(current_input_batch);

 			if (p_) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputMiniBatches[i]));
-				MLPPUtilities::UI(_weights, bias);
+				MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, current_output_batch));
+				MLPPUtilities::print_ui_vb(_weights, bias);
 			}
 		}
@@ -190,22 +210,29 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
 real_t MLPPCLogLogReg::score() {
 	MLPPUtilities util;
-	return util.performance(_y_hat, _output_set);
+	return util.performance_vec(_y_hat, _output_set);
 }

-MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+MLPPCLogLogReg::MLPPCLogLogReg(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
 	_input_set = p_input_set;
 	_output_set = p_output_set;

-	_n = _input_set.size();
-	_k = _input_set[0].size();
+	_n = _input_set->size().y;
+	_k = _input_set->size().x;

 	_reg = p_reg;
 	_lambda = p_lambda;
 	_alpha = p_alpha;

-	_y_hat.resize(_n);
+	_y_hat.instance();
+	_y_hat->resize(_n);

-	_weights = MLPPUtilities::weightInitialization(_k);
-	bias = MLPPUtilities::biasInitialization();
+	MLPPUtilities utilities;
+
+	_weights.instance();
+	_weights->resize(_k);
+
+	utilities.weight_initializationv(_weights);
+	bias = utilities.bias_initializationr();
 }

 MLPPCLogLogReg::MLPPCLogLogReg() {
@@ -213,32 +240,37 @@ MLPPCLogLogReg::MLPPCLogLogReg() {
 MLPPCLogLogReg::~MLPPCLogLogReg() {
 }

-real_t MLPPCLogLogReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+real_t MLPPCLogLogReg::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
-	class MLPPCost cost;
-	return cost.MSE(y_hat, y) + regularization.regTerm(_weights, _lambda, _alpha, _reg);
+	MLPPCost mlpp_cost;
+	return mlpp_cost.msev(y_hat, y) + regularization.reg_termv(_weights, _lambda, _alpha, _reg);
 }

-real_t MLPPCLogLogReg::evaluatev(std::vector<real_t> x) {
+real_t MLPPCLogLogReg::evaluatev(const Ref<MLPPVector> &x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;

-	return avn.cloglog(alg.dot(_weights, x) + bias);
+	return avn.cloglog_normr(alg.dotv(_weights, x) + bias);
 }

-real_t MLPPCLogLogReg::propagatev(std::vector<real_t> x) {
+real_t MLPPCLogLogReg::propagatev(const Ref<MLPPVector> &x) {
 	MLPPLinAlg alg;
-	return alg.dot(_weights, x) + bias;
+	return alg.dotv(_weights, x) + bias;
 }

-std::vector<real_t> MLPPCLogLogReg::evaluatem(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPCLogLogReg::evaluatem(const Ref<MLPPMatrix> &X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;

-	return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, _weights)));
+	return avn.cloglog_normv(alg.scalar_addnv(bias, alg.mat_vec_multv(X, _weights)));
 }

-std::vector<real_t> MLPPCLogLogReg::propagatem(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPCLogLogReg::propagatem(const Ref<MLPPMatrix> &X) {
 	MLPPLinAlg alg;
-	return alg.scalarAdd(bias, alg.mat_vec_mult(X, _weights));
+	return alg.scalar_addnv(bias, alg.mat_vec_multv(X, _weights));
 }

 // cloglog ( wTx + b )
@@ -246,7 +278,7 @@ void MLPPCLogLogReg::forward_pass() {
 	MLPPActivation avn;

 	_z = propagatem(_input_set);
-	_y_hat = avn.cloglog(_z);
+	_y_hat = avn.cloglog_normv(_z);
 }

 void MLPPCLogLogReg::_bind_methods() {
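A note on the math behind the SGD updates above: the factor exp(z - exp(z)) is the derivative of the cloglog link used throughout this file, since cloglog(z) = 1 - exp(-exp(z)) gives d/dz = exp(z) * exp(-exp(z)) = exp(z - exp(z)). A standalone sketch in plain C++ (illustrative only, independent of the MLPPActivation API):

	#include <cmath>

	// cloglog(z) = 1 - exp(-exp(z)); its derivative exp(z - exp(z)) is the
	// scalar factor applied to the error in the SGD weight and bias updates.
	static double cloglog(double z) { return 1.0 - std::exp(-std::exp(z)); }
	static double cloglog_deriv(double z) { return std::exp(z - std::exp(z)); }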

View File

@@ -12,15 +12,17 @@
 #include "core/object/reference.h"

-#include <string>
-#include <vector>
+#include "../regularization/reg.h"
+
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_vector.h"

 class MLPPCLogLogReg : public Reference {
 	GDCLASS(MLPPCLogLogReg, Reference);

public:
-	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
-	real_t model_test(std::vector<real_t> x);
+	Ref<MLPPVector> model_set_test(const Ref<MLPPMatrix> &X);
+	real_t model_test(const Ref<MLPPVector> &x);

 	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
 	void mle(real_t learning_rate, int max_epoch, bool ui = false);
@@ -29,7 +31,7 @@ public:
 	real_t score();

-	MLPPCLogLogReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
+	MLPPCLogLogReg(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, MLPPReg::RegularizationType p_reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t p_lambda = 0.5, real_t p_alpha = 0.5);

 	MLPPCLogLogReg();
 	~MLPPCLogLogReg();
@@ -38,30 +40,30 @@ protected:
 	void weight_initialization(int k);
 	void bias_initialization();

-	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+	real_t cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);

-	real_t evaluatev(std::vector<real_t> x);
-	real_t propagatev(std::vector<real_t> x);
+	real_t evaluatev(const Ref<MLPPVector> &x);
+	real_t propagatev(const Ref<MLPPVector> &x);

-	std::vector<real_t> evaluatem(std::vector<std::vector<real_t>> X);
-	std::vector<real_t> propagatem(std::vector<std::vector<real_t>> X);
+	Ref<MLPPVector> evaluatem(const Ref<MLPPMatrix> &X);
+	Ref<MLPPVector> propagatem(const Ref<MLPPMatrix> &X);

 	void forward_pass();

 	static void _bind_methods();

-	std::vector<std::vector<real_t>> _input_set;
-	std::vector<real_t> _output_set;
-	std::vector<real_t> _y_hat;
-	std::vector<real_t> _z;
-	std::vector<real_t> _weights;
+	Ref<MLPPMatrix> _input_set;
+	Ref<MLPPVector> _output_set;
+	Ref<MLPPVector> _y_hat;
+	Ref<MLPPVector> _z;
+	Ref<MLPPVector> _weights;

 	real_t bias;

 	int _n;
 	int _k;

 	// Regularization Params
-	std::string _reg;
+	MLPPReg::RegularizationType _reg;
 	real_t _lambda;
 	real_t _alpha; /* This is the controlling param for Elastic Net*/
 };
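A consequence of the header change: the regularization selector is now the MLPPReg::RegularizationType enum rather than a free-form std::string, so a misspelled regularizer name fails at compile time instead of silently selecting nothing. A construction sketch with the defaults spelled out (REGULARIZATION_TYPE_NONE is taken from the default argument above; input_set and output_set as in the usage sketch at the top):

	// Equivalent to MLPPCLogLogReg(input_set, output_set): no regularization,
	// lambda = 0.5 and alpha = 0.5 (alpha is the elastic-net mixing parameter).
	MLPPCLogLogReg model(input_set, output_set, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);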

View File

@@ -430,10 +430,18 @@ void MLPPTests::test_c_log_log_regression(bool ui) {
 	alg.printVector(model_old.modelSetTest(alg.transpose(inputSet)));
 	std::cout << "ACCURACY: " << 100 * model_old.score() << "%" << std::endl;

-	MLPPCLogLogReg model(alg.transpose(inputSet), outputSet);
+	Ref<MLPPMatrix> input_set;
+	input_set.instance();
+	input_set->set_from_std_vectors(alg.transpose(inputSet));
+
+	Ref<MLPPVector> output_set;
+	output_set.instance();
+	output_set->set_from_std_vector(outputSet);
+
+	MLPPCLogLogReg model(alg.transposem(input_set), output_set);
 	model.sgd(0.1, 10000, ui);
-	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
-	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
+	PLOG_MSG(model.model_set_test(alg.transposem(input_set))->to_string());
+	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }

 void MLPPTests::test_exp_reg_regression(bool ui) {
 	MLPPLinAlg alg;