More cleanups.

Relintai 2023-04-30 18:46:53 +02:00
parent 94db644c69
commit 1233a4bbe7
5 changed files with 63 additions and 94 deletions

View File

@@ -8,7 +8,6 @@
#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
#include "core/log/logger.h"
@@ -50,7 +49,6 @@ void MLPPAutoEncoder::gradient_descent(real_t learning_rate, int max_epoch, bool
ERR_FAIL_COND(!_initialized);
MLPPActivation avn;
MLPPLinAlg alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -60,27 +58,25 @@ void MLPPAutoEncoder::gradient_descent(real_t learning_rate, int max_epoch, bool
cost_prev = cost(_y_hat, _input_set);
// Calculating the errors
Ref<MLPPMatrix> error = alg.subtractionnm(_y_hat, _input_set);
Ref<MLPPMatrix> error = _y_hat->subn(_input_set);
// Calculating the weight/bias gradients for layer 2
Ref<MLPPMatrix> D2_1 = alg.matmultnm(alg.transposenm(_a2), error);
Ref<MLPPMatrix> D2_1 = _a2->transposen()->multn(error);
// weights and bias updation for layer 2
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate / _n, D2_1));
_weights2->sub(D2_1->scalar_multiplyn(learning_rate / _n));
// Calculating the bias gradients for layer 2
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2->subtract_matrix_rows(error->scalar_multiplyn(learning_rate));
// Calculating the weight/bias gradients for layer 1
Ref<MLPPMatrix> D1_1 = alg.matmultnm(error, alg.transposenm(_weights2));
Ref<MLPPMatrix> D1_2 = alg.hadamard_productnm(D1_1, avn.sigmoid_derivm(_z2));
Ref<MLPPMatrix> D1_3 = alg.matmultnm(alg.transposenm(_input_set), D1_2);
Ref<MLPPMatrix> D1_1 = error->multn(_weights2->transposen());
Ref<MLPPMatrix> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivm(_z2));
Ref<MLPPMatrix> D1_3 = _input_set->transposen()->multn(D1_2);
// weight and bias updation for layer 1
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate / _n, D1_3));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate / _n, D1_2));
_weights1->sub(D1_3->scalar_multiplyn(learning_rate / _n));
_bias1->subtract_matrix_rows(D1_2->scalar_multiplyn(learning_rate / _n));
forward_pass();
@@ -105,7 +101,6 @@ void MLPPAutoEncoder::sgd(real_t learning_rate, int max_epoch, bool ui) {
ERR_FAIL_COND(!_initialized);
MLPPActivation avn;
MLPPLinAlg alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -137,24 +132,25 @@ void MLPPAutoEncoder::sgd(real_t learning_rate, int max_epoch, bool ui) {
PropagateVResult prop_res = propagatev(input_set_row_tmp);
cost_prev = cost(y_hat_mat_tmp, input_set_mat_tmp);
Ref<MLPPVector> error = alg.subtractionnv(y_hat, input_set_row_tmp);
Ref<MLPPVector> error = y_hat->subn(input_set_row_tmp);
// Weight updation for layer 2
Ref<MLPPMatrix> D2_1 = alg.outer_product(error, prop_res.a2);
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate, alg.transposenm(D2_1)));
Ref<MLPPMatrix> D2_1 = error->outer_product(prop_res.a2);
_weights2->sub(D2_1->transposen()->scalar_multiplyn(learning_rate));
// Bias updation for layer 2
_bias2 = alg.subtractionnv(_bias2, alg.scalar_multiplynv(learning_rate, error));
_bias2->sub(error->scalar_multiplyn(learning_rate));
// Weight updation for layer 1
Ref<MLPPVector> D1_1 = alg.mat_vec_multnv(_weights2, error);
Ref<MLPPVector> D1_2 = alg.hadamard_productnv(D1_1, avn.sigmoid_derivv(prop_res.z2));
Ref<MLPPMatrix> D1_3 = alg.outer_product(input_set_row_tmp, D1_2);
Ref<MLPPVector> D1_1 = _weights2->mult_vec(error);
Ref<MLPPVector> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivv(prop_res.z2));
Ref<MLPPMatrix> D1_3 = input_set_row_tmp->outer_product(D1_2);
_weights1->sub(D1_3->scalar_multiplyn(learning_rate));
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate, D1_3));
// Bias updation for layer 1
_bias1 = alg.subtractionnv(_bias1, alg.scalar_multiplynv(learning_rate, D1_2));
_bias1->sub(D1_2->scalar_multiplyn(learning_rate));
y_hat = evaluatev(input_set_row_tmp);
@@ -181,7 +177,6 @@ void MLPPAutoEncoder::mbgd(real_t learning_rate, int max_epoch, int mini_batch_s
ERR_FAIL_COND(!_initialized);
MLPPActivation avn;
MLPPLinAlg alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -200,27 +195,26 @@ void MLPPAutoEncoder::mbgd(real_t learning_rate, int max_epoch, int mini_batch_s
cost_prev = cost(y_hat, current_batch);
// Calculating the errors
Ref<MLPPMatrix> error = alg.subtractionnm(y_hat, current_batch);
Ref<MLPPMatrix> error = y_hat->subn(current_batch);
// Calculating the weight/bias gradients for layer 2
Ref<MLPPMatrix> D2_1 = alg.matmultnm(alg.transposenm(prop_res.a2), error);
Ref<MLPPMatrix> D2_1 = prop_res.a2->transposen()->multn(error);
// weights and bias updation for layer 2
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate / current_batch->size().y, D2_1));
_weights2->sub(D2_1->scalar_multiplyn(learning_rate / current_batch->size().y));
// Bias Updation for layer 2
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2->subtract_matrix_rows(error->scalar_multiplyn(learning_rate));
// Calculating the weight/bias gradients for layer 1
Ref<MLPPMatrix> D1_1 = alg.matmultnm(error, alg.transposenm(_weights2));
Ref<MLPPMatrix> D1_2 = alg.hadamard_productnm(D1_1, avn.sigmoid_derivm(prop_res.z2));
Ref<MLPPMatrix> D1_3 = alg.matmultnm(alg.transposenm(current_batch), D1_2);
Ref<MLPPMatrix> D1_1 = error->multn(_weights2->transposen());
Ref<MLPPMatrix> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivm(prop_res.z2));
Ref<MLPPMatrix> D1_3 = current_batch->transposen()->multn(D1_2);
// weight and bias updation for layer 1
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate / current_batch->size().x, D1_3));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate / current_batch->size().x, D1_2));
_weights1->sub(D1_3->scalar_multiplyn(learning_rate / current_batch->size().x));
_bias1->subtract_matrix_rows(D1_2->scalar_multiplyn(learning_rate / current_batch->size().x));
y_hat = evaluatem(current_batch);
@@ -301,56 +295,52 @@ real_t MLPPAutoEncoder::cost(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix>
}
Ref<MLPPVector> MLPPAutoEncoder::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPVector> z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> z2 = _weights1->transposen()->mult_vec(x)->addn(_bias1);
Ref<MLPPVector> a2 = avn.sigmoid_normv(z2);
return alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights2), a2), _bias2);
return _weights2->transposen()->mult_vec(a2)->addn(_bias2);
}
MLPPAutoEncoder::PropagateVResult MLPPAutoEncoder::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
PropagateVResult res;
res.z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
res.z2 = _weights1->transposen()->mult_vec(x)->addn(_bias1);
res.a2 = avn.sigmoid_normv(res.z2);
return res;
}
Ref<MLPPMatrix> MLPPAutoEncoder::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPMatrix> z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> z2 = X->multn(_weights1)->add_vecn(_bias1);
Ref<MLPPMatrix> a2 = avn.sigmoid_normm(z2);
return alg.mat_vec_addnm(alg.matmultnm(a2, _weights2), _bias2);
return a2->multn(_weights2)->add_vecn(_bias2);
}
MLPPAutoEncoder::PropagateMResult MLPPAutoEncoder::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
PropagateMResult res;
res.z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
res.z2 = X->multn(_weights1)->add_vecn(_bias1);
res.a2 = avn.sigmoid_normm(res.z2);
return res;
}
void MLPPAutoEncoder::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z2 = alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights1), _bias1);
_z2 = _input_set->multn(_weights1)->add_vecn(_bias1);
_a2 = avn.sigmoid_normm(_z2);
_y_hat = alg.mat_vec_addnm(alg.matmultnm(_a2, _weights2), _bias2);
_y_hat = _a2->multn(_weights2)->add_vecn(_bias2);
}
void MLPPAutoEncoder::_bind_methods() {

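For reference, the updates the rewritten MLPPAutoEncoder code above computes, written out (notation mine: X is _input_set, η is learning_rate, n is _n, σ the sigmoid from MLPPActivation; I am assuming subtract_matrix_rows subtracts each matrix row from the bias vector, i.e. a row-sum):

Z_2 = X W_1 + b_1, \qquad A_2 = \sigma(Z_2), \qquad \hat{Y} = A_2 W_2 + b_2

E = \hat{Y} - X
W_2 \leftarrow W_2 - \tfrac{\eta}{n} A_2^{T} E, \qquad b_2 \leftarrow b_2 - \eta \sum_i E_i
D_1 = (E\, W_2^{T}) \odot \sigma'(Z_2)
W_1 \leftarrow W_1 - \tfrac{\eta}{n} X^{T} D_1, \qquad b_1 \leftarrow b_1 - \tfrac{\eta}{n} \sum_i D_{1,i}

(E_i and D_{1,i} denote rows.) The sgd and mbgd variants apply the same formulas to a single row and to a mini-batch respectively.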
View File

@@ -6,7 +6,6 @@
#include "bernoulli_nb.h"
#include "../data/data.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
#include <iostream>
@@ -100,10 +99,9 @@ MLPPBernoulliNB::~MLPPBernoulliNB() {
}
void MLPPBernoulliNB::compute_vocab() {
MLPPLinAlg alg;
MLPPData data;
_vocab = data.vec_to_setnv(alg.flattenvvnv(_input_set));
_vocab = data.vec_to_setnv(_input_set->flatten());
}
void MLPPBernoulliNB::compute_theta() {

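The same pattern recurs in every file of this commit: the stateless MLPPLinAlg helper is dropped and the math is expressed as chained calls on Ref<MLPPMatrix> / Ref<MLPPVector>. A minimal sketch of the before/after style, using only method names that appear in this diff (exact signatures and required headers are assumptions, not the library's documented API; Ref, MLPPMatrix, MLPPLinAlg and real_t are taken to be in scope via the module's usual includes):

// Old style: free-function-like helpers on a throwaway MLPPLinAlg object;
// the result is reassigned to the member Ref.
static void layer2_update_old(Ref<MLPPMatrix> &weights2, const Ref<MLPPMatrix> &y_hat,
		const Ref<MLPPMatrix> &input_set, const Ref<MLPPMatrix> &a2, real_t learning_rate, real_t n) {
	MLPPLinAlg alg;
	Ref<MLPPMatrix> error = alg.subtractionnm(y_hat, input_set);
	Ref<MLPPMatrix> d2 = alg.matmultnm(alg.transposenm(a2), error);
	weights2 = alg.subtractionnm(weights2, alg.scalar_multiplynm(learning_rate / n, d2));
}

// New style: chained instance methods. The "n"-suffixed calls (subn, multn,
// transposen, scalar_multiplyn, ...) return a new object, while the unsuffixed
// ones (sub, add, subtract_matrix_rows) appear to modify the receiver in place,
// so no reassignment is needed.
static void layer2_update_new(const Ref<MLPPMatrix> &weights2, const Ref<MLPPMatrix> &y_hat,
		const Ref<MLPPMatrix> &input_set, const Ref<MLPPMatrix> &a2, real_t learning_rate, real_t n) {
	Ref<MLPPMatrix> error = y_hat->subn(input_set);
	Ref<MLPPMatrix> d2 = a2->transposen()->multn(error);
	weights2->sub(d2->scalar_multiplyn(learning_rate / n));
}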
View File

@@ -7,7 +7,6 @@
#include "c_log_log_reg.h"
#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"
@@ -23,7 +22,6 @@ real_t MLPPCLogLogReg::model_test(const Ref<MLPPVector> &x) {
void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
@@ -34,14 +32,15 @@ void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool
while (true) {
cost_prev = cost(_y_hat, _output_set);
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
Ref<MLPPVector> error = _y_hat->subn(_output_set);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights->sub(_input_set->transposen()->mult_vec(error->hadamard_productn(avn.cloglog_derivv(_z)))->scalar_multiplyn(learning_rate / _n));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
bias -= learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(_z))) / _n;
bias -= learning_rate * error->hadamard_productn(avn.cloglog_derivv(_z))->sum_elements() / _n;
forward_pass();
@@ -60,7 +59,6 @@ void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool
void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
@@ -71,13 +69,13 @@ void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
cost_prev = cost(_y_hat, _output_set);
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
Ref<MLPPVector> error = _y_hat->subn(_output_set);
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights->add(_input_set->transposen()->mult_vec(error->hadamard_productn(avn.cloglog_derivv(_z)))->scalar_multiplyn(learning_rate / _n));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
bias += learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(_z))) / _n;
bias += learning_rate * error->hadamard_productn(avn.cloglog_derivv(_z))->sum_elements() / _n;
forward_pass();
@@ -95,7 +93,6 @@ void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
}
void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
@@ -136,7 +133,8 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
real_t error = y_hat - output_element_set;
// Weight Updation
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * Math::exp(z - Math::exp(z)), input_set_row_tmp));
_weights->sub(input_set_row_tmp->scalar_multiplyn(learning_rate * error * Math::exp(z - Math::exp(z))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Bias updation
@@ -161,7 +159,6 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
int epoch = 1;
@@ -179,14 +176,15 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
Ref<MLPPVector> z = propagatem(current_input_batch);
cost_prev = cost(y_hat, current_output_batch);
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch);
Ref<MLPPVector> error = y_hat->subn(current_output_batch);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(current_input_batch), alg.hadamard_productnv(error, avn.cloglog_derivv(z)))));
_weights->sub(current_input_batch->transposen()->mult_vec(error->hadamard_productn(avn.cloglog_derivv(z)))->scalar_multiplyn(learning_rate / _n));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
bias -= learning_rate * alg.sum_elementsv(alg.hadamard_productnv(error, avn.cloglog_derivv(z))) / _n;
bias -= learning_rate * error->hadamard_productn(avn.cloglog_derivv(z))->sum_elements() / _n;
forward_pass();
@@ -248,29 +246,23 @@ real_t MLPPCLogLogReg::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector>
}
real_t MLPPCLogLogReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog_normr(alg.dotnv(_weights, x) + bias);
return avn.cloglog_normr(_weights->dot(x) + bias);
}
real_t MLPPCLogLogReg::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
return alg.dotnv(_weights, x) + bias;
return _weights->dot(x) + bias;
}
Ref<MLPPVector> MLPPCLogLogReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog_normv(alg.scalar_addnv(bias, alg.mat_vec_multnv(X, _weights)));
return avn.cloglog_normv(X->mult_vec(_weights)->scalar_addn(bias));
}
Ref<MLPPVector> MLPPCLogLogReg::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
return alg.scalar_addnv(bias, alg.mat_vec_multnv(X, _weights));
return X->mult_vec(_weights)->scalar_addn(bias);
}
// cloglog ( wTx + b )
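Written out, the model and gradients this class implements (reconstructed from the code above; the link g(z) = 1 - e^{-e^z} is inferred from the e^{z - e^z} derivative used in sgd, so treat it as an assumption about cloglog_normr):

\hat{y} = g(w^{T} x + b), \qquad g(z) = 1 - e^{-e^{z}}, \qquad g'(z) = e^{z - e^{z}}

and, with e = \hat{y} - y over the batch,

w \leftarrow w - \tfrac{\eta}{n}\, X^{T}\!\big(e \odot g'(z)\big), \qquad b \leftarrow b - \tfrac{\eta}{n} \sum_i e_i\, g'(z_i)

mle() ascends the likelihood instead of descending the cost, so the same terms are added rather than subtracted.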

View File

@@ -7,7 +7,6 @@
#include "dual_svc.h"
#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"
@@ -24,7 +23,6 @@ real_t MLPPDualSVC::model_test(const Ref<MLPPVector> &x) {
void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
MLPPCost mlpp_cost;
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
int epoch = 1;
@@ -42,7 +40,7 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
while (true) {
cost_prev = cost(_alpha, _input_set, _output_set);
_alpha = alg.subtractionnv(_alpha, alg.scalar_multiplynv(learning_rate, mlpp_cost.dual_form_svm_deriv(_alpha, _input_set, _output_set)));
_alpha->sub(mlpp_cost.dual_form_svm_deriv(_alpha, _input_set, _output_set)->scalar_multiplyn(learning_rate));
alpha_projection();
@@ -56,7 +54,7 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
_input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
_input_set->row_get_into_mlpp_vector(j, input_set_j_row_tmp);
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->element_get(j) * _output_set->element_get(j) * input_set_j_row_tmp->dot(input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
}
@@ -207,7 +205,6 @@ real_t MLPPDualSVC::evaluatev(const Ref<MLPPVector> &x) {
}
real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
real_t z = 0;
Ref<MLPPVector> input_set_row_tmp;
@@ -217,7 +214,7 @@ real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->element_get(j) != 0) {
_input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
z += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
z += _alpha->element_get(j) * _output_set->element_get(j) * input_set_row_tmp->dot(x); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
z += _bias;
@@ -231,7 +228,6 @@ Ref<MLPPVector> MLPPDualSVC::evaluatem(const Ref<MLPPMatrix> &X) {
}
Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
Ref<MLPPVector> z;
z.instance();
z->resize(X->size().y);
@@ -252,7 +248,7 @@ Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {
_input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
X->row_get_into_mlpp_vector(i, x_row_tmp);
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->element_get(j) * _output_set->element_get(j) * input_set_row_tmp->dot(x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
@@ -281,20 +277,16 @@ void MLPPDualSVC::alpha_projection() {
}
real_t MLPPDualSVC::kernel_functionv(const Ref<MLPPVector> &v, const Ref<MLPPVector> &u, KernelMethod kernel) {
MLPPLinAlg alg;
if (kernel == KERNEL_METHOD_LINEAR) {
return alg.dotnv(u, v);
return u->dot(v);
}
return 0;
}
Ref<MLPPMatrix> MLPPDualSVC::kernel_functionm(const Ref<MLPPMatrix> &U, const Ref<MLPPMatrix> &V, KernelMethod kernel) {
MLPPLinAlg alg;
if (kernel == KERNEL_METHOD_LINEAR) {
return alg.matmultnm(_input_set, alg.transposenm(_input_set));
return _input_set->multn(_input_set->transposen());
}
Ref<MLPPMatrix> m;

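For reference, the decision function the propagatev/propagatem loops above evaluate, and the update applied in gradient_descent (notation mine; only the linear-kernel case is shown, matching kernel_functionv):

f(x) = \sum_{j:\,\alpha_j \neq 0} \alpha_j\, y_j\, K(x_j, x) + b, \qquad K(u, v) = u^{T} v

\alpha \leftarrow \alpha - \eta\, \nabla_{\alpha} J_{\mathrm{dual}}(\alpha)

followed by alpha_projection(); the derivative itself comes from MLPPCost::dual_form_svm_deriv, and the TO DO comments note that non-linear kernelizations are still unimplemented.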
View File

@@ -6,7 +6,6 @@
#include "exp_reg.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../stat/stat.h"
#include "../utilities/utilities.h"
@@ -23,7 +22,6 @@ real_t MLPPExpReg::model_test(const Ref<MLPPVector> &x) {
}
void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
@@ -34,7 +32,7 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
while (true) {
cost_prev = cost(_y_hat, _output_set);
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
Ref<MLPPVector> error = _y_hat->subn(_output_set);
for (int i = 0; i < _k; i++) {
// Calculating the weight gradient
@@ -154,7 +152,6 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
}
void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui) {
MLPPLinAlg alg;
MLPPReg regularization;
real_t cost_prev = 0;
@@ -171,7 +168,7 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> y_hat = evaluatem(current_input_batch);
cost_prev = cost(y_hat, current_output_batch);
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch);
Ref<MLPPVector> error = y_hat->subn(current_output_batch);
for (int j = 0; j < _k; j++) {
// Calculating the weight gradient