From 6d5f66d9ff931086cb51f89e0de9a2a12133e106 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Fri, 28 Apr 2023 21:07:35 +0200
Subject: [PATCH] Cleaned up MLPPSoftmaxReg.

---
 mlpp/softmax_reg/softmax_reg.cpp | 38 +++++++++++++-------------------
 mlpp/softmax_reg/softmax_reg.h   | 10 ++++-----
 2 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp
index fe83491..3b7f2e0 100644
--- a/mlpp/softmax_reg/softmax_reg.cpp
+++ b/mlpp/softmax_reg/softmax_reg.cpp
@@ -8,7 +8,6 @@
 
 #include "../activation/activation.h"
 #include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
 #include "../regularization/reg.h"
 #include "../utilities/utilities.h"
 
@@ -74,7 +73,6 @@ Ref<MLPPMatrix> MLPPSoftmaxReg::model_set_test(const Ref<MLPPMatrix> &X) {
 void MLPPSoftmaxReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 	ERR_FAIL_COND(!_initialized);
 
-	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
@@ -84,20 +82,20 @@ void MLPPSoftmaxReg::gradient_descent(real_t learning_rate, int max_epoch, bool
 	while (true) {
 		cost_prev = cost(_y_hat, _output_set);
 
-		Ref<MLPPMatrix> error = alg.subtractionnm(_y_hat, _output_set);
+		Ref<MLPPMatrix> error = _y_hat->subn(_output_set);
 
 		//Calculating the weight gradients
-		Ref<MLPPMatrix> w_gradient = alg.matmultnm(alg.transposenm(_input_set), error);
+		Ref<MLPPMatrix> w_gradient = _input_set->transposen()->multn(error);
 
 		//Weight updation
-		_weights = alg.subtractionnm(_weights, alg.scalar_multiplynm(learning_rate, w_gradient));
+		_weights->sub(w_gradient->scalar_multiplyn(learning_rate));
 		_weights = regularization.reg_weightsm(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradients
 		//real_t b_gradient = alg.sum_elements(error);
 
 		// Bias Updation
-		_bias = alg.subtract_matrix_rowsnv(_bias, alg.scalar_multiplynm(learning_rate, error));
+		_bias->subtract_matrix_rows(error->scalar_multiplyn(learning_rate));
 
 		forward_pass();
 
@@ -118,7 +116,6 @@ void MLPPSoftmaxReg::gradient_descent(real_t learning_rate, int max_epoch, bool
 void MLPPSoftmaxReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 	ERR_FAIL_COND(!_initialized);
 
-	MLPPLinAlg alg;
 	MLPPReg regularization;
 
 	real_t cost_prev = 0;
@@ -159,17 +156,17 @@ void MLPPSoftmaxReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 		cost_prev = cost(y_hat_matrix_tmp, output_set_row_matrix_tmp);
 
 		// Calculating the weight gradients
-		Ref<MLPPMatrix> w_gradient = alg.outer_product(input_set_row_tmp, alg.subtractionnv(y_hat, output_set_row_tmp));
+		Ref<MLPPMatrix> w_gradient = input_set_row_tmp->outer_product(y_hat->subn(output_set_row_tmp));
 
 		// Weight Updation
-		_weights = alg.subtractionnm(_weights, alg.scalar_multiplynm(learning_rate, w_gradient));
+		_weights->sub(w_gradient->scalar_multiplyn(learning_rate));
 		_weights = regularization.reg_weightsm(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradients
-		Ref<MLPPVector> b_gradient = alg.subtractionnv(y_hat, output_set_row_tmp);
+		Ref<MLPPVector> b_gradient = y_hat->subn(output_set_row_tmp);
 
 		// Bias updation
-		_bias = alg.subtractionnv(_bias, alg.scalar_multiplynv(learning_rate, b_gradient));
+		_bias->sub(b_gradient->scalar_multiplyn(learning_rate));
 
 		y_hat = evaluatev(output_set_row_tmp);
 
@@ -191,7 +188,6 @@ void MLPPSoftmaxReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 void MLPPSoftmaxReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui) {
 	ERR_FAIL_COND(!_initialized);
 
-	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
@@ -208,17 +204,17 @@ void MLPPSoftmaxReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui) {
 			Ref<MLPPMatrix> y_hat = evaluatem(current_inputs);
 			cost_prev = cost(y_hat, current_outputs);
 
-			Ref<MLPPMatrix> error = alg.subtractionnm(y_hat, current_outputs);
+			Ref<MLPPMatrix> error = y_hat->subn(current_outputs);
 
 			// Calculating the weight gradients
-			Ref<MLPPMatrix> w_gradient = alg.matmultnm(alg.transposenm(current_inputs), error);
+			Ref<MLPPMatrix> w_gradient = current_inputs->transposen()->multn(error);
 
 			//Weight updation
-			_weights = alg.subtractionnm(_weights, alg.scalar_multiplynm(learning_rate, w_gradient));
+			_weights->sub(w_gradient->scalar_multiplyn(learning_rate));
 			_weights = regularization.reg_weightsm(_weights, _lambda, _alpha, _reg);
 
 			// Calculating the bias gradients
-			_bias = alg.subtract_matrix_rowsnv(_bias, alg.scalar_multiplynm(learning_rate, error));
+			_bias->subtract_matrix_rows(error->scalar_multiplyn(learning_rate));
 
 			y_hat = evaluatem(current_inputs);
 
 			if (ui) {
@@ -342,25 +338,21 @@ real_t MLPPSoftmaxReg::cost(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix>
 }
 
 Ref<MLPPVector> MLPPSoftmaxReg::evaluatev(const Ref<MLPPVector> &x) {
-	MLPPLinAlg alg;
 	MLPPActivation avn;
-
-	return avn.softmax_normv(alg.additionnv(_bias, alg.mat_vec_multnv(alg.transposenm(_weights), x)));
+	return avn.softmax_normv(_bias->addn(_weights->transposen()->mult_vec(x)));
 }
 
 Ref<MLPPMatrix> MLPPSoftmaxReg::evaluatem(const Ref<MLPPMatrix> &X) {
-	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	return avn.softmax_normm(alg.mat_vec_addnm(alg.matmultnm(X, _weights), _bias));
+	return avn.softmax_normm(X->multn(_weights)->add_vecn(_bias));
 }
 
 // softmax ( wTx + b )
 void MLPPSoftmaxReg::forward_pass() {
-	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	_y_hat = avn.softmax_normm(alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights), _bias));
+	_y_hat = avn.softmax_normm(_input_set->multn(_weights)->add_vecn(_bias));
 }
 
 void MLPPSoftmaxReg::_bind_methods() {
diff --git a/mlpp/softmax_reg/softmax_reg.h b/mlpp/softmax_reg/softmax_reg.h
index f82199b..bc82805 100644
--- a/mlpp/softmax_reg/softmax_reg.h
+++ b/mlpp/softmax_reg/softmax_reg.h
@@ -67,6 +67,11 @@ protected:
 	Ref<MLPPMatrix> _input_set;
 	Ref<MLPPMatrix> _output_set;
 
+	// Regularization Params
+	MLPPReg::RegularizationType _reg;
+	real_t _lambda;
+	real_t _alpha; /* This is the controlling param for Elastic Net*/
+
 	Ref<MLPPMatrix> _y_hat;
 	Ref<MLPPMatrix> _weights;
 	Ref<MLPPVector> _bias;
@@ -75,11 +80,6 @@ protected:
 	int _k;
 	int _n_class;
 
-	// Regularization Params
-	MLPPReg::RegularizationType _reg;
-	real_t _lambda;
-	real_t _alpha; /* This is the controlling param for Elastic Net*/
-
 	bool _initialized;
 };
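
Note on the math the rewritten update steps implement: for softmax regression with cross-entropy loss, error = y_hat - y, the weight gradient is X^T * error, and the bias gradient is the column sums of error, which is what the transposen()->multn() and subtract_matrix_rows() calls compute. The following is a minimal, self-contained sketch of that same step, illustrative only: it uses plain std::vector instead of the MLPPMatrix/MLPPVector API, and the names forward and gradient_step are hypothetical, not part of this library.

// Illustrative sketch only: the same update the patch performs with MLPPMatrix,
// written with plain std::vector so it stands alone.
// Shapes: X is n x d, W is d x k, b has k entries, Y is a one-hot n x k target matrix.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

using Mat = std::vector<std::vector<double>>;
using Vec = std::vector<double>;

// softmax(X * W + b), applied row-wise.
Mat forward(const Mat &X, const Mat &W, const Vec &b) {
	std::size_t n = X.size(), d = W.size(), k = b.size();
	Mat y_hat(n, Vec(k, 0.0));
	for (std::size_t i = 0; i < n; ++i) {
		for (std::size_t j = 0; j < k; ++j) {
			double z = b[j];
			for (std::size_t f = 0; f < d; ++f) {
				z += X[i][f] * W[f][j];
			}
			y_hat[i][j] = z;
		}
		// Row-wise softmax, subtracting the row max for numerical stability.
		double mx = y_hat[i][0];
		for (std::size_t j = 1; j < k; ++j) {
			mx = std::max(mx, y_hat[i][j]);
		}
		double sum = 0.0;
		for (std::size_t j = 0; j < k; ++j) {
			y_hat[i][j] = std::exp(y_hat[i][j] - mx);
			sum += y_hat[i][j];
		}
		for (std::size_t j = 0; j < k; ++j) {
			y_hat[i][j] /= sum;
		}
	}
	return y_hat;
}

// One gradient-descent step:
//   W -= lr * X^T (y_hat - Y);  b -= lr * column sums of (y_hat - Y).
void gradient_step(const Mat &X, const Mat &Y, Mat &W, Vec &b, double lr) {
	Mat y_hat = forward(X, W, b);
	std::size_t n = X.size(), d = W.size(), k = b.size();
	for (std::size_t i = 0; i < n; ++i) {
		for (std::size_t j = 0; j < k; ++j) {
			double err = y_hat[i][j] - Y[i][j]; // error = y_hat - y
			for (std::size_t f = 0; f < d; ++f) {
				W[f][j] -= lr * X[i][f] * err; // accumulates X^T * error
			}
			b[j] -= lr * err; // accumulates the column sum of error
		}
	}
}

The patch additionally passes the updated weights through regularization.reg_weightsm() after each step; that regularization term is omitted from the sketch.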