More cleanups.

This commit is contained in:
Relintai 2023-04-29 20:10:49 +02:00
parent 15f781c8c2
commit fbfd546ee8
4 changed files with 11 additions and 24 deletions

View File

@@ -5,7 +5,6 @@
//
#include "multi_output_layer.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
int MLPPMultiOutputLayer::get_n_output() {
@@ -121,18 +120,16 @@ void MLPPMultiOutputLayer::set_weight_init(const MLPPUtilities::WeightDistributi
}
void MLPPMultiOutputLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z = alg.mat_vec_addnm(alg.matmultnm(_input, _weights), _bias);
_z = _input->multn(_weights)->add_vecn(_bias);
_a = avn.run_activation_norm_matrix(_activation, _z);
}
void MLPPMultiOutputLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
_z_test = alg.additionnm(alg.mat_vec_multnv(alg.transposenm(_weights), x), _bias);
_z_test = _weights->transposen()->mult_vec(x)->addn(_bias);
_a_test = avn.run_activation_norm_vector(_activation, _z_test);
}

View File

@@ -8,7 +8,6 @@
#include "core/containers/local_vector.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
#include <random>
@@ -92,7 +91,7 @@ real_t MLPPMultinomialNB::model_test(const Ref<MLPPVector> &x) {
}
for (int i = 0; i < _priors->size(); i++) {
score[i] += std::log(_priors->element_get(i));
score[i] += Math::log(_priors->element_get(i));
}
int max_index = 0;
@@ -181,8 +180,6 @@ void MLPPMultinomialNB::compute_theta() {
}
void MLPPMultinomialNB::evaluate() {
MLPPLinAlg alg;
int output_set_size = _output_set->size();
Size2i input_set_size = _input_set->size();
@@ -198,7 +195,7 @@ void MLPPMultinomialNB::evaluate() {
_priors->element_set(osii, _priors->element_get(osii) + 1);
}
_priors = alg.scalar_multiplynv(real_t(1) / real_t(output_set_size), _priors);
_priors->scalar_multiply(real_t(1) / real_t(output_set_size));
// Evaluating Theta...
compute_theta();

View File

@@ -5,7 +5,6 @@
//
#include "output_layer.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
int MLPPOutputLayer::get_n_hidden() {
@@ -150,10 +149,9 @@ void MLPPOutputLayer::forward_pass() {
initialize();
}
MLPPLinAlg alg;
MLPPActivation avn;
_z = alg.scalar_addnv(_bias, alg.mat_vec_multnv(_input, _weights));
_z = _input->mult_vec(_weights)->scalar_addn(_bias);
_a = avn.run_activation_norm_vector(_activation, _z);
}
@@ -162,10 +160,9 @@ void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
initialize();
}
MLPPLinAlg alg;
MLPPActivation avn;
_z_test = alg.dotnv(_weights, x) + _bias;
_z_test = _weights->dot(x) + _bias;
_a_test = avn.run_activation_norm_real(_activation, _z_test);
}

View File

@@ -6,7 +6,6 @@
#include "pca.h"
#include "../data/data.h"
#include "../lin_alg/lin_alg.h"
Ref<MLPPMatrix> MLPPPCA::get_input_set() {
return _input_set;
@@ -25,10 +24,9 @@ void MLPPPCA::set_k(const int val) {
Ref<MLPPMatrix> MLPPPCA::principal_components() {
ERR_FAIL_COND_V(!_input_set.is_valid() || _k == 0, Ref<MLPPMatrix>());
MLPPLinAlg alg;
MLPPData data;
MLPPLinAlg::SVDResult svr_res = alg.svd(alg.covnm(_input_set));
MLPPMatrix::SVDResult svr_res = _input_set->cov()->svd();
_x_normalized = data.mean_centering(_input_set);
Size2i svr_res_u_size = svr_res.U->size();
@@ -41,7 +39,7 @@ Ref<MLPPMatrix> MLPPPCA::principal_components() {
}
}
_z = alg.matmultnm(alg.transposenm(_u_reduce), _x_normalized);
_z = _u_reduce->transposen()->multn(_x_normalized);
return _z;
}
@@ -50,9 +48,7 @@ Ref<MLPPMatrix> MLPPPCA::principal_components() {
real_t MLPPPCA::score() {
ERR_FAIL_COND_V(!_input_set.is_valid() || _k == 0, 0);
MLPPLinAlg alg;
Ref<MLPPMatrix> x_approx = alg.matmultnm(_u_reduce, _z);
Ref<MLPPMatrix> x_approx = _u_reduce->multn(_z);
real_t num = 0;
real_t den = 0;
@@ -72,7 +68,7 @@ real_t MLPPPCA::score() {
_x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);
x_approx->row_get_into_mlpp_vector(i, x_approx_row_tmp);
num += alg.norm_sqv(alg.subtractionnv(x_normalized_row_tmp, x_approx_row_tmp));
num += x_normalized_row_tmp->subn(x_approx_row_tmp)->norm_sq();
}
num /= x_normalized_size_y;
@@ -80,7 +76,7 @@ real_t MLPPPCA::score() {
for (int i = 0; i < x_normalized_size_y; ++i) {
_x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);
den += alg.norm_sqv(x_normalized_row_tmp);
den += x_normalized_row_tmp->norm_sq();
}
den /= x_normalized_size_y;