More API standardization.

Relintai 2023-04-22 14:39:13 +02:00
parent 590d1ce5e2
commit eb9f3eaa34
8 changed files with 170 additions and 170 deletions
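The changes below are mechanical renames of MLPPLinAlg helpers toward a suffixed naming scheme (`nv` for MLPPVector variants, `nm` for MLPPMatrix variants, `_vt` for Vector<Ref<MLPPMatrix>> variants), together with the matching updates at every call site. A minimal before/after sketch of one call site, assuming only the MLPPLinAlg, MLPPVector and MLPPMatrix types from this module; the include path and the wrapper function name are illustrative, not part of the commit, while the alg.* method names are taken verbatim from the diff:

// Illustrative sketch only (not part of the commit): a call site updated to
// the renamed MLPPLinAlg helpers.
#include "mlpp/lin_alg/lin_alg.h" // assumed include path

Ref<MLPPMatrix> ones_over_exp(const Ref<MLPPMatrix> &z) {
	MLPPLinAlg alg;

	// old names (before this commit):
	//   alg.onematm(...), alg.expm(...), alg.element_wise_divisionnm(...)
	Ref<MLPPMatrix> ones = alg.onematnm(z->size().x, z->size().y); // was onematm
	Ref<MLPPMatrix> ez = alg.expnm(z);                             // was expm
	return alg.element_wise_divisionnvnm(ones, ez);                // was element_wise_divisionnm
}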


@ -842,7 +842,7 @@ Ref<MLPPVector> MLPPActivation::linear_derivv(const Ref<MLPPVector> &z) {
} }
Ref<MLPPMatrix> MLPPActivation::linear_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::linear_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.onematm(z->size().x, z->size().y); return alg.onematnm(z->size().x, z->size().y);
} }
//SIGMOID //SIGMOID
@ -851,11 +851,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
} }
Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z)))); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
} }
Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.expm(alg.scalar_multiplynm(-1, z)))); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
} }
real_t MLPPActivation::sigmoid_derivr(real_t z) { real_t MLPPActivation::sigmoid_derivr(real_t z) {
@ -1224,7 +1224,7 @@ Ref<MLPPVector> MLPPActivation::softplus_normv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::softplus_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::softplus_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.lognv(alg.additionnv(alg.onematm(z->size().x, z->size().y), alg.expnv(z))); return alg.lognv(alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.expnv(z)));
} }
real_t MLPPActivation::softplus_derivr(real_t z) { real_t MLPPActivation::softplus_derivr(real_t z) {
@ -1245,12 +1245,12 @@ real_t MLPPActivation::softsign_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(z, alg.additionnv(alg.onevecv(z->size()), alg.absv(z))); return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecv(z->size()), alg.absv(z)));
} }
Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(z, alg.additionnv(alg.onematm(z->size().x, z->size().y), alg.absm(z))); return alg.element_wise_divisionnvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
} }
real_t MLPPActivation::softsign_derivr(real_t z) { real_t MLPPActivation::softsign_derivr(real_t z) {
@ -1259,12 +1259,12 @@ real_t MLPPActivation::softsign_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecv(z->size()), alg.absv(z)), 2)); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecv(z->size()), alg.absv(z)), 2));
} }
Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.absm(z)), 2)); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
} }
//GAUSSIANCDF //GAUSSIANCDF
@ -1281,7 +1281,7 @@ Ref<MLPPVector> MLPPActivation::gaussian_cdf_normv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(0.5, alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.erfm(alg.scalar_multiplynm(1 / sqrt(2), z)))); return alg.scalar_multiplynm(0.5, alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.erfnm(alg.scalar_multiplynm(1 / sqrt(2), z))));
} }
real_t MLPPActivation::gaussian_cdf_derivr(real_t z) { real_t MLPPActivation::gaussian_cdf_derivr(real_t z) {
@ -1296,7 +1296,7 @@ Ref<MLPPVector> MLPPActivation::gaussian_cdf_derivv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(1 / Math::sqrt(2 * M_PI), alg.expm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z)))); return alg.scalar_multiplynm(1 / Math::sqrt(2 * M_PI), alg.expnm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z))));
} }
//CLOGLOG //CLOGLOG
@ -1313,7 +1313,7 @@ Ref<MLPPVector> MLPPActivation::cloglog_normv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::cloglog_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::cloglog_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, alg.expm(alg.scalar_multiplynm(-1, alg.expm(z))))); return alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, alg.expnm(alg.scalar_multiplynm(-1, alg.expnm(z)))));
} }
real_t MLPPActivation::cloglog_derivr(real_t z) { real_t MLPPActivation::cloglog_derivr(real_t z) {
@ -1328,7 +1328,7 @@ Ref<MLPPVector> MLPPActivation::cloglog_derivv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::cloglog_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::cloglog_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.expm(alg.scalar_multiplynm(-1, alg.expm(z))); return alg.expnm(alg.scalar_multiplynm(-1, alg.expnm(z)));
} }
//LOGIT //LOGIT
@ -1339,12 +1339,12 @@ real_t MLPPActivation::logit_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.lognv(alg.element_wise_division(z, alg.subtractionnv(alg.onevecv(z->size()), z))); return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecv(z->size()), z)));
} }
Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.logm(alg.element_wise_divisionnm(z, alg.subtractionnm(alg.onematm(z->size().x, z->size().y), z))); return alg.lognm(alg.element_wise_divisionnvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
} }
real_t MLPPActivation::logit_derivr(real_t z) { real_t MLPPActivation::logit_derivr(real_t z) {
@ -1354,17 +1354,17 @@ Ref<MLPPVector> MLPPActivation::logit_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.subtractionnv( return alg.subtractionnv(
alg.element_wise_division(alg.onevecv(z->size()), z), alg.element_wise_divisionnv(alg.onevecv(z->size()), z),
alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(z, alg.onevecv(z->size())))); alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(z, alg.onevecv(z->size()))));
} }
Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.subtractionnm( return alg.subtractionnm(
alg.element_wise_divisionnm( alg.element_wise_divisionnvnm(
alg.onematm(z->size().x, z->size().y), z), alg.onematnm(z->size().x, z->size().y), z),
alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y),
alg.subtractionnm(z, alg.onematm(z->size().x, z->size().y)))); alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y))));
} }
//UNITSTEP //UNITSTEP
@ -1484,7 +1484,7 @@ Ref<MLPPVector> MLPPActivation::mish_derivv(const Ref<MLPPVector> &z) {
sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))), sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))),
z), z),
sigmoid_normv(z)), sigmoid_normv(z)),
alg.element_wise_division(mish_normv(z), z)); alg.element_wise_divisionnv(mish_normv(z), z));
} }
Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
@ -1496,7 +1496,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))), sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))),
z), z),
sigmoid_normm(z)), sigmoid_normm(z)),
alg.element_wise_divisionnm(mish_normm(z), z)); alg.element_wise_divisionnvnm(mish_normm(z), z));
} }
//SINC //SINC
@ -1507,12 +1507,12 @@ real_t MLPPActivation::sinc_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.sinv(z), z); return alg.element_wise_divisionnv(alg.sinv(z), z);
} }
Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.sinm(z), z); return alg.element_wise_divisionnvnm(alg.sinnm(z), z);
} }
real_t MLPPActivation::sinc_derivr(real_t z) { real_t MLPPActivation::sinc_derivr(real_t z) {
@ -1521,12 +1521,12 @@ real_t MLPPActivation::sinc_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosv(z)), alg.sinv(z)), alg.hadamard_productnv(z, z)); return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosv(z)), alg.sinv(z)), alg.hadamard_productnv(z, z));
} }
Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosm(z)), alg.sinm(z)), alg.hadamard_productnm(z, z)); return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
} }
//RELU //RELU
@ -2006,7 +2006,7 @@ Ref<MLPPVector> MLPPActivation::sinh_normv(const Ref<MLPPVector> &z) {
} }
Ref<MLPPMatrix> MLPPActivation::sinh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::sinh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(0.5, alg.subtractionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); return alg.scalar_multiplynm(0.5, alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
} }
real_t MLPPActivation::sinh_derivr(real_t z) { real_t MLPPActivation::sinh_derivr(real_t z) {
@ -2030,7 +2030,7 @@ Ref<MLPPVector> MLPPActivation::cosh_normv(const Ref<MLPPVector> &z) {
} }
Ref<MLPPMatrix> MLPPActivation::cosh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::cosh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(0.5, alg.additionnv(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); return alg.scalar_multiplynm(0.5, alg.additionnv(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
} }
real_t MLPPActivation::cosh_derivr(real_t z) { real_t MLPPActivation::cosh_derivr(real_t z) {
@ -2051,12 +2051,12 @@ real_t MLPPActivation::tanh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::tanh_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::tanh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z)))); return alg.element_wise_divisionnv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
} }
Ref<MLPPMatrix> MLPPActivation::tanh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::tanh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.subtractionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
} }
real_t MLPPActivation::tanh_derivr(real_t z) { real_t MLPPActivation::tanh_derivr(real_t z) {
@ -2081,13 +2081,13 @@ real_t MLPPActivation::csch_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), sinh_normv(z)); return alg.element_wise_divisionnv(alg.onevecv(z->size()), sinh_normv(z));
} }
Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), sinh_normm(z)); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
} }
real_t MLPPActivation::csch_derivr(real_t z) { real_t MLPPActivation::csch_derivr(real_t z) {
@ -2114,14 +2114,14 @@ real_t MLPPActivation::sech_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), cosh_normv(z)); return alg.element_wise_divisionnv(alg.onevecv(z->size()), cosh_normv(z));
// return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech)); // return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech));
} }
Ref<MLPPMatrix> MLPPActivation::sech_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::sech_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), cosh_normm(z)); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
// return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech)); // return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech));
} }
@ -2149,12 +2149,12 @@ real_t MLPPActivation::coth_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), tanh_normv(z)); return alg.element_wise_divisionnv(alg.onevecv(z->size()), tanh_normv(z));
} }
Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), tanh_normm(z)); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
} }
real_t MLPPActivation::coth_derivr(real_t z) { real_t MLPPActivation::coth_derivr(real_t z) {
@ -2186,7 +2186,7 @@ Ref<MLPPVector> MLPPActivation::arsinh_normv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::arsinh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arsinh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.logm(alg.additionnm(z, alg.sqrtm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y))))); return alg.lognm(alg.additionnm(z, alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y)))));
} }
real_t MLPPActivation::arsinh_derivr(real_t z) { real_t MLPPActivation::arsinh_derivr(real_t z) {
@ -2196,13 +2196,13 @@ real_t MLPPActivation::arsinh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size()))));
} }
Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.sqrtm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y)))); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
} }
//ARCOSH //ARCOSH
@ -2219,7 +2219,7 @@ Ref<MLPPVector> MLPPActivation::arcosh_normv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::arcosh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcosh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.logm(alg.additionnm(z, alg.sqrtm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y))))); return alg.lognm(alg.additionnm(z, alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y)))));
} }
real_t MLPPActivation::arcosh_derivr(real_t z) { real_t MLPPActivation::arcosh_derivr(real_t z) {
@ -2228,13 +2228,13 @@ real_t MLPPActivation::arcosh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size()))));
} }
Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.sqrtm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y)))); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
} }
//ARTANH //ARTANH
@ -2245,13 +2245,13 @@ real_t MLPPActivation::artanh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_division(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(alg.onevecv(z->size()), z)))); return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(alg.onevecv(z->size()), z))));
} }
Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(0.5, alg.logm(alg.element_wise_divisionnm(alg.additionnm(alg.onematm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematm(z->size().x, z->size().y), z)))); return alg.scalar_multiplynm(0.5, alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
} }
real_t MLPPActivation::artanh_derivr(real_t z) { real_t MLPPActivation::artanh_derivr(real_t z) {
@ -2260,13 +2260,13 @@ real_t MLPPActivation::artanh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)));
} }
Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.subtractionnv(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
} }
//ARCSCH //ARCSCH
@ -2282,18 +2282,18 @@ Ref<MLPPVector> MLPPActivation::arcsch_normv(const Ref<MLPPVector> &z) {
alg.sqrtnv( alg.sqrtnv(
alg.additionnv( alg.additionnv(
alg.onevecv(z->size()), alg.onevecv(z->size()),
alg.element_wise_division(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))), alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))),
alg.element_wise_division(alg.onevecv(z->size()), z))); alg.element_wise_divisionnv(alg.onevecv(z->size()), z)));
} }
Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.logm( return alg.lognm(
alg.additionnm( alg.additionnm(
alg.sqrtm( alg.sqrtnm(
alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y),
alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))), alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), z))); alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), z)));
} }
real_t MLPPActivation::arcsch_derivr(real_t z) { real_t MLPPActivation::arcsch_derivr(real_t z) {
@ -2302,20 +2302,20 @@ real_t MLPPActivation::arcsch_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division( return alg.element_wise_divisionnv(
alg.fullv(z->size(), -1), alg.fullv(z->size(), -1),
alg.hadamard_productnm( alg.hadamard_productnm(
alg.hadamard_productnv(z, z), alg.hadamard_productnv(z, z),
alg.sqrtnv(alg.additionnv(alg.onevecv(z->size()), alg.element_wise_division(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))))); alg.sqrtnv(alg.additionnv(alg.onevecv(z->size()), alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))))));
} }
Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm( return alg.element_wise_divisionnvnm(
alg.fullm(z->size().x, z->size().y, -1), alg.fullnm(z->size().x, z->size().y, -1),
alg.hadamard_productnm(alg.hadamard_productnm(z, z), alg.hadamard_productnm(alg.hadamard_productnm(z, z),
alg.sqrtm(alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y),
alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))))); alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
} }
//ARSECH //ARSECH
@ -2329,29 +2329,29 @@ Ref<MLPPVector> MLPPActivation::arsech_normv(const Ref<MLPPVector> &z) {
return alg.lognv( return alg.lognv(
alg.additionnv( alg.additionnv(
alg.element_wise_division( alg.element_wise_divisionnv(
alg.onevecv(z->size()), z), alg.onevecv(z->size()), z),
alg.hadamard_productnv( alg.hadamard_productnv(
alg.additionnv(alg.element_wise_division(alg.onevecv(z->size()), z), alg.onevecv(z->size())), alg.additionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size())),
alg.subtractionnv(alg.element_wise_division(alg.onevecv(z->size()), z), alg.onevecv(z->size()))))); alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size())))));
} }
Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.logm( return alg.lognm(
alg.additionnm( alg.additionnm(
alg.element_wise_divisionnm( alg.element_wise_divisionnvnm(
alg.onematm(z->size().x, z->size().y), z), alg.onematnm(z->size().x, z->size().y), z),
alg.hadamard_productnm( alg.hadamard_productnm(
alg.additionnm( alg.additionnm(
alg.element_wise_divisionnm( alg.element_wise_divisionnvnm(
alg.onematm(z->size().x, z->size().y), z), alg.onematnm(z->size().x, z->size().y), z),
alg.onematm(z->size().x, z->size().y)), alg.onematnm(z->size().x, z->size().y)),
alg.subtractionnm( alg.subtractionnm(
alg.element_wise_divisionnm( alg.element_wise_divisionnvnm(
alg.onematm(z->size().x, z->size().y), z), alg.onematnm(z->size().x, z->size().y), z),
alg.onematm(z->size().x, z->size().y))))); alg.onematnm(z->size().x, z->size().y)))));
} }
real_t MLPPActivation::arsech_derivr(real_t z) { real_t MLPPActivation::arsech_derivr(real_t z) {
@ -2361,7 +2361,7 @@ real_t MLPPActivation::arsech_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division( return alg.element_wise_divisionnv(
alg.fullv(z->size(), -1), alg.fullv(z->size(), -1),
alg.hadamard_productnv( alg.hadamard_productnv(
z, z,
@ -2372,11 +2372,11 @@ Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm( return alg.element_wise_divisionnvnm(
alg.fullm(z->size().x, z->size().y, -1), alg.fullnm(z->size().x, z->size().y, -1),
alg.hadamard_productnm( alg.hadamard_productnm(
z, z,
alg.sqrtm(alg.subtractionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))); alg.sqrtnm(alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))));
} }
//ARCOTH //ARCOTH
@ -2389,7 +2389,7 @@ Ref<MLPPVector> MLPPActivation::arcoth_normv(const Ref<MLPPVector> &z) {
return alg.scalar_multiplynv( return alg.scalar_multiplynv(
0.5, 0.5,
alg.lognv(alg.element_wise_division(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(z, alg.onevecv(z->size()))))); alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(z, alg.onevecv(z->size())))));
} }
Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
@ -2397,7 +2397,7 @@ Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
return alg.scalar_multiplynm( return alg.scalar_multiplynm(
0.5, 0.5,
alg.logm(alg.element_wise_divisionnm(alg.additionnm(alg.onematm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematm(z->size().x, z->size().y))))); alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
} }
real_t MLPPActivation::arcoth_derivr(real_t z) { real_t MLPPActivation::arcoth_derivr(real_t z) {
@ -2406,13 +2406,13 @@ real_t MLPPActivation::arcoth_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) { Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)));
} }
Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) { Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.subtractionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
} }
void MLPPActivation::_bind_methods() { void MLPPActivation::_bind_methods() {


@ -314,8 +314,8 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden = alg.addition_vt(v_hidden, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)); v_hidden = alg.addition_vt(v_hidden, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2)); v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch); y_hat = model_set_test(current_input_batch);
@ -378,8 +378,8 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden = alg.addition_vt(alg.scalar_multiply_vm(1 - b1, v_hidden), alg.scalar_multiply_vm(b1, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2))); v_hidden = alg.addition_vt(alg.scalar_multiply_vm(1 - b1, v_hidden), alg.scalar_multiply_vm(b1, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2)); v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch); y_hat = model_set_test(current_input_batch);
@ -456,8 +456,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output); Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output); Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output);
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_hat, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch); y_hat = model_set_test(current_input_batch);
@ -529,8 +529,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output); Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_hat, alg.scalar_add_vm(e, u_hidden))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, u_hidden)));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output_hat, alg.scalar_addnv(e, u_output))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output)));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch); y_hat = model_set_test(current_input_batch);
@ -606,8 +606,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output); Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output);
Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad)); Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad));
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_final, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_final, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
@ -686,8 +686,8 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden_hat = alg.max_vt(v_hidden_hat, v_hidden); v_hidden_hat = alg.max_vt(v_hidden_hat, v_hidden);
v_output_hat = alg.maxnvv(v_output_hat, v_output); v_output_hat = alg.maxnvv(v_output_hat, v_output);
Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch); y_hat = model_set_test(current_input_batch);


@ -103,7 +103,7 @@ void MLPPBernoulliNB::compute_vocab() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPData data; MLPPData data;
_vocab = data.vec_to_setnv(alg.flattenv(_input_set)); _vocab = data.vec_to_setnv(alg.flattenvvnv(_input_set));
} }
void MLPPBernoulliNB::compute_theta() { void MLPPBernoulliNB::compute_theta() {


@ -209,7 +209,7 @@ Ref<MLPPVector> MLPPCost::mbe_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLP
} }
Ref<MLPPMatrix> MLPPCost::mbe_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) { Ref<MLPPMatrix> MLPPCost::mbe_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.onematm(y_hat->size().x, y_hat->size().y); return alg.onematnm(y_hat->size().x, y_hat->size().y);
} }
// Classification Costs // Classification Costs
@ -250,15 +250,15 @@ real_t MLPPCost::log_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &
Ref<MLPPVector> MLPPCost::log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) { Ref<MLPPVector> MLPPCost::log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.additionnv( return alg.additionnv(
alg.scalar_multiplynv(-1, alg.element_wise_division(y, y_hat)), alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)),
alg.element_wise_division(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat)))); alg.element_wise_divisionnv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
} }
Ref<MLPPMatrix> MLPPCost::log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) { Ref<MLPPMatrix> MLPPCost::log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.additionnm( return alg.additionnm(
alg.scalar_multiplynm(-1, alg.element_wise_divisionnm(y, y_hat)), alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)),
alg.element_wise_divisionnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat)))); alg.element_wise_divisionnvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
} }
real_t MLPPCost::cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) { real_t MLPPCost::cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
@ -294,11 +294,11 @@ real_t MLPPCost::cross_entropym(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatr
Ref<MLPPVector> MLPPCost::cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) { Ref<MLPPVector> MLPPCost::cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynv(-1, alg.element_wise_division(y, y_hat)); return alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat));
} }
Ref<MLPPMatrix> MLPPCost::cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) { Ref<MLPPMatrix> MLPPCost::cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalar_multiplynm(-1, alg.element_wise_divisionnm(y, y_hat)); return alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat));
} }
real_t MLPPCost::huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta) { real_t MLPPCost::huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta) {


@ -302,7 +302,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const
row.push_back(scalar_multiplynv(a_ptr[A->calculate_index(i, k)], row_tmp)); row.push_back(scalar_multiplynv(a_ptr[A->calculate_index(i, k)], row_tmp));
} }
Ref<MLPPVector> flattened_row = flattenvv(row); Ref<MLPPVector> flattened_row = flattenmnv(row);
C->set_row_mlpp_vector(i * b_size.y + j, flattened_row); C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
} }
@ -310,7 +310,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const
return C; return C;
} }
Ref<MLPPMatrix> MLPPLinAlg::element_wise_divisionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) { Ref<MLPPMatrix> MLPPLinAlg::element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>()); ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
Size2i a_size = A->size(); Size2i a_size = A->size();
ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>()); ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
@ -485,7 +485,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::cbrt(std::vector<std::vector<real_t
return exponentiate(A, real_t(1) / real_t(3)); return exponentiate(A, real_t(1) / real_t(3));
} }
Ref<MLPPMatrix> MLPPLinAlg::logm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::lognm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -503,7 +503,7 @@ Ref<MLPPMatrix> MLPPLinAlg::logm(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::log10m(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::log10nm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -521,7 +521,7 @@ Ref<MLPPMatrix> MLPPLinAlg::log10m(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::expm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::expnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -539,7 +539,7 @@ Ref<MLPPMatrix> MLPPLinAlg::expm(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::erfm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::erfnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -557,7 +557,7 @@ Ref<MLPPMatrix> MLPPLinAlg::erfm(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::exponentiatem(const Ref<MLPPMatrix> &A, real_t p) { Ref<MLPPMatrix> MLPPLinAlg::exponentiatenm(const Ref<MLPPMatrix> &A, real_t p) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -575,7 +575,7 @@ Ref<MLPPMatrix> MLPPLinAlg::exponentiatem(const Ref<MLPPMatrix> &A, real_t p) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::sqrtm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::sqrtnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -593,8 +593,8 @@ Ref<MLPPMatrix> MLPPLinAlg::sqrtm(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::cbrtm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::cbrtnm(const Ref<MLPPMatrix> &A) {
return exponentiatem(A, real_t(1) / real_t(3)); return exponentiatenm(A, real_t(1) / real_t(3));
} }
std::vector<std::vector<real_t>> MLPPLinAlg::matrixPower(std::vector<std::vector<real_t>> A, int n) { std::vector<std::vector<real_t>> MLPPLinAlg::matrixPower(std::vector<std::vector<real_t>> A, int n) {
@ -624,7 +624,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::abs(std::vector<std::vector<real_t>
return B; return B;
} }
Ref<MLPPMatrix> MLPPLinAlg::absm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::absnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -790,7 +790,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::pinverse(std::vector<std::vector<re
return matmult(inverse(matmult(transpose(A), A)), transpose(A)); return matmult(inverse(matmult(transpose(A), A)), transpose(A));
} }
Ref<MLPPMatrix> MLPPLinAlg::cofactorm(const Ref<MLPPMatrix> &A, int n, int i, int j) { Ref<MLPPMatrix> MLPPLinAlg::cofactornm(const Ref<MLPPMatrix> &A, int n, int i, int j) {
Ref<MLPPMatrix> cof; Ref<MLPPMatrix> cof;
cof.instance(); cof.instance();
cof->resize(A->size()); cof->resize(A->size());
@ -813,7 +813,7 @@ Ref<MLPPMatrix> MLPPLinAlg::cofactorm(const Ref<MLPPMatrix> &A, int n, int i, in
return cof; return cof;
} }
Ref<MLPPMatrix> MLPPLinAlg::adjointm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::adjointnm(const Ref<MLPPMatrix> &A) {
Ref<MLPPMatrix> adj; Ref<MLPPMatrix> adj;
ERR_FAIL_COND_V(!A.is_valid(), adj); ERR_FAIL_COND_V(!A.is_valid(), adj);
@ -845,7 +845,7 @@ Ref<MLPPMatrix> MLPPLinAlg::adjointm(const Ref<MLPPMatrix> &A) {
for (int i = 0; i < a_size.y; i++) { for (int i = 0; i < a_size.y; i++) {
for (int j = 0; j < a_size.x; j++) { for (int j = 0; j < a_size.x; j++) {
Ref<MLPPMatrix> cof = cofactorm(A, a_size.y, i, j); Ref<MLPPMatrix> cof = cofactornm(A, a_size.y, i, j);
// 1 if even, -1 if odd // 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1; int sign = (i + j) % 2 == 0 ? 1 : -1;
adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1)); adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1));
@ -853,11 +853,11 @@ Ref<MLPPMatrix> MLPPLinAlg::adjointm(const Ref<MLPPMatrix> &A) {
} }
return adj; return adj;
} }
Ref<MLPPMatrix> MLPPLinAlg::inversem(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::inversenm(const Ref<MLPPMatrix> &A) {
return scalar_multiplynm(1 / detm(A, int(A->size().y)), adjointm(A)); return scalar_multiplynm(1 / detm(A, int(A->size().y)), adjointnm(A));
} }
Ref<MLPPMatrix> MLPPLinAlg::pinversem(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::pinversenm(const Ref<MLPPMatrix> &A) {
return matmultnm(inversem(matmultnm(transposenm(A), A)), transposenm(A)); return matmultnm(inversenm(matmultnm(transposenm(A), A)), transposenm(A));
} }
std::vector<std::vector<real_t>> MLPPLinAlg::zeromat(int n, int m) { std::vector<std::vector<real_t>> MLPPLinAlg::zeromat(int n, int m) {
@ -873,7 +873,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::onemat(int n, int m) {
return full(n, m, 1); return full(n, m, 1);
} }
Ref<MLPPMatrix> MLPPLinAlg::zeromatm(int n, int m) { Ref<MLPPMatrix> MLPPLinAlg::zeromatnm(int n, int m) {
Ref<MLPPMatrix> mat; Ref<MLPPMatrix> mat;
mat.instance(); mat.instance();
@ -882,7 +882,7 @@ Ref<MLPPMatrix> MLPPLinAlg::zeromatm(int n, int m) {
return mat; return mat;
} }
Ref<MLPPMatrix> MLPPLinAlg::onematm(int n, int m) { Ref<MLPPMatrix> MLPPLinAlg::onematnm(int n, int m) {
Ref<MLPPMatrix> mat; Ref<MLPPMatrix> mat;
mat.instance(); mat.instance();
@ -891,7 +891,7 @@ Ref<MLPPMatrix> MLPPLinAlg::onematm(int n, int m) {
return mat; return mat;
} }
Ref<MLPPMatrix> MLPPLinAlg::fullm(int n, int m, int k) { Ref<MLPPMatrix> MLPPLinAlg::fullnm(int n, int m, int k) {
Ref<MLPPMatrix> mat; Ref<MLPPMatrix> mat;
mat.instance(); mat.instance();
@ -943,7 +943,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::cos(std::vector<std::vector<real_t>
return B; return B;
} }
Ref<MLPPMatrix> MLPPLinAlg::sinm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::sinnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -961,7 +961,7 @@ Ref<MLPPMatrix> MLPPLinAlg::sinm(const Ref<MLPPMatrix> &A) {
return out; return out;
} }
Ref<MLPPMatrix> MLPPLinAlg::cosm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::cosnm(const Ref<MLPPMatrix> &A) {
ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPVector>());
Ref<MLPPMatrix> out; Ref<MLPPMatrix> out;
@ -1103,7 +1103,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::cov(std::vector<std::vector<real_t>
return covMat; return covMat;
} }
Ref<MLPPMatrix> MLPPLinAlg::covm(const Ref<MLPPMatrix> &A) { Ref<MLPPMatrix> MLPPLinAlg::covnm(const Ref<MLPPMatrix> &A) {
MLPPStat stat; MLPPStat stat;
Ref<MLPPMatrix> cov_mat; Ref<MLPPMatrix> cov_mat;
@ -1437,7 +1437,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
P->set_element(sub_j, sub_j, Math::cos(theta)); P->set_element(sub_j, sub_j, Math::cos(theta));
P->set_element(sub_j, sub_i, Math::sin(theta)); P->set_element(sub_j, sub_i, Math::sin(theta));
a_new = matmultnm(matmultnm(inversem(P), A), P); a_new = matmultnm(matmultnm(inversenm(P), A), P);
Size2i a_new_size = a_new->size(); Size2i a_new_size = a_new->size();
@ -1549,8 +1549,8 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref<MLPPMatrix> &A) {
EigenResult left_eigen = eigen(matmultnm(A, transposenm(A))); EigenResult left_eigen = eigen(matmultnm(A, transposenm(A)));
EigenResult right_eigen = eigen(matmultnm(transposenm(A), A)); EigenResult right_eigen = eigen(matmultnm(transposenm(A), A));
Ref<MLPPMatrix> singularvals = sqrtm(left_eigen.eigen_values); Ref<MLPPMatrix> singularvals = sqrtnm(left_eigen.eigen_values);
Ref<MLPPMatrix> sigma = zeromatm(a_size.y, a_size.x); Ref<MLPPMatrix> sigma = zeromatnm(a_size.y, a_size.x);
Size2i singularvals_size = singularvals->size(); Size2i singularvals_size = singularvals->size();
@ -1676,7 +1676,7 @@ std::vector<real_t> MLPPLinAlg::flatten(std::vector<std::vector<real_t>> A) {
return a; return a;
} }
Ref<MLPPVector> MLPPLinAlg::flattenvv(const Vector<Ref<MLPPVector>> &A) { Ref<MLPPVector> MLPPLinAlg::flattenmnv(const Vector<Ref<MLPPVector>> &A) {
Ref<MLPPVector> a; Ref<MLPPVector> a;
a.instance(); a.instance();
@ -1705,7 +1705,7 @@ Ref<MLPPVector> MLPPLinAlg::flattenvv(const Vector<Ref<MLPPVector>> &A) {
return a; return a;
} }
Ref<MLPPVector> MLPPLinAlg::flattenv(const Ref<MLPPMatrix> &A) { Ref<MLPPVector> MLPPLinAlg::flattenvvnv(const Ref<MLPPMatrix> &A) {
int data_size = A->data_size(); int data_size = A->data_size();
Ref<MLPPVector> res; Ref<MLPPVector> res;
@ -1878,7 +1878,7 @@ std::vector<real_t> MLPPLinAlg::elementWiseDivision(std::vector<real_t> a, std::
return c; return c;
} }
Ref<MLPPVector> MLPPLinAlg::element_wise_division(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) { Ref<MLPPVector> MLPPLinAlg::element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>()); ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>());
Ref<MLPPVector> out; Ref<MLPPVector> out;
@ -2446,7 +2446,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::max(std::vector<std::vector<real_t>
return C; return C;
} }
Ref<MLPPMatrix> MLPPLinAlg::max_nm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) { Ref<MLPPMatrix> MLPPLinAlg::maxnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
Ref<MLPPMatrix> C; Ref<MLPPMatrix> C;
C.instance(); C.instance();
C->resize(A->size()); C->resize(A->size());
@ -2732,12 +2732,12 @@ std::vector<std::vector<std::vector<real_t>>> MLPPLinAlg::elementWiseDivision(st
return A; return A;
} }
Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_division_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) { Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_divisionnv_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
Vector<Ref<MLPPMatrix>> res; Vector<Ref<MLPPMatrix>> res;
res.resize(A.size()); res.resize(A.size());
for (int i = 0; i < A.size(); i++) { for (int i = 0; i < A.size(); i++) {
res.write[i] = element_wise_divisionnm(A[i], B[i]); res.write[i] = element_wise_divisionnvnm(A[i], B[i]);
} }
return res; return res;
@ -2755,7 +2755,7 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::sqrt_vt(const Vector<Ref<MLPPMatrix>> &A) {
res.resize(A.size()); res.resize(A.size());
for (int i = 0; i < A.size(); i++) { for (int i = 0; i < A.size(); i++) {
res.write[i] = sqrtm(A[i]); res.write[i] = sqrtnm(A[i]);
} }
return res; return res;
@ -2773,7 +2773,7 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::exponentiate_vt(const Vector<Ref<MLPPMatrix>
res.resize(A.size()); res.resize(A.size());
for (int i = 0; i < A.size(); i++) { for (int i = 0; i < A.size(); i++) {
res.write[i] = exponentiatem(A[i], p); res.write[i] = exponentiatenm(A[i], p);
} }
return res; return res;
@ -2876,7 +2876,7 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::max_vt(const Vector<Ref<MLPPMatrix>> &A, con
res.resize(A.size()); res.resize(A.size());
for (int i = 0; i < A.size(); i++) { for (int i = 0; i < A.size(); i++) {
res.write[i] = max_nm(A[i], B[i]); res.write[i] = maxnm(A[i], B[i]);
} }
return res; return res;
@ -2894,7 +2894,7 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::abs_vt(const Vector<Ref<MLPPMatrix>> &A) {
res.resize(A.size()); res.resize(A.size());
for (int i = 0; i < A.size(); i++) { for (int i = 0; i < A.size(); i++) {
res.write[i] = absm(A[i]); res.write[i] = absnm(A[i]);
} }
return A; return A;
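The Vector<Ref<MLPPMatrix>> helpers used by the ANN optimizers delegate to the renamed per-matrix functions, so only the inner call changes. A sketch of that delegation pattern, with the alg.* names taken from the diff; the free-standing wrapper function is illustrative, the real code lives in MLPPLinAlg::element_wise_divisionnv_vt:

// Illustrative sketch of the `_vt` delegation pattern after the rename.
Vector<Ref<MLPPMatrix>> divide_each(MLPPLinAlg &alg,
		const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
	Vector<Ref<MLPPMatrix>> res;
	res.resize(A.size());

	for (int i = 0; i < A.size(); i++) {
		// was element_wise_divisionnm(A[i], B[i]) before this commit
		res.write[i] = alg.element_wise_divisionnvnm(A[i], B[i]);
	}

	return res;
}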


@ -47,7 +47,7 @@ public:
Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B); Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B); Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> element_wise_divisionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B); Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
std::vector<std::vector<real_t>> transpose(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> transpose(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> scalarMultiply(real_t scalar, std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> scalarMultiply(real_t scalar, std::vector<std::vector<real_t>> A);
@ -65,19 +65,19 @@ public:
std::vector<std::vector<real_t>> sqrt(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> sqrt(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> cbrt(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> cbrt(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> logm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> lognm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> log10m(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> log10nm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> expm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> expnm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> erfm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> erfnm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> exponentiatem(const Ref<MLPPMatrix> &A, real_t p); Ref<MLPPMatrix> exponentiatenm(const Ref<MLPPMatrix> &A, real_t p);
Ref<MLPPMatrix> sqrtm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> sqrtnm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> cbrtm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> cbrtnm(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n); std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
std::vector<std::vector<real_t>> abs(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> abs(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> absm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> absnm(const Ref<MLPPMatrix> &A);
real_t det(std::vector<std::vector<real_t>> A, int d); real_t det(std::vector<std::vector<real_t>> A, int d);
real_t detm(const Ref<MLPPMatrix> &A, int d); real_t detm(const Ref<MLPPMatrix> &A, int d);
@ -89,29 +89,29 @@ public:
std::vector<std::vector<real_t>> inverse(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> inverse(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> pinverse(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> pinverse(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> cofactorm(const Ref<MLPPMatrix> &A, int n, int i, int j); Ref<MLPPMatrix> cofactornm(const Ref<MLPPMatrix> &A, int n, int i, int j);
Ref<MLPPMatrix> adjointm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> adjointnm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> inversem(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> inversenm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> pinversem(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> pinversenm(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> zeromat(int n, int m); std::vector<std::vector<real_t>> zeromat(int n, int m);
std::vector<std::vector<real_t>> onemat(int n, int m); std::vector<std::vector<real_t>> onemat(int n, int m);
std::vector<std::vector<real_t>> full(int n, int m, int k); std::vector<std::vector<real_t>> full(int n, int m, int k);
Ref<MLPPMatrix> zeromatm(int n, int m); Ref<MLPPMatrix> zeromatnm(int n, int m);
Ref<MLPPMatrix> onematm(int n, int m); Ref<MLPPMatrix> onematnm(int n, int m);
Ref<MLPPMatrix> fullm(int n, int m, int k); Ref<MLPPMatrix> fullnm(int n, int m, int k);
std::vector<std::vector<real_t>> sin(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> sin(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> cos(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> cos(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> sinm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> sinnm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> cosm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> cosnm(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1); std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B); std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
Ref<MLPPMatrix> max_nm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B); Ref<MLPPMatrix> maxnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
real_t max(std::vector<std::vector<real_t>> A); real_t max(std::vector<std::vector<real_t>> A);
real_t min(std::vector<std::vector<real_t>> A); real_t min(std::vector<std::vector<real_t>> A);
@ -124,7 +124,7 @@ public:
Ref<MLPPMatrix> identitym(int d); Ref<MLPPMatrix> identitym(int d);
std::vector<std::vector<real_t>> cov(std::vector<std::vector<real_t>> A); std::vector<std::vector<real_t>> cov(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> covm(const Ref<MLPPMatrix> &A); Ref<MLPPMatrix> covnm(const Ref<MLPPMatrix> &A);
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> eig(std::vector<std::vector<real_t>> A); std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> eig(std::vector<std::vector<real_t>> A);
@ -183,8 +183,8 @@ public:
real_t sum_elements(std::vector<std::vector<real_t>> A); real_t sum_elements(std::vector<std::vector<real_t>> A);
std::vector<real_t> flatten(std::vector<std::vector<real_t>> A); std::vector<real_t> flatten(std::vector<std::vector<real_t>> A);
Ref<MLPPVector> flattenvv(const Vector<Ref<MLPPVector>> &A); Ref<MLPPVector> flattenmnv(const Vector<Ref<MLPPVector>> &A);
Ref<MLPPVector> flattenv(const Ref<MLPPMatrix> &A); Ref<MLPPVector> flattenvvnv(const Ref<MLPPMatrix> &A);
std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b); std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
@ -206,7 +206,7 @@ public:
void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out); void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
std::vector<real_t> elementWiseDivision(std::vector<real_t> a, std::vector<real_t> b); std::vector<real_t> elementWiseDivision(std::vector<real_t> a, std::vector<real_t> b);
Ref<MLPPVector> element_wise_division(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); Ref<MLPPVector> element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
std::vector<real_t> scalarMultiply(real_t scalar, std::vector<real_t> a); std::vector<real_t> scalarMultiply(real_t scalar, std::vector<real_t> a);
Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a); Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
@ -302,7 +302,7 @@ public:
Vector<Ref<MLPPMatrix>> addition_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B); Vector<Ref<MLPPMatrix>> addition_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B); std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
Vector<Ref<MLPPMatrix>> element_wise_division_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B); Vector<Ref<MLPPMatrix>> element_wise_divisionnv_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A); std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A);
Vector<Ref<MLPPMatrix>> sqrt_vt(const Vector<Ref<MLPPMatrix>> &A); Vector<Ref<MLPPMatrix>> sqrt_vt(const Vector<Ref<MLPPMatrix>> &A);
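One reading of the suffix scheme implied by these declarations, offered as an inference rather than something the commit states: ...r takes real_t, ...v/...nv take Ref<MLPPVector>, ...m/...nm take Ref<MLPPMatrix>, ..._vt takes Vector<Ref<MLPPMatrix>>, and the plain names keep the legacy std::vector overloads. A sketch under those assumptions:

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

void suffix_convention_example() {
	MLPPLinAlg alg;

	Ref<MLPPVector> v = alg.onevecv(3);           // vector-returning helper
	Ref<MLPPMatrix> M = alg.identitym(3);         // matrix-returning helper (name unchanged here)
	Ref<MLPPVector> Mv = alg.mat_vec_multv(M, v); // mixed matrix-vector op
}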

View File

@ -94,7 +94,7 @@ void MLPPLinReg::newton_raphson(real_t learning_rate, int max_epoch, bool ui) {
// Calculating the weight gradients (2nd derivative) // Calculating the weight gradients (2nd derivative)
Ref<MLPPVector> first_derivative = alg.mat_vec_multv(alg.transposenm(_input_set), error); Ref<MLPPVector> first_derivative = alg.mat_vec_multv(alg.transposenm(_input_set), error);
Ref<MLPPMatrix> second_derivative = alg.matmultnm(alg.transposenm(_input_set), _input_set); Ref<MLPPMatrix> second_derivative = alg.matmultnm(alg.transposenm(_input_set), _input_set);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(alg.inversem(second_derivative)), first_derivative))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(alg.inversenm(second_derivative)), first_derivative)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg); _weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients (2nd derivative) // Calculating the bias gradients (2nd derivative)
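For readers skimming the rename, the hunk above is the Newton-Raphson weight update w <- w - (lr / n) * (H^-1)^T * g with g = X^T * error and H = X^T * X; restated as a hedged fragment, where every name outside the diff is an assumption.

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

void newton_step_sketch(MLPPLinAlg &alg, const Ref<MLPPMatrix> &input_set,
		const Ref<MLPPVector> &error, Ref<MLPPVector> &weights,
		real_t learning_rate, real_t n) {
	// g = X^T * error, H = X^T * X.
	Ref<MLPPVector> g = alg.mat_vec_multv(alg.transposenm(input_set), error);
	Ref<MLPPMatrix> H = alg.matmultnm(alg.transposenm(input_set), input_set);

	// w <- w - (lr / n) * (H^-1)^T * g, using the renamed inversenm.
	weights = alg.subtractionnv(weights,
			alg.scalar_multiplynv(learning_rate / n,
					alg.mat_vec_multv(alg.transposenm(alg.inversenm(H)), g)));
}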
@ -405,7 +405,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz
v = alg.hadamard_productnv(weight_grad, weight_grad); v = alg.hadamard_productnv(weight_grad, weight_grad);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
// Calculating the bias gradients // Calculating the bias gradients
_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
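The Adagrad hunk divides the gradient by the square root of the squared gradient before applying it; a hedged restatement of that step (the shown code overwrites v each iteration rather than summing over time, and the sketch mirrors that).

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

void adagrad_step_sketch(MLPPLinAlg &alg, Ref<MLPPVector> &weights,
		Ref<MLPPVector> &v, const Ref<MLPPVector> &weight_grad,
		real_t learning_rate, real_t e) {
	// v = grad * grad (element-wise), as in the hunk above.
	v = alg.hadamard_productnv(weight_grad, weight_grad);

	// w <- w - lr * grad / sqrt(v + e), element-wise via element_wise_divisionnv.
	weights = alg.subtractionnv(weights,
			alg.scalar_multiplynv(learning_rate,
					alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
}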
@ -460,7 +460,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si
v = alg.additionnv(alg.scalar_multiplynv(b1, v), alg.scalar_multiplynv(1 - b1, alg.hadamard_productnv(weight_grad, weight_grad))); v = alg.additionnv(alg.scalar_multiplynv(b1, v), alg.scalar_multiplynv(1 - b1, alg.hadamard_productnv(weight_grad, weight_grad)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
// Calculating the bias gradients // Calculating the bias gradients
_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@ -519,7 +519,7 @@ void MLPPLinReg::adam(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v); Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
// Calculating the bias gradients // Calculating the bias gradients
_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
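The Adam hunk applies the bias-corrected update w <- w - lr * m_hat / (e + sqrt(v_hat)). A hedged sketch follows; it calls the vector-flavoured element_wise_divisionnv, whereas the hunk above routes through element_wise_divisionnvnm.

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

void adam_step_sketch(MLPPLinAlg &alg, Ref<MLPPVector> &weights,
		const Ref<MLPPVector> &m, const Ref<MLPPVector> &v,
		real_t learning_rate, real_t b1, real_t b2, real_t e, int epoch) {
	// Bias-corrected first and second moment estimates
	// (Math::pow is used as in the hunk above; its header comes from the engine).
	Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
	Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);

	// w <- w - lr * m_hat / (e + sqrt(v_hat)).
	weights = alg.subtractionnv(weights,
			alg.scalar_multiplynv(learning_rate,
					alg.element_wise_divisionnv(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
}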
@ -576,7 +576,7 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size
Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(m_hat, u))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_hat, u)));
// Calculating the bias gradients // Calculating the bias gradients
_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@ -637,7 +637,7 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v); Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
// Calculating the bias gradients // Calculating the bias gradients
_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@ -683,14 +683,14 @@ void MLPPLinReg::normal_equation() {
Ref<MLPPVector> temp; Ref<MLPPVector> temp;
//temp.resize(_k); //temp.resize(_k);
temp = alg.mat_vec_multv(alg.inversem(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); temp = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
ERR_FAIL_COND_MSG(Math::is_nan(temp->get_element(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent."); ERR_FAIL_COND_MSG(Math::is_nan(temp->get_element(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent.");
if (_reg == MLPPReg::REGULARIZATION_TYPE_RIDGE) { if (_reg == MLPPReg::REGULARIZATION_TYPE_RIDGE) {
_weights = alg.mat_vec_multv(alg.inversem(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); _weights = alg.mat_vec_multv(alg.inversenm(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
} else { } else {
_weights = alg.mat_vec_multv(alg.inversem(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); _weights = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
} }
_bias = stat.meanv(_output_set) - alg.dotv(_weights, x_means); _bias = stat.meanv(_output_set) - alg.dotv(_weights, x_means);
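The normal-equation hunks reduce to w = (X^T X)^-1 X^T y, with lambda * I added inside the inverse for ridge regularization; a hedged helper using the renamed inversenm, where names outside the diff are assumptions.

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

Ref<MLPPVector> normal_equation_sketch(MLPPLinAlg &alg,
		const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y) {
	// w = (X^T X)^-1 X^T y; for ridge, the hunk above adds
	// lambda * identitym(k) to X^T X before inverting.
	return alg.mat_vec_multv(
			alg.inversenm(alg.matmultnm(alg.transposenm(X), X)),
			alg.mat_vec_multv(alg.transposenm(X), y));
}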

View File

@ -28,7 +28,7 @@ Ref<MLPPMatrix> MLPPPCA::principal_components() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPData data; MLPPData data;
MLPPLinAlg::SVDResult svr_res = alg.svd(alg.covm(_input_set)); MLPPLinAlg::SVDResult svr_res = alg.svd(alg.covnm(_input_set));
_x_normalized = data.mean_centering(_input_set); _x_normalized = data.mean_centering(_input_set);
Size2i svr_res_u_size = svr_res.U->size(); Size2i svr_res_u_size = svr_res.U->size();
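The PCA hunk builds the covariance of the inputs with the renamed covnm and takes its SVD, reading the principal directions from U. A hedged sketch of that call chain; only the .U field of SVDResult is confirmed by the diff, everything else here is assumed.

#include "mlpp/lin_alg/lin_alg.h" // assumed include path

void pca_sketch(MLPPLinAlg &alg, const Ref<MLPPMatrix> &input_set) {
	// Covariance, then SVD; the columns of U span the principal directions.
	MLPPLinAlg::SVDResult svd_res = alg.svd(alg.covnm(input_set));

	Ref<MLPPMatrix> principal_directions = svd_res.U;
	Size2i u_size = principal_directions->size(); // rows x columns of U
}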