diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp index ccdb587..9397d3a 100644 --- a/mlpp/activation/activation.cpp +++ b/mlpp/activation/activation.cpp @@ -842,7 +842,7 @@ Ref MLPPActivation::linear_derivv(const Ref &z) { } Ref MLPPActivation::linear_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.onematm(z->size().x, z->size().y); + return alg.onematnm(z->size().x, z->size().y); } //SIGMOID @@ -851,11 +851,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) { } Ref MLPPActivation::sigmoid_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z)))); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z)))); } Ref MLPPActivation::sigmoid_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.expm(alg.scalar_multiplynm(-1, z)))); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z)))); } real_t MLPPActivation::sigmoid_derivr(real_t z) { @@ -1224,7 +1224,7 @@ Ref MLPPActivation::softplus_normv(const Ref &z) { Ref MLPPActivation::softplus_normm(const Ref &z) { MLPPLinAlg alg; - return alg.lognv(alg.additionnv(alg.onematm(z->size().x, z->size().y), alg.expnv(z))); + return alg.lognv(alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.expnv(z))); } real_t MLPPActivation::softplus_derivr(real_t z) { @@ -1245,12 +1245,12 @@ real_t MLPPActivation::softsign_normr(real_t z) { Ref MLPPActivation::softsign_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(z, alg.additionnv(alg.onevecv(z->size()), alg.absv(z))); + return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecv(z->size()), alg.absv(z))); } Ref MLPPActivation::softsign_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(z, alg.additionnv(alg.onematm(z->size().x, z->size().y), alg.absm(z))); + return alg.element_wise_divisionnvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z))); } real_t MLPPActivation::softsign_derivr(real_t z) { @@ -1259,12 +1259,12 @@ real_t MLPPActivation::softsign_derivr(real_t z) { Ref MLPPActivation::softsign_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecv(z->size()), alg.absv(z)), 2)); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecv(z->size()), alg.absv(z)), 2)); } Ref MLPPActivation::softsign_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.absm(z)), 2)); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2)); } //GAUSSIANCDF @@ -1281,7 +1281,7 @@ Ref MLPPActivation::gaussian_cdf_normv(const Ref &z) { Ref MLPPActivation::gaussian_cdf_normm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(0.5, alg.additionnm(alg.onematm(z->size().x, z->size().y), alg.erfm(alg.scalar_multiplynm(1 / sqrt(2), z)))); + return alg.scalar_multiplynm(0.5, 
alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.erfnm(alg.scalar_multiplynm(1 / sqrt(2), z)))); } real_t MLPPActivation::gaussian_cdf_derivr(real_t z) { @@ -1296,7 +1296,7 @@ Ref MLPPActivation::gaussian_cdf_derivv(const Ref &z) { Ref MLPPActivation::gaussian_cdf_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(1 / Math::sqrt(2 * M_PI), alg.expm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z)))); + return alg.scalar_multiplynm(1 / Math::sqrt(2 * M_PI), alg.expnm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z)))); } //CLOGLOG @@ -1313,7 +1313,7 @@ Ref MLPPActivation::cloglog_normv(const Ref &z) { Ref MLPPActivation::cloglog_normm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, alg.expm(alg.scalar_multiplynm(-1, alg.expm(z))))); + return alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, alg.expnm(alg.scalar_multiplynm(-1, alg.expnm(z))))); } real_t MLPPActivation::cloglog_derivr(real_t z) { @@ -1328,7 +1328,7 @@ Ref MLPPActivation::cloglog_derivv(const Ref &z) { Ref MLPPActivation::cloglog_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.expm(alg.scalar_multiplynm(-1, alg.expm(z))); + return alg.expnm(alg.scalar_multiplynm(-1, alg.expnm(z))); } //LOGIT @@ -1339,12 +1339,12 @@ real_t MLPPActivation::logit_normr(real_t z) { Ref MLPPActivation::logit_normv(const Ref &z) { MLPPLinAlg alg; - return alg.lognv(alg.element_wise_division(z, alg.subtractionnv(alg.onevecv(z->size()), z))); + return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecv(z->size()), z))); } Ref MLPPActivation::logit_normm(const Ref &z) { MLPPLinAlg alg; - return alg.logm(alg.element_wise_divisionnm(z, alg.subtractionnm(alg.onematm(z->size().x, z->size().y), z))); + return alg.lognm(alg.element_wise_divisionnvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))); } real_t MLPPActivation::logit_derivr(real_t z) { @@ -1354,17 +1354,17 @@ Ref MLPPActivation::logit_derivv(const Ref &z) { MLPPLinAlg alg; return alg.subtractionnv( - alg.element_wise_division(alg.onevecv(z->size()), z), - alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(z, alg.onevecv(z->size())))); + alg.element_wise_divisionnv(alg.onevecv(z->size()), z), + alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(z, alg.onevecv(z->size())))); } Ref MLPPActivation::logit_derivm(const Ref &z) { MLPPLinAlg alg; return alg.subtractionnm( - alg.element_wise_divisionnm( - alg.onematm(z->size().x, z->size().y), z), - alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), - alg.subtractionnm(z, alg.onematm(z->size().x, z->size().y)))); + alg.element_wise_divisionnvnm( + alg.onematnm(z->size().x, z->size().y), z), + alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), + alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))); } //UNITSTEP @@ -1484,7 +1484,7 @@ Ref MLPPActivation::mish_derivv(const Ref &z) { sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))), z), sigmoid_normv(z)), - alg.element_wise_division(mish_normv(z), z)); + alg.element_wise_divisionnv(mish_normv(z), z)); } Ref MLPPActivation::mish_derivm(const Ref &z) { MLPPLinAlg alg; @@ -1496,7 +1496,7 @@ Ref MLPPActivation::mish_derivm(const Ref &z) { sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))), z), sigmoid_normm(z)), - alg.element_wise_divisionnm(mish_normm(z), z)); + alg.element_wise_divisionnvnm(mish_normm(z), z)); } //SINC @@ -1507,12 +1507,12 @@ real_t 
MLPPActivation::sinc_normr(real_t z) { Ref MLPPActivation::sinc_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.sinv(z), z); + return alg.element_wise_divisionnv(alg.sinv(z), z); } Ref MLPPActivation::sinc_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.sinm(z), z); + return alg.element_wise_divisionnvnm(alg.sinnm(z), z); } real_t MLPPActivation::sinc_derivr(real_t z) { @@ -1521,12 +1521,12 @@ real_t MLPPActivation::sinc_derivr(real_t z) { Ref MLPPActivation::sinc_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosv(z)), alg.sinv(z)), alg.hadamard_productnv(z, z)); + return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosv(z)), alg.sinv(z)), alg.hadamard_productnv(z, z)); } Ref MLPPActivation::sinc_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosm(z)), alg.sinm(z)), alg.hadamard_productnm(z, z)); + return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z)); } //RELU @@ -2006,7 +2006,7 @@ Ref MLPPActivation::sinh_normv(const Ref &z) { } Ref MLPPActivation::sinh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(0.5, alg.subtractionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); + return alg.scalar_multiplynm(0.5, alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z)))); } real_t MLPPActivation::sinh_derivr(real_t z) { @@ -2030,7 +2030,7 @@ Ref MLPPActivation::cosh_normv(const Ref &z) { } Ref MLPPActivation::cosh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(0.5, alg.additionnv(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); + return alg.scalar_multiplynm(0.5, alg.additionnv(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z)))); } real_t MLPPActivation::cosh_derivr(real_t z) { @@ -2051,12 +2051,12 @@ real_t MLPPActivation::tanh_normr(real_t z) { Ref MLPPActivation::tanh_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z)))); + return alg.element_wise_divisionnv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z)))); } Ref MLPPActivation::tanh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.subtractionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expm(z), alg.expm(alg.scalar_multiplynm(-1, z)))); + return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z)))); } real_t MLPPActivation::tanh_derivr(real_t z) { @@ -2081,13 +2081,13 @@ real_t MLPPActivation::csch_normr(real_t z) { Ref MLPPActivation::csch_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), sinh_normv(z)); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), sinh_normv(z)); } Ref MLPPActivation::csch_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), sinh_normm(z)); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z)); } real_t 
MLPPActivation::csch_derivr(real_t z) { @@ -2114,14 +2114,14 @@ real_t MLPPActivation::sech_normr(real_t z) { Ref MLPPActivation::sech_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), cosh_normv(z)); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), cosh_normv(z)); // return activation(z, deriv, static_cast(&sech)); } Ref MLPPActivation::sech_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), cosh_normm(z)); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z)); // return activation(z, deriv, static_cast(&sech)); } @@ -2149,12 +2149,12 @@ real_t MLPPActivation::coth_normr(real_t z) { Ref MLPPActivation::coth_normv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), tanh_normv(z)); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), tanh_normv(z)); } Ref MLPPActivation::coth_normm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), tanh_normm(z)); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z)); } real_t MLPPActivation::coth_derivr(real_t z) { @@ -2186,7 +2186,7 @@ Ref MLPPActivation::arsinh_normv(const Ref &z) { Ref MLPPActivation::arsinh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.logm(alg.additionnm(z, alg.sqrtm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y))))); + return alg.lognm(alg.additionnm(z, alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))))); } real_t MLPPActivation::arsinh_derivr(real_t z) { @@ -2196,13 +2196,13 @@ real_t MLPPActivation::arsinh_derivr(real_t z) { Ref MLPPActivation::arsinh_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); } Ref MLPPActivation::arsinh_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.sqrtm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y)))); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y)))); } //ARCOSH @@ -2219,7 +2219,7 @@ Ref MLPPActivation::arcosh_normv(const Ref &z) { Ref MLPPActivation::arcosh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.logm(alg.additionnm(z, alg.sqrtm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y))))); + return alg.lognm(alg.additionnm(z, alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))))); } real_t MLPPActivation::arcosh_derivr(real_t z) { @@ -2228,13 +2228,13 @@ real_t MLPPActivation::arcosh_derivr(real_t z) { Ref MLPPActivation::arcosh_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))); } Ref MLPPActivation::arcosh_derivm(const Ref &z) { MLPPLinAlg alg; - return 
alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.sqrtm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematm(z->size().x, z->size().y)))); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y)))); } //ARTANH @@ -2245,13 +2245,13 @@ real_t MLPPActivation::artanh_normr(real_t z) { Ref MLPPActivation::artanh_normv(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_division(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(alg.onevecv(z->size()), z)))); + return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(alg.onevecv(z->size()), z)))); } Ref MLPPActivation::artanh_normm(const Ref &z) { MLPPLinAlg alg; - return alg.scalar_multiplynm(0.5, alg.logm(alg.element_wise_divisionnm(alg.additionnm(alg.onematm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematm(z->size().x, z->size().y), z)))); + return alg.scalar_multiplynm(0.5, alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)))); } real_t MLPPActivation::artanh_derivr(real_t z) { @@ -2260,13 +2260,13 @@ real_t MLPPActivation::artanh_derivr(real_t z) { Ref MLPPActivation::artanh_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); } Ref MLPPActivation::artanh_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.subtractionnv(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); } //ARCSCH @@ -2282,18 +2282,18 @@ Ref MLPPActivation::arcsch_normv(const Ref &z) { alg.sqrtnv( alg.additionnv( alg.onevecv(z->size()), - alg.element_wise_division(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))), - alg.element_wise_division(alg.onevecv(z->size()), z))); + alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))), + alg.element_wise_divisionnv(alg.onevecv(z->size()), z))); } Ref MLPPActivation::arcsch_normm(const Ref &z) { MLPPLinAlg alg; - return alg.logm( + return alg.lognm( alg.additionnm( - alg.sqrtm( - alg.additionnm(alg.onematm(z->size().x, z->size().y), - alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))), - alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), z))); + alg.sqrtnm( + alg.additionnm(alg.onematnm(z->size().x, z->size().y), + alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))), + alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), z))); } real_t MLPPActivation::arcsch_derivr(real_t z) { @@ -2302,20 +2302,20 @@ real_t MLPPActivation::arcsch_derivr(real_t z) { Ref MLPPActivation::arcsch_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division( + return alg.element_wise_divisionnv( alg.fullv(z->size(), -1), alg.hadamard_productnm( alg.hadamard_productnv(z, z), - 
alg.sqrtnv(alg.additionnv(alg.onevecv(z->size()), alg.element_wise_division(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))))); + alg.sqrtnv(alg.additionnv(alg.onevecv(z->size()), alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))))); } Ref MLPPActivation::arcsch_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm( - alg.fullm(z->size().x, z->size().y, -1), + return alg.element_wise_divisionnvnm( + alg.fullnm(z->size().x, z->size().y, -1), alg.hadamard_productnm(alg.hadamard_productnm(z, z), - alg.sqrtm(alg.additionnm(alg.onematm(z->size().x, z->size().y), - alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))))); + alg.sqrtnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), + alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))))); } //ARSECH @@ -2329,29 +2329,29 @@ Ref MLPPActivation::arsech_normv(const Ref &z) { return alg.lognv( alg.additionnv( - alg.element_wise_division( + alg.element_wise_divisionnv( alg.onevecv(z->size()), z), alg.hadamard_productnv( - alg.additionnv(alg.element_wise_division(alg.onevecv(z->size()), z), alg.onevecv(z->size())), - alg.subtractionnv(alg.element_wise_division(alg.onevecv(z->size()), z), alg.onevecv(z->size()))))); + alg.additionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size())), + alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size()))))); } Ref MLPPActivation::arsech_normm(const Ref &z) { MLPPLinAlg alg; - return alg.logm( + return alg.lognm( alg.additionnm( - alg.element_wise_divisionnm( - alg.onematm(z->size().x, z->size().y), z), + alg.element_wise_divisionnvnm( + alg.onematnm(z->size().x, z->size().y), z), alg.hadamard_productnm( alg.additionnm( - alg.element_wise_divisionnm( - alg.onematm(z->size().x, z->size().y), z), - alg.onematm(z->size().x, z->size().y)), + alg.element_wise_divisionnvnm( + alg.onematnm(z->size().x, z->size().y), z), + alg.onematnm(z->size().x, z->size().y)), alg.subtractionnm( - alg.element_wise_divisionnm( - alg.onematm(z->size().x, z->size().y), z), - alg.onematm(z->size().x, z->size().y))))); + alg.element_wise_divisionnvnm( + alg.onematnm(z->size().x, z->size().y), z), + alg.onematnm(z->size().x, z->size().y))))); } real_t MLPPActivation::arsech_derivr(real_t z) { @@ -2361,7 +2361,7 @@ real_t MLPPActivation::arsech_derivr(real_t z) { Ref MLPPActivation::arsech_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division( + return alg.element_wise_divisionnv( alg.fullv(z->size(), -1), alg.hadamard_productnv( z, @@ -2372,11 +2372,11 @@ Ref MLPPActivation::arsech_derivv(const Ref &z) { Ref MLPPActivation::arsech_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm( - alg.fullm(z->size().x, z->size().y, -1), + return alg.element_wise_divisionnvnm( + alg.fullnm(z->size().x, z->size().y, -1), alg.hadamard_productnm( z, - alg.sqrtm(alg.subtractionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))); + alg.sqrtnm(alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))); } //ARCOTH @@ -2389,7 +2389,7 @@ Ref MLPPActivation::arcoth_normv(const Ref &z) { return alg.scalar_multiplynv( 0.5, - alg.lognv(alg.element_wise_division(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(z, alg.onevecv(z->size()))))); + alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), 
alg.subtractionnv(z, alg.onevecv(z->size()))))); } Ref MLPPActivation::arcoth_normm(const Ref &z) { @@ -2397,7 +2397,7 @@ Ref MLPPActivation::arcoth_normm(const Ref &z) { return alg.scalar_multiplynm( 0.5, - alg.logm(alg.element_wise_divisionnm(alg.additionnm(alg.onematm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematm(z->size().x, z->size().y))))); + alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y))))); } real_t MLPPActivation::arcoth_derivr(real_t z) { @@ -2406,13 +2406,13 @@ real_t MLPPActivation::arcoth_derivr(real_t z) { Ref MLPPActivation::arcoth_derivv(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_division(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); + return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))); } Ref MLPPActivation::arcoth_derivm(const Ref &z) { MLPPLinAlg alg; - return alg.element_wise_divisionnm(alg.onematm(z->size().x, z->size().y), alg.subtractionnm(alg.onematm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); + return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))); } void MLPPActivation::_bind_methods() { diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index 7122961..6755558 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -314,8 +314,8 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, v_hidden = alg.addition_vt(v_hidden, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)); v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2)); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. 
y_hat = model_set_test(current_input_batch); @@ -378,8 +378,8 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, v_hidden = alg.addition_vt(alg.scalar_multiply_vm(1 - b1, v_hidden), alg.scalar_multiply_vm(b1, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2))); v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2)); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden)))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output)))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. y_hat = model_set_test(current_input_batch); @@ -456,8 +456,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea Ref m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output); Ref v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_hat, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. y_hat = model_set_test(current_input_batch); @@ -529,8 +529,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r Ref m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_hat, alg.scalar_add_vm(e, u_hidden))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output_hat, alg.scalar_addnv(e, u_output))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, u_hidden))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. 
y_hat = model_set_test(current_input_batch); @@ -606,8 +606,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re Ref v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output); Ref m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad)); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden_final, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_final, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. @@ -686,8 +686,8 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size, v_hidden_hat = alg.max_vt(v_hidden_hat, v_hidden); v_output_hat = alg.maxnvv(v_output_hat, v_output); - Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_division_vt(m_hidden, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); - Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_division(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); + Vector> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat)))); + Ref output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat)))); update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too. 
y_hat = model_set_test(current_input_batch); diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp index 563f280..856277a 100644 --- a/mlpp/bernoulli_nb/bernoulli_nb.cpp +++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp @@ -103,7 +103,7 @@ void MLPPBernoulliNB::compute_vocab() { MLPPLinAlg alg; MLPPData data; - _vocab = data.vec_to_setnv(alg.flattenv(_input_set)); + _vocab = data.vec_to_setnv(alg.flattenvvnv(_input_set)); } void MLPPBernoulliNB::compute_theta() { diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp index 05c0dd8..f6ed441 100644 --- a/mlpp/cost/cost.cpp +++ b/mlpp/cost/cost.cpp @@ -209,7 +209,7 @@ Ref MLPPCost::mbe_derivv(const Ref &y_hat, const Ref MLPPCost::mbe_derivm(const Ref &y_hat, const Ref &y) { MLPPLinAlg alg; - return alg.onematm(y_hat->size().x, y_hat->size().y); + return alg.onematnm(y_hat->size().x, y_hat->size().y); } // Classification Costs @@ -250,15 +250,15 @@ real_t MLPPCost::log_lossm(const Ref &y_hat, const Ref & Ref MLPPCost::log_loss_derivv(const Ref &y_hat, const Ref &y) { MLPPLinAlg alg; return alg.additionnv( - alg.scalar_multiplynv(-1, alg.element_wise_division(y, y_hat)), - alg.element_wise_division(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat)))); + alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)), + alg.element_wise_divisionnv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat)))); } Ref MLPPCost::log_loss_derivm(const Ref &y_hat, const Ref &y) { MLPPLinAlg alg; return alg.additionnm( - alg.scalar_multiplynm(-1, alg.element_wise_divisionnm(y, y_hat)), - alg.element_wise_divisionnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat)))); + alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)), + alg.element_wise_divisionnvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat)))); } real_t MLPPCost::cross_entropyv(const Ref &y_hat, const Ref &y) { @@ -294,11 +294,11 @@ real_t MLPPCost::cross_entropym(const Ref &y_hat, const Ref MLPPCost::cross_entropy_derivv(const Ref &y_hat, const Ref &y) { MLPPLinAlg alg; - return alg.scalar_multiplynv(-1, alg.element_wise_division(y, y_hat)); + return alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)); } Ref MLPPCost::cross_entropy_derivm(const Ref &y_hat, const Ref &y) { MLPPLinAlg alg; - return alg.scalar_multiplynm(-1, alg.element_wise_divisionnm(y, y_hat)); + return alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)); } real_t MLPPCost::huber_lossv(const Ref &y_hat, const Ref &y, real_t delta) { diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp index c0f78e6..cfd7e7d 100644 --- a/mlpp/lin_alg/lin_alg.cpp +++ b/mlpp/lin_alg/lin_alg.cpp @@ -302,7 +302,7 @@ Ref MLPPLinAlg::kronecker_productnm(const Ref &A, const row.push_back(scalar_multiplynv(a_ptr[A->calculate_index(i, k)], row_tmp)); } - Ref flattened_row = flattenvv(row); + Ref flattened_row = flattenmnv(row); C->set_row_mlpp_vector(i * b_size.y + j, flattened_row); } @@ -310,7 +310,7 @@ Ref MLPPLinAlg::kronecker_productnm(const Ref &A, const return C; } -Ref MLPPLinAlg::element_wise_divisionnm(const Ref &A, const Ref &B) { +Ref MLPPLinAlg::element_wise_divisionnvnm(const Ref &A, const Ref &B) { ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref()); Size2i a_size = A->size(); ERR_FAIL_COND_V(a_size != B->size(), Ref()); @@ -485,7 +485,7 @@ 
std::vector> MLPPLinAlg::cbrt(std::vector MLPPLinAlg::logm(const Ref &A) { +Ref MLPPLinAlg::lognm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -503,7 +503,7 @@ Ref MLPPLinAlg::logm(const Ref &A) { return out; } -Ref MLPPLinAlg::log10m(const Ref &A) { +Ref MLPPLinAlg::log10nm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -521,7 +521,7 @@ Ref MLPPLinAlg::log10m(const Ref &A) { return out; } -Ref MLPPLinAlg::expm(const Ref &A) { +Ref MLPPLinAlg::expnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -539,7 +539,7 @@ Ref MLPPLinAlg::expm(const Ref &A) { return out; } -Ref MLPPLinAlg::erfm(const Ref &A) { +Ref MLPPLinAlg::erfnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -557,7 +557,7 @@ Ref MLPPLinAlg::erfm(const Ref &A) { return out; } -Ref MLPPLinAlg::exponentiatem(const Ref &A, real_t p) { +Ref MLPPLinAlg::exponentiatenm(const Ref &A, real_t p) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -575,7 +575,7 @@ Ref MLPPLinAlg::exponentiatem(const Ref &A, real_t p) { return out; } -Ref MLPPLinAlg::sqrtm(const Ref &A) { +Ref MLPPLinAlg::sqrtnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -593,8 +593,8 @@ Ref MLPPLinAlg::sqrtm(const Ref &A) { return out; } -Ref MLPPLinAlg::cbrtm(const Ref &A) { - return exponentiatem(A, real_t(1) / real_t(3)); +Ref MLPPLinAlg::cbrtnm(const Ref &A) { + return exponentiatenm(A, real_t(1) / real_t(3)); } std::vector> MLPPLinAlg::matrixPower(std::vector> A, int n) { @@ -624,7 +624,7 @@ std::vector> MLPPLinAlg::abs(std::vector return B; } -Ref MLPPLinAlg::absm(const Ref &A) { +Ref MLPPLinAlg::absnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -790,7 +790,7 @@ std::vector> MLPPLinAlg::pinverse(std::vector MLPPLinAlg::cofactorm(const Ref &A, int n, int i, int j) { +Ref MLPPLinAlg::cofactornm(const Ref &A, int n, int i, int j) { Ref cof; cof.instance(); cof->resize(A->size()); @@ -813,7 +813,7 @@ Ref MLPPLinAlg::cofactorm(const Ref &A, int n, int i, in return cof; } -Ref MLPPLinAlg::adjointm(const Ref &A) { +Ref MLPPLinAlg::adjointnm(const Ref &A) { Ref adj; ERR_FAIL_COND_V(!A.is_valid(), adj); @@ -845,7 +845,7 @@ Ref MLPPLinAlg::adjointm(const Ref &A) { for (int i = 0; i < a_size.y; i++) { for (int j = 0; j < a_size.x; j++) { - Ref cof = cofactorm(A, a_size.y, i, j); + Ref cof = cofactornm(A, a_size.y, i, j); // 1 if even, -1 if odd int sign = (i + j) % 2 == 0 ? 
1 : -1; adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1)); @@ -853,11 +853,11 @@ Ref MLPPLinAlg::adjointm(const Ref &A) { } return adj; } -Ref MLPPLinAlg::inversem(const Ref &A) { - return scalar_multiplynm(1 / detm(A, int(A->size().y)), adjointm(A)); +Ref MLPPLinAlg::inversenm(const Ref &A) { + return scalar_multiplynm(1 / detm(A, int(A->size().y)), adjointnm(A)); } -Ref MLPPLinAlg::pinversem(const Ref &A) { - return matmultnm(inversem(matmultnm(transposenm(A), A)), transposenm(A)); +Ref MLPPLinAlg::pinversenm(const Ref &A) { + return matmultnm(inversenm(matmultnm(transposenm(A), A)), transposenm(A)); } std::vector> MLPPLinAlg::zeromat(int n, int m) { @@ -873,7 +873,7 @@ std::vector> MLPPLinAlg::onemat(int n, int m) { return full(n, m, 1); } -Ref MLPPLinAlg::zeromatm(int n, int m) { +Ref MLPPLinAlg::zeromatnm(int n, int m) { Ref mat; mat.instance(); @@ -882,7 +882,7 @@ Ref MLPPLinAlg::zeromatm(int n, int m) { return mat; } -Ref MLPPLinAlg::onematm(int n, int m) { +Ref MLPPLinAlg::onematnm(int n, int m) { Ref mat; mat.instance(); @@ -891,7 +891,7 @@ Ref MLPPLinAlg::onematm(int n, int m) { return mat; } -Ref MLPPLinAlg::fullm(int n, int m, int k) { +Ref MLPPLinAlg::fullnm(int n, int m, int k) { Ref mat; mat.instance(); @@ -943,7 +943,7 @@ std::vector> MLPPLinAlg::cos(std::vector return B; } -Ref MLPPLinAlg::sinm(const Ref &A) { +Ref MLPPLinAlg::sinnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -961,7 +961,7 @@ Ref MLPPLinAlg::sinm(const Ref &A) { return out; } -Ref MLPPLinAlg::cosm(const Ref &A) { +Ref MLPPLinAlg::cosnm(const Ref &A) { ERR_FAIL_COND_V(!A.is_valid(), Ref()); Ref out; @@ -1103,7 +1103,7 @@ std::vector> MLPPLinAlg::cov(std::vector return covMat; } -Ref MLPPLinAlg::covm(const Ref &A) { +Ref MLPPLinAlg::covnm(const Ref &A) { MLPPStat stat; Ref cov_mat; @@ -1437,7 +1437,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) { P->set_element(sub_j, sub_j, Math::cos(theta)); P->set_element(sub_j, sub_i, Math::sin(theta)); - a_new = matmultnm(matmultnm(inversem(P), A), P); + a_new = matmultnm(matmultnm(inversenm(P), A), P); Size2i a_new_size = a_new->size(); @@ -1549,8 +1549,8 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref &A) { EigenResult left_eigen = eigen(matmultnm(A, transposenm(A))); EigenResult right_eigen = eigen(matmultnm(transposenm(A), A)); - Ref singularvals = sqrtm(left_eigen.eigen_values); - Ref sigma = zeromatm(a_size.y, a_size.x); + Ref singularvals = sqrtnm(left_eigen.eigen_values); + Ref sigma = zeromatnm(a_size.y, a_size.x); Size2i singularvals_size = singularvals->size(); @@ -1676,7 +1676,7 @@ std::vector MLPPLinAlg::flatten(std::vector> A) { return a; } -Ref MLPPLinAlg::flattenvv(const Vector> &A) { +Ref MLPPLinAlg::flattenmnv(const Vector> &A) { Ref a; a.instance(); @@ -1705,7 +1705,7 @@ Ref MLPPLinAlg::flattenvv(const Vector> &A) { return a; } -Ref MLPPLinAlg::flattenv(const Ref &A) { +Ref MLPPLinAlg::flattenvvnv(const Ref &A) { int data_size = A->data_size(); Ref res; @@ -1878,7 +1878,7 @@ std::vector MLPPLinAlg::elementWiseDivision(std::vector a, std:: return c; } -Ref MLPPLinAlg::element_wise_division(const Ref &a, const Ref &b) { +Ref MLPPLinAlg::element_wise_divisionnv(const Ref &a, const Ref &b) { ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref()); Ref out; @@ -2446,7 +2446,7 @@ std::vector> MLPPLinAlg::max(std::vector return C; } -Ref MLPPLinAlg::max_nm(const Ref &A, const Ref &B) { +Ref MLPPLinAlg::maxnm(const Ref &A, const Ref &B) { Ref C; C.instance(); C->resize(A->size()); @@ -2732,12 +2732,12 @@ std::vector>> 
MLPPLinAlg::elementWiseDivision(st return A; } -Vector> MLPPLinAlg::element_wise_division_vt(const Vector> &A, const Vector> &B) { +Vector> MLPPLinAlg::element_wise_divisionnv_vt(const Vector> &A, const Vector> &B) { Vector> res; res.resize(A.size()); for (int i = 0; i < A.size(); i++) { - res.write[i] = element_wise_divisionnm(A[i], B[i]); + res.write[i] = element_wise_divisionnvnm(A[i], B[i]); } return res; @@ -2755,7 +2755,7 @@ Vector> MLPPLinAlg::sqrt_vt(const Vector> &A) { res.resize(A.size()); for (int i = 0; i < A.size(); i++) { - res.write[i] = sqrtm(A[i]); + res.write[i] = sqrtnm(A[i]); } return res; @@ -2773,7 +2773,7 @@ Vector> MLPPLinAlg::exponentiate_vt(const Vector res.resize(A.size()); for (int i = 0; i < A.size(); i++) { - res.write[i] = exponentiatem(A[i], p); + res.write[i] = exponentiatenm(A[i], p); } return res; @@ -2876,7 +2876,7 @@ Vector> MLPPLinAlg::max_vt(const Vector> &A, con res.resize(A.size()); for (int i = 0; i < A.size(); i++) { - res.write[i] = max_nm(A[i], B[i]); + res.write[i] = maxnm(A[i], B[i]); } return res; @@ -2894,7 +2894,7 @@ Vector> MLPPLinAlg::abs_vt(const Vector> &A) { res.resize(A.size()); for (int i = 0; i < A.size(); i++) { - res.write[i] = absm(A[i]); + res.write[i] = absnm(A[i]); } return A; diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h index 0520276..e1dfdea 100644 --- a/mlpp/lin_alg/lin_alg.h +++ b/mlpp/lin_alg/lin_alg.h @@ -47,7 +47,7 @@ public: Ref hadamard_productnm(const Ref &A, const Ref &B); Ref kronecker_productnm(const Ref &A, const Ref &B); - Ref element_wise_divisionnm(const Ref &A, const Ref &B); + Ref element_wise_divisionnvnm(const Ref &A, const Ref &B); std::vector> transpose(std::vector> A); std::vector> scalarMultiply(real_t scalar, std::vector> A); @@ -65,19 +65,19 @@ public: std::vector> sqrt(std::vector> A); std::vector> cbrt(std::vector> A); - Ref logm(const Ref &A); - Ref log10m(const Ref &A); - Ref expm(const Ref &A); - Ref erfm(const Ref &A); - Ref exponentiatem(const Ref &A, real_t p); - Ref sqrtm(const Ref &A); - Ref cbrtm(const Ref &A); + Ref lognm(const Ref &A); + Ref log10nm(const Ref &A); + Ref expnm(const Ref &A); + Ref erfnm(const Ref &A); + Ref exponentiatenm(const Ref &A, real_t p); + Ref sqrtnm(const Ref &A); + Ref cbrtnm(const Ref &A); std::vector> matrixPower(std::vector> A, int n); std::vector> abs(std::vector> A); - Ref absm(const Ref &A); + Ref absnm(const Ref &A); real_t det(std::vector> A, int d); real_t detm(const Ref &A, int d); @@ -89,29 +89,29 @@ public: std::vector> inverse(std::vector> A); std::vector> pinverse(std::vector> A); - Ref cofactorm(const Ref &A, int n, int i, int j); - Ref adjointm(const Ref &A); - Ref inversem(const Ref &A); - Ref pinversem(const Ref &A); + Ref cofactornm(const Ref &A, int n, int i, int j); + Ref adjointnm(const Ref &A); + Ref inversenm(const Ref &A); + Ref pinversenm(const Ref &A); std::vector> zeromat(int n, int m); std::vector> onemat(int n, int m); std::vector> full(int n, int m, int k); - Ref zeromatm(int n, int m); - Ref onematm(int n, int m); - Ref fullm(int n, int m, int k); + Ref zeromatnm(int n, int m); + Ref onematnm(int n, int m); + Ref fullnm(int n, int m, int k); std::vector> sin(std::vector> A); std::vector> cos(std::vector> A); - Ref sinm(const Ref &A); - Ref cosm(const Ref &A); + Ref sinnm(const Ref &A); + Ref cosnm(const Ref &A); std::vector> rotate(std::vector> A, real_t theta, int axis = -1); std::vector> max(std::vector> A, std::vector> B); - Ref max_nm(const Ref &A, const Ref &B); + Ref maxnm(const Ref &A, const Ref &B); real_t 
max(std::vector> A); real_t min(std::vector> A); @@ -124,7 +124,7 @@ public: Ref identitym(int d); std::vector> cov(std::vector> A); - Ref covm(const Ref &A); + Ref covnm(const Ref &A); std::tuple>, std::vector>> eig(std::vector> A); @@ -183,8 +183,8 @@ public: real_t sum_elements(std::vector> A); std::vector flatten(std::vector> A); - Ref flattenvv(const Vector> &A); - Ref flattenv(const Ref &A); + Ref flattenmnv(const Vector> &A); + Ref flattenvvnv(const Ref &A); std::vector solve(std::vector> A, std::vector b); @@ -206,7 +206,7 @@ public: void hadamard_productv(const Ref &a, const Ref &b, Ref out); std::vector elementWiseDivision(std::vector a, std::vector b); - Ref element_wise_division(const Ref &a, const Ref &b); + Ref element_wise_divisionnv(const Ref &a, const Ref &b); std::vector scalarMultiply(real_t scalar, std::vector a); Ref scalar_multiplynv(real_t scalar, const Ref &a); @@ -302,7 +302,7 @@ public: Vector> addition_vt(const Vector> &A, const Vector> &B); std::vector>> elementWiseDivision(std::vector>> A, std::vector>> B); - Vector> element_wise_division_vt(const Vector> &A, const Vector> &B); + Vector> element_wise_divisionnv_vt(const Vector> &A, const Vector> &B); std::vector>> sqrt(std::vector>> A); Vector> sqrt_vt(const Vector> &A); diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp index ba4f523..e5cd250 100644 --- a/mlpp/lin_reg/lin_reg.cpp +++ b/mlpp/lin_reg/lin_reg.cpp @@ -94,7 +94,7 @@ void MLPPLinReg::newton_raphson(real_t learning_rate, int max_epoch, bool ui) { // Calculating the weight gradients (2nd derivative) Ref first_derivative = alg.mat_vec_multv(alg.transposenm(_input_set), error); Ref second_derivative = alg.matmultnm(alg.transposenm(_input_set), _input_set); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(alg.inversem(second_derivative)), first_derivative))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(alg.inversenm(second_derivative)), first_derivative))); _weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg); // Calculating the bias gradients (2nd derivative) @@ -405,7 +405,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz v = alg.hadamard_productnv(weight_grad, weight_grad); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); // Calculating the bias gradients _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal @@ -460,7 +460,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si v = alg.additionnv(alg.scalar_multiplynv(b1, v), alg.scalar_multiplynv(1 - b1, alg.hadamard_productnv(weight_grad, weight_grad))); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v))))); // Calculating the bias gradients _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal @@ -519,7 +519,7 @@ void MLPPLinReg::adam(real_t learning_rate, 
int max_epoch, int mini_batch_size, Ref m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); Ref v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); // Calculating the bias gradients _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal @@ -576,7 +576,7 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size Ref m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(m_hat, u))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_hat, u))); // Calculating the bias gradients _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal @@ -637,7 +637,7 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, Ref m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m); Ref v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v); - _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_division(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); + _weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat))))); // Calculating the bias gradients _bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal @@ -683,14 +683,14 @@ void MLPPLinReg::normal_equation() { Ref temp; //temp.resize(_k); - temp = alg.mat_vec_multv(alg.inversem(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); + temp = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); ERR_FAIL_COND_MSG(Math::is_nan(temp->get_element(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. 
Try utilizing gradient descent."); if (_reg == MLPPReg::REGULARIZATION_TYPE_RIDGE) { - _weights = alg.mat_vec_multv(alg.inversem(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); + _weights = alg.mat_vec_multv(alg.inversenm(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); } else { - _weights = alg.mat_vec_multv(alg.inversem(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); + _weights = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set)); } _bias = stat.meanv(_output_set) - alg.dotv(_weights, x_means); diff --git a/mlpp/pca/pca.cpp b/mlpp/pca/pca.cpp index 013ca79..580a1b1 100644 --- a/mlpp/pca/pca.cpp +++ b/mlpp/pca/pca.cpp @@ -28,7 +28,7 @@ Ref MLPPPCA::principal_components() { MLPPLinAlg alg; MLPPData data; - MLPPLinAlg::SVDResult svr_res = alg.svd(alg.covm(_input_set)); + MLPPLinAlg::SVDResult svr_res = alg.svd(alg.covnm(_input_set)); _x_normalized = data.mean_centering(_input_set); Size2i svr_res_u_size = svr_res.U->size();
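
Note (illustrative, not part of the patch): the renames above standardize MLPPLinAlg's suffixes so that helpers returning a new vector end in "nv" (element_wise_division -> element_wise_divisionnv, flattenv -> flattenvvnv) and helpers returning a new matrix end in "nm" (expm -> expnm, onematm -> onematnm, inversem -> inversenm), with matrix element-wise division taking the combined "nvnm" suffix and the Vector-of-matrices variants keeping a "_vt" tail. The sketch below mirrors the body of the updated MLPPActivation::sigmoid_normm to show the renamed calls in use. It is a minimal sketch, assuming the Ref<> template arguments stripped from the diff are MLPPMatrix and that "mlpp/lin_alg/lin_alg.h" (the header edited above) brings in the types it needs; the function name sigmoid_like is hypothetical.

#include "mlpp/lin_alg/lin_alg.h"

// Sigmoid over a matrix, written with the renamed "nm" helpers:
// ones ./ (ones + exp(-z)), as in the updated sigmoid_normm above.
Ref<MLPPMatrix> sigmoid_like(const Ref<MLPPMatrix> &z) {
	MLPPLinAlg alg;

	// onematnm allocates a matrix of ones with the same dimensions as z.
	Ref<MLPPMatrix> ones = alg.onematnm(z->size().x, z->size().y);

	// expnm and scalar_multiplynm each return a new matrix, per the
	// signatures renamed in lin_alg.h above.
	Ref<MLPPMatrix> denom = alg.additionnm(ones, alg.expnm(alg.scalar_multiplynm(-1, z)));

	// element_wise_divisionnvnm is the renamed matrix/matrix element-wise division.
	return alg.element_wise_divisionnvnm(ones, denom);
}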