diff --git a/doc_classes/MLPPMatrix.xml b/doc_classes/MLPPMatrix.xml
index a6ee3ff..516aac9 100644
--- a/doc_classes/MLPPMatrix.xml
+++ b/doc_classes/MLPPMatrix.xml
@@ -201,20 +201,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn">
+	<method name="division_element_wisen">
diff --git a/doc_classes/MLPPTensor3.xml b/doc_classes/MLPPTensor3.xml
index 9b6c1c5..95e28aa 100644
--- a/doc_classes/MLPPTensor3.xml
+++ b/doc_classes/MLPPTensor3.xml
@@ -96,20 +96,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn">
+	<method name="division_element_wisen">
@@ -480,7 +480,7 @@
-	<method name="set_shape">
+	<method name="shape_set">
diff --git a/doc_classes/MLPPVector.xml b/doc_classes/MLPPVector.xml
index db51b2f..bfc97ec 100644
--- a/doc_classes/MLPPVector.xml
+++ b/doc_classes/MLPPVector.xml
@@ -101,20 +101,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn">
+	<method name="division_element_wisen">
diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 8defe7f..8d7888a 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -854,11 +854,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
 }
 
 Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
 }
 
 real_t MLPPActivation::sigmoid_derivr(real_t z) {
@@ -1248,12 +1248,12 @@ real_t MLPPActivation::softsign_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
+	return alg.division_element_wisenv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
 }
 
 Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
+	return alg.division_element_wisenvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
 }
 
 real_t MLPPActivation::softsign_derivr(real_t z) {
@@ -1262,12 +1262,12 @@ real_t MLPPActivation::softsign_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
 }
 
 Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
 }
 
 //GAUSSIANCDF
@@ -1342,12 +1342,12 @@ real_t MLPPActivation::logit_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
+	return alg.lognv(alg.division_element_wisenv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
 }
 
 Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.lognm(alg.element_wise_divisionnvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
+	return alg.lognm(alg.division_element_wisenvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
 }
 
 real_t MLPPActivation::logit_derivr(real_t z) {
@@ -1357,16 +1357,16 @@ real_t MLPPActivation::logit_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::logit_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
 	return alg.subtractionnv(
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), z),
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
+			alg.division_element_wisenv(alg.onevecnv(z->size()), z),
+			alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
 	return alg.subtractionnm(
-			alg.element_wise_divisionnvnm(
+			alg.division_element_wisenvnm(
 					alg.onematnm(z->size().x, z->size().y), z),
-			alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y),
+			alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y),
 					alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y))));
 }
@@ -1487,7 +1487,7 @@ Ref<MLPPVector> MLPPActivation::mish_derivv(const Ref<MLPPVector> &z) {
 							sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))),
 					z),
 					sigmoid_normv(z)),
-			alg.element_wise_divisionnv(mish_normv(z), z));
+			alg.division_element_wisenv(mish_normv(z), z));
 }
 
 Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
@@ -1499,7 +1499,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
 							sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))),
 					z),
 					sigmoid_normm(z)),
-			alg.element_wise_divisionnvnm(mish_normm(z), z));
+			alg.division_element_wisenvnm(mish_normm(z), z));
 }
 
 //SINC
@@ -1510,12 +1510,12 @@ real_t MLPPActivation::sinc_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.sinnv(z), z);
+	return alg.division_element_wisenv(alg.sinnv(z), z);
 }
 
 Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.sinnm(z), z);
+	return alg.division_element_wisenvnm(alg.sinnm(z), z);
 }
 
 real_t MLPPActivation::sinc_derivr(real_t z) {
@@ -1524,12 +1524,12 @@ real_t MLPPActivation::sinc_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
+	return alg.division_element_wisenv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
 }
 
 Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
+	return alg.division_element_wisenvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
 }
 
 //RELU
@@ -2054,12 +2054,12 @@ real_t MLPPActivation::tanh_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::tanh_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
+	return alg.division_element_wisenv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::tanh_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
+	return alg.division_element_wisenvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
 }
 
 real_t MLPPActivation::tanh_derivr(real_t z) {
@@ -2084,13 +2084,13 @@ real_t MLPPActivation::csch_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), sinh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), sinh_normv(z));
 }
 
 Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
 }
 
 real_t MLPPActivation::csch_derivr(real_t z) {
@@ -2117,14 +2117,14 @@ real_t MLPPActivation::sech_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), cosh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), cosh_normv(z));
 
 	// return activation(z, deriv, static_cast<real_t (*)(real_t)>(&sech));
 }
 
 Ref<MLPPMatrix> MLPPActivation::sech_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
 
 	// return activation(z, deriv, static_cast<real_t (*)(real_t)>(&sech));
 }
@@ -2152,12 +2152,12 @@ real_t MLPPActivation::coth_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), tanh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), tanh_normv(z));
 }
 
 Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
 }
 
 real_t MLPPActivation::coth_derivr(real_t z) {
@@ -2199,13 +2199,13 @@ real_t MLPPActivation::arsinh_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
 }
 
 //ARCOSH
@@ -2231,13 +2231,13 @@ real_t MLPPActivation::arcosh_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
 }
 
 //ARTANH
@@ -2248,13 +2248,13 @@ real_t MLPPActivation::artanh_normr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
+	return alg.scalar_multiplynv(0.5, alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.scalar_multiplynm(0.5, alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
+	return alg.scalar_multiplynm(0.5, alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
 }
 
 real_t MLPPActivation::artanh_derivr(real_t z) {
@@ -2263,13 +2263,13 @@ real_t MLPPActivation::artanh_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
 }
 
 Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
 }
 
 //ARCSCH
@@ -2285,8 +2285,8 @@ Ref<MLPPVector> MLPPActivation::arcsch_normv(const Ref<MLPPVector> &z) {
 			alg.sqrtnv(
 					alg.additionnv(
 							alg.onevecnv(z->size()),
-							alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), z)));
+							alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
+			alg.division_element_wisenv(alg.onevecnv(z->size()), z)));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
@@ -2295,8 +2295,8 @@ Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
 			alg.additionnm(
 					alg.sqrtnm(
 							alg.additionnm(alg.onematnm(z->size().x, z->size().y),
-									alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
-					alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), z)));
+									alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
+					alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), z)));
 }
 
 real_t MLPPActivation::arcsch_derivr(real_t z) {
@@ -2305,20 +2305,20 @@ real_t MLPPActivation::arcsch_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(
+	return alg.division_element_wisenv(
 			alg.fullnv(z->size(), -1),
 			alg.hadamard_productnm(
 					alg.hadamard_productnv(z, z),
-					alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
+					alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(
+	return alg.division_element_wisenvnm(
 			alg.fullnm(z->size().x, z->size().y, -1),
 			alg.hadamard_productnm(alg.hadamard_productnm(z, z),
 					alg.sqrtnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y),
-							alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
+							alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
 }
 
 //ARSECH
@@ -2332,11 +2332,11 @@ Ref<MLPPVector> MLPPActivation::arsech_normv(const Ref<MLPPVector> &z) {
 
 	return alg.lognv(
 			alg.additionnv(
-					alg.element_wise_divisionnv(
+					alg.division_element_wisenv(
 							alg.onevecnv(z->size()), z),
 					alg.hadamard_productnv(
-							alg.additionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
-							alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
+							alg.additionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
+							alg.subtractionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
@@ -2344,15 +2344,15 @@ Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
 
 	return alg.lognm(
 			alg.additionnm(
-					alg.element_wise_divisionnvnm(
+					alg.division_element_wisenvnm(
 							alg.onematnm(z->size().x, z->size().y), z),
 					alg.hadamard_productnm(
 							alg.additionnm(
-									alg.element_wise_divisionnvnm(
+									alg.division_element_wisenvnm(
 											alg.onematnm(z->size().x, z->size().y), z),
 									alg.onematnm(z->size().x, z->size().y)),
 							alg.subtractionnm(
-									alg.element_wise_divisionnvnm(
+									alg.division_element_wisenvnm(
 											alg.onematnm(z->size().x, z->size().y), z),
 									alg.onematnm(z->size().x, z->size().y))))));
 }
@@ -2364,7 +2364,7 @@ real_t MLPPActivation::arsech_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(
+	return alg.division_element_wisenv(
 			alg.fullnv(z->size(), -1),
 			alg.hadamard_productnv(
 					z,
@@ -2375,7 +2375,7 @@ Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
 
 Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(
+	return alg.division_element_wisenvnm(
 			alg.fullnm(z->size().x, z->size().y, -1),
 			alg.hadamard_productnm(
 					z,
@@ -2392,7 +2392,7 @@ Ref<MLPPVector> MLPPActivation::arcoth_normv(const Ref<MLPPVector> &z) {
 
 	return alg.scalar_multiplynv(
 			0.5,
-			alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
+			alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
@@ -2400,7 +2400,7 @@ Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
 
 	return alg.scalar_multiplynm(
 			0.5,
-			alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
+			alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
 }
 
 real_t MLPPActivation::arcoth_derivr(real_t z) {
@@ -2409,13 +2409,13 @@ real_t MLPPActivation::arcoth_derivr(real_t z) {
 
 Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
 }
 
 Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
 }
 
 void MLPPActivation::_bind_methods() {
diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 5499f54..54cc5af 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -314,8 +314,8 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size,
 			v_hidden = alg.additionnvt(v_hidden, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2));
 			v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 			y_hat = model_set_test(current_input_batch);
@@ -378,8 +378,8 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size,
 			v_hidden = alg.additionnvt(alg.scalar_multiplynvt(1 - b1, v_hidden), alg.scalar_multiplynvt(b1, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
 			v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 			y_hat = model_set_test(current_input_batch);
@@ -456,8 +456,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
 			Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
 			Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output);
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 			y_hat = model_set_test(current_input_batch);
@@ -529,8 +529,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r
 
 			Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output)));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, u_output)));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 			y_hat = model_set_test(current_input_batch);
@@ -606,8 +606,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
 			Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output);
 			Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad));
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
@@ -686,8 +686,8 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
 			v_hidden_hat = alg.maxnvt(v_hidden_hat, v_hidden);
 			v_output_hat = alg.maxnvv(v_output_hat, v_output);
 
-			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+			Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+			Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
 
 			update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 			y_hat = model_set_test(current_input_batch);
diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp
index 5c351fa..2333dff 100644
--- a/mlpp/cost/cost.cpp
+++ b/mlpp/cost/cost.cpp
@@ -250,15 +250,15 @@ real_t MLPPCost::log_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 
 Ref<MLPPVector> MLPPCost::log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPLinAlg alg;
 
 	return alg.additionnv(
-			alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)),
-			alg.element_wise_divisionnv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
+			alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat)),
+			alg.division_element_wisenv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
 }
 
 Ref<MLPPMatrix> MLPPCost::log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 	MLPPLinAlg alg;
 
 	return alg.additionnm(
-			alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)),
-			alg.element_wise_divisionnvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
+			alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat)),
+			alg.division_element_wisenvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
 }
 
 real_t MLPPCost::cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
@@ -294,11 +294,11 @@ real_t MLPPCost::cross_entropym(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 
 Ref<MLPPVector> MLPPCost::cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPLinAlg alg;
 
-	return alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat));
+	return alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat));
 }
 
 Ref<MLPPMatrix> MLPPCost::cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 	MLPPLinAlg alg;
 
-	return alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat));
+	return alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat));
 }
 
 real_t MLPPCost::huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta) {
diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index a964892..d4bc999 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -193,7 +193,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const
 	return C;
 }
 
-Ref<MLPPMatrix> MLPPLinAlg::element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+Ref<MLPPMatrix> MLPPLinAlg::division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
 	Size2i a_size = A->size();
 	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
@@ -1095,7 +1095,7 @@ void MLPPLinAlg::hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out) {
 }
 
-Ref<MLPPVector> MLPPLinAlg::element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+Ref<MLPPVector> MLPPLinAlg::division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>());
 
 	Ref<MLPPVector> out;
@@ -1783,12 +1783,12 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::additionnvt(const Vector<Ref<MLPPMatrix>> &A
 	return res;
 }
 
-Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
+Vector<Ref<MLPPMatrix>> MLPPLinAlg::division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
 	Vector<Ref<MLPPMatrix>> res;
 	res.resize(A.size());
 
 	for (int i = 0; i < A.size(); i++) {
-		res.write[i] = element_wise_divisionnvnm(A[i], B[i]);
+		res.write[i] = division_element_wisenvnm(A[i], B[i]);
 	}
 
 	return res;
diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h
index 9f5e8ce..557e05a 100644
--- a/mlpp/lin_alg/lin_alg.h
+++ b/mlpp/lin_alg/lin_alg.h
@@ -37,7 +37,7 @@ public:
 	Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
 	Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
-	Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
 
 	Ref<MLPPMatrix> transposenm(const Ref<MLPPMatrix> &A);
 	Ref<MLPPMatrix> scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A);
@@ -144,7 +144,7 @@ public:
 	Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 	void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
-	Ref<MLPPVector> element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	Ref<MLPPVector> division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 
 	Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
 	void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
@@ -210,7 +210,7 @@ public:
 	// TENSOR FUNCTIONS
 	Vector<Ref<MLPPMatrix>> additionnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
 
-	Vector<Ref<MLPPMatrix>> element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+	Vector<Ref<MLPPMatrix>> division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
 
 	Vector<Ref<MLPPMatrix>> sqrtnvt(const Vector<Ref<MLPPMatrix>> &A);
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index dd9dbd4..a30c88f 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -999,7 +999,7 @@ void MLPPMatrix::kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 }
 
-void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wise(const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND(!B.is_valid());
 	ERR_FAIL_COND(_size != B->size());
@@ -1012,7 +1012,7 @@ void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
 		c_ptr[i] /= b_ptr[i];
 	}
 }
 
-Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) const {
+Ref<MLPPMatrix> MLPPMatrix::division_element_wisen(const Ref<MLPPMatrix> &B) const {
 	ERR_FAIL_COND_V(!B.is_valid(), Ref<MLPPMatrix>());
 	ERR_FAIL_COND_V(_size != B->size(), Ref<MLPPMatrix>());
@@ -1032,7 +1032,7 @@ Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) con
 	return C;
 }
 
-void MLPPMatrix::element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wiseb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND(!A.is_valid() || !B.is_valid());
 	Size2i a_size = A->size();
 	ERR_FAIL_COND(a_size != B->size());
@@ -3071,9 +3071,9 @@ void MLPPMatrix::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("kronecker_productn", "B"), &MLPPMatrix::kronecker_productn);
 	ClassDB::bind_method(D_METHOD("kronecker_productb", "A", "B"), &MLPPMatrix::kronecker_productb);
 
-	ClassDB::bind_method(D_METHOD("element_wise_division", "B"), &MLPPMatrix::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "B"), &MLPPMatrix::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "A", "B"), &MLPPMatrix::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "B"), &MLPPMatrix::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "B"), &MLPPMatrix::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "A", "B"), &MLPPMatrix::division_element_wiseb);
 
 	ClassDB::bind_method(D_METHOD("transpose"), &MLPPMatrix::transpose);
 	ClassDB::bind_method(D_METHOD("transposen"), &MLPPMatrix::transposen);
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index c743e68..89e1d14 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -148,9 +148,9 @@ public:
 	Ref<MLPPMatrix> kronecker_productn(const Ref<MLPPMatrix> &B) const;
 	void kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
 
-	void element_wise_division(const Ref<MLPPMatrix> &B);
-	Ref<MLPPMatrix> element_wise_divisionn(const Ref<MLPPMatrix> &B) const;
-	void element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	void division_element_wise(const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> division_element_wisen(const Ref<MLPPMatrix> &B) const;
+	void division_element_wiseb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
 
 	void transpose();
 	Ref<MLPPMatrix> transposen() const;
diff --git a/mlpp/lin_alg/mlpp_tensor3.cpp b/mlpp/lin_alg/mlpp_tensor3.cpp
index b674e95..6efea97 100644
--- a/mlpp/lin_alg/mlpp_tensor3.cpp
+++ b/mlpp/lin_alg/mlpp_tensor3.cpp
@@ -231,7 +231,7 @@ void MLPPTensor3::resize(const Size3i &p_size) {
 	CRASH_COND_MSG(!_data, "Out of memory");
 }
 
-void MLPPTensor3::set_shape(const Size3i &p_size) {
+void MLPPTensor3::shape_set(const Size3i &p_size) {
 	int ds = data_size();
 	int new_data_size = p_size.x * p_size.y * p_size.z;
@@ -1437,7 +1437,7 @@ void MLPPTensor3::subb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
 	}
 }
 
-void MLPPTensor3::element_wise_division(const Ref<MLPPTensor3> &B) {
+void MLPPTensor3::division_element_wise(const Ref<MLPPTensor3> &B) {
 	ERR_FAIL_COND(!B.is_valid());
 	ERR_FAIL_COND(_size != B->size());
@@ -1450,7 +1450,7 @@ void MLPPTensor3::element_wise_division(const Ref<MLPPTensor3> &B) {
 		c_ptr[i] /= b_ptr[i];
 	}
 }
 
-Ref<MLPPTensor3> MLPPTensor3::element_wise_divisionn(const Ref<MLPPTensor3> &B) const {
+Ref<MLPPTensor3> MLPPTensor3::division_element_wisen(const Ref<MLPPTensor3> &B) const {
 	ERR_FAIL_COND_V(!B.is_valid(), Ref<MLPPTensor3>());
 	ERR_FAIL_COND_V(_size != B->size(), Ref<MLPPTensor3>());
@@ -1470,7 +1470,7 @@ Ref<MLPPTensor3> MLPPTensor3::element_wise_divisionn(const Ref<MLPPTensor3> &B)
 	return C;
 }
 
-void MLPPTensor3::element_wise_divisionb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
+void MLPPTensor3::division_element_wiseb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
 	ERR_FAIL_COND(!A.is_valid() || !B.is_valid());
 	Size3i a_size = A->size();
 	ERR_FAIL_COND(a_size != B->size());
@@ -2278,7 +2278,7 @@ void MLPPTensor3::_bind_methods() {
 	ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");
 
 	ClassDB::bind_method(D_METHOD("z_slice_add_pool_vector", "row"), &MLPPTensor3::z_slice_add_pool_vector);
-	ClassDB::bind_method(D_METHOD("z_add_slice_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
+	ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
 	ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_matrix", "matrix"), &MLPPTensor3::z_slice_add_mlpp_matrix);
 
 	ClassDB::bind_method(D_METHOD("z_slice_remove", "index"), &MLPPTensor3::z_slice_remove);
@@ -2298,7 +2298,7 @@ void MLPPTensor3::_bind_methods() {
 
 	ClassDB::bind_method(D_METHOD("resize", "size"), &MLPPTensor3::resize);
-	ClassDB::bind_method(D_METHOD("set_shape", "size"), &MLPPTensor3::set_shape);
+	ClassDB::bind_method(D_METHOD("shape_set", "size"), &MLPPTensor3::shape_set);
 
 	ClassDB::bind_method(D_METHOD("calculate_index", "index_y", "index_x", "index_z"), &MLPPTensor3::calculate_index);
 	ClassDB::bind_method(D_METHOD("calculate_z_slice_index", "index_z"), &MLPPTensor3::calculate_z_slice_index);
@@ -2381,9 +2381,9 @@ void MLPPTensor3::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("hadamard_productn", "B"), &MLPPTensor3::hadamard_productn);
 	ClassDB::bind_method(D_METHOD("hadamard_productb", "A", "B"), &MLPPTensor3::hadamard_productb);
 
-	ClassDB::bind_method(D_METHOD("element_wise_division", "B"), &MLPPTensor3::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "B"), &MLPPTensor3::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "A", "B"), &MLPPTensor3::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "B"), &MLPPTensor3::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "B"), &MLPPTensor3::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "A", "B"), &MLPPTensor3::division_element_wiseb);
 
 	ClassDB::bind_method(D_METHOD("scalar_multiply", "scalar"), &MLPPTensor3::scalar_multiply);
 	ClassDB::bind_method(D_METHOD("scalar_multiplyn", "scalar"), &MLPPTensor3::scalar_multiplyn);
diff --git a/mlpp/lin_alg/mlpp_tensor3.h b/mlpp/lin_alg/mlpp_tensor3.h
index 0362f6e..de59064 100644
--- a/mlpp/lin_alg/mlpp_tensor3.h
+++ b/mlpp/lin_alg/mlpp_tensor3.h
@@ -60,7 +60,7 @@ public:
 	_FORCE_INLINE_ Size3i size() const { return _size; }
 
 	void resize(const Size3i &p_size);
-	void set_shape(const Size3i &p_size);
+	void shape_set(const Size3i &p_size);
 
 	_FORCE_INLINE_ int calculate_index(int p_index_y, int p_index_x, int p_index_z) const {
 		return p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z;
@@ -193,9 +193,9 @@ public:
 	Ref<MLPPTensor3> subn(const Ref<MLPPTensor3> &B) const;
 	void subb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);
 
-	void element_wise_division(const Ref<MLPPTensor3> &B);
-	Ref<MLPPTensor3> element_wise_divisionn(const Ref<MLPPTensor3> &B) const;
-	void element_wise_divisionb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);
+	void division_element_wise(const Ref<MLPPTensor3> &B);
+	Ref<MLPPTensor3> division_element_wisen(const Ref<MLPPTensor3> &B) const;
+	void division_element_wiseb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);
 
 	void sqrt();
 	Ref<MLPPTensor3> sqrtn() const;
diff --git a/mlpp/lin_alg/mlpp_vector.cpp b/mlpp/lin_alg/mlpp_vector.cpp
index 2038085..ff0589d 100644
--- a/mlpp/lin_alg/mlpp_vector.cpp
+++ b/mlpp/lin_alg/mlpp_vector.cpp
@@ -387,7 +387,7 @@ void MLPPVector::hadamard_productb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 }
 
-void MLPPVector::element_wise_division(const Ref<MLPPVector> &b) {
+void MLPPVector::division_element_wise(const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND(!b.is_valid());
 
 	Ref<MLPPVector> out;
@@ -404,7 +404,7 @@ void MLPPVector::element_wise_division(const Ref<MLPPVector> &b) {
 	}
 }
 
-Ref<MLPPVector> MLPPVector::element_wise_divisionn(const Ref<MLPPVector> &b) const {
+Ref<MLPPVector> MLPPVector::division_element_wisen(const Ref<MLPPVector> &b) const {
 	ERR_FAIL_COND_V(!b.is_valid(), Ref<MLPPVector>());
 
 	Ref<MLPPVector> out;
@@ -425,7 +425,7 @@ Ref<MLPPVector> MLPPVector::element_wise_divisionn(const Ref<MLPPVector> &b) con
 	return out;
 }
 
-void MLPPVector::element_wise_divisionb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+void MLPPVector::division_element_wiseb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND(!a.is_valid() || !b.is_valid());
 
 	int s = a->size();
@@ -1396,9 +1396,9 @@ void MLPPVector::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("hadamard_productn", "b"), &MLPPVector::hadamard_productn);
 	ClassDB::bind_method(D_METHOD("hadamard_productb", "a", "b"), &MLPPVector::hadamard_productb);
 
-	ClassDB::bind_method(D_METHOD("element_wise_division", "b"), &MLPPVector::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "b"), &MLPPVector::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "a", "b"), &MLPPVector::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "b"), &MLPPVector::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "b"), &MLPPVector::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "a", "b"), &MLPPVector::division_element_wiseb);
 
 	ClassDB::bind_method(D_METHOD("scalar_multiply", "scalar"), &MLPPVector::scalar_multiply);
 	ClassDB::bind_method(D_METHOD("scalar_multiplyn", "scalar"), &MLPPVector::scalar_multiplyn);
diff --git a/mlpp/lin_alg/mlpp_vector.h b/mlpp/lin_alg/mlpp_vector.h
index d986eaa..f2f094c 100644
--- a/mlpp/lin_alg/mlpp_vector.h
+++ b/mlpp/lin_alg/mlpp_vector.h
@@ -121,9 +121,9 @@ public:
 	Ref<MLPPVector> hadamard_productn(const Ref<MLPPVector> &b) const;
 	void hadamard_productb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 
-	void element_wise_division(const Ref<MLPPVector> &b);
-	Ref<MLPPVector> element_wise_divisionn(const Ref<MLPPVector> &b) const;
-	void element_wise_divisionb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	void division_element_wise(const Ref<MLPPVector> &b);
+	Ref<MLPPVector> division_element_wisen(const Ref<MLPPVector> &b) const;
+	void division_element_wiseb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 
 	void scalar_multiply(real_t scalar);
 	Ref<MLPPVector> scalar_multiplyn(real_t scalar) const;
diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp
index 35e50c4..702d6cf 100644
--- a/mlpp/lin_reg/lin_reg.cpp
+++ b/mlpp/lin_reg/lin_reg.cpp
@@ -405,7 +405,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz
 
 			v = alg.hadamard_productnv(weight_grad, weight_grad);
 
-			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
 
 			// Calculating the bias gradients
 			_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -460,7 +460,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si
 
 			v = alg.additionnv(alg.scalar_multiplynv(b1, v), alg.scalar_multiplynv(1 - b1, alg.hadamard_productnv(weight_grad, weight_grad)));
 
-			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
 
 			// Calculating the bias gradients
 			_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -519,7 +519,7 @@ void MLPPLinReg::adam(real_t learning_rate, int max_epoch, int mini_batch_size,
 			Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
 			Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);
 
-			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
 
 			// Calculating the bias gradients
 			_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -576,7 +576,7 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size
 
 			Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
 
-			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_hat, u)));
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(m_hat, u)));
 
 			// Calculating the bias gradients
 			_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -637,7 +637,7 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
 			Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
 			Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);
 
-			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
+			_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
 
 			// Calculating the bias gradients
 			_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
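
Usage note (illustrative, not part of the patch): the changeset is a mechanical rename. Every call site swaps element_wise_division* for division_element_wise* with identical arguments, matching the noun-first naming this changeset also applies to shape_set. A minimal C++ sketch of the renamed vector overload, using only MLPPLinAlg calls that appear in this diff (fullnv and division_element_wisenv); the sizes and values are made up for illustration:

	#include "mlpp/lin_alg/lin_alg.h"
	#include "mlpp/lin_alg/mlpp_vector.h"

	Ref<MLPPVector> divide_example() {
		MLPPLinAlg alg;

		// Two length-4 constant vectors: [6, 6, 6, 6] and [2, 2, 2, 2].
		Ref<MLPPVector> num = alg.fullnv(4, 6);
		Ref<MLPPVector> den = alg.fullnv(4, 2);

		// Before this changeset: alg.element_wise_divisionnv(num, den).
		return alg.division_element_wisenv(num, den); // [3, 3, 3, 3]
	}

The nv, nvnm, and nvnvt suffixes keep their existing meaning (MLPPVector, MLPPMatrix, and Vector<Ref<MLPPMatrix>> tensor overloads), so the same one-for-one substitution applies at division_element_wisenvnm and division_element_wisenvnvt call sites.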