diff --git a/doc_classes/MLPPMatrix.xml b/doc_classes/MLPPMatrix.xml
index a6ee3ff..516aac9 100644
--- a/doc_classes/MLPPMatrix.xml
+++ b/doc_classes/MLPPMatrix.xml
@@ -201,20 +201,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
diff --git a/doc_classes/MLPPTensor3.xml b/doc_classes/MLPPTensor3.xml
index 9b6c1c5..95e28aa 100644
--- a/doc_classes/MLPPTensor3.xml
+++ b/doc_classes/MLPPTensor3.xml
@@ -96,20 +96,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
@@ -480,7 +480,7 @@
-
+
diff --git a/doc_classes/MLPPVector.xml b/doc_classes/MLPPVector.xml
index db51b2f..bfc97ec 100644
--- a/doc_classes/MLPPVector.xml
+++ b/doc_classes/MLPPVector.xml
@@ -101,20 +101,20 @@
-	<method name="element_wise_division">
+	<method name="division_element_wise">
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 8defe7f..8d7888a 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -854,11 +854,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
}
Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
}
Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
}
real_t MLPPActivation::sigmoid_derivr(real_t z) {
@@ -1248,12 +1248,12 @@ real_t MLPPActivation::softsign_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
+ return alg.division_element_wisenv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
}
Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
+ return alg.division_element_wisenvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
}
real_t MLPPActivation::softsign_derivr(real_t z) {
@@ -1262,12 +1262,12 @@ real_t MLPPActivation::softsign_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
}
Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
}
//GAUSSIANCDF
@@ -1342,12 +1342,12 @@ real_t MLPPActivation::logit_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
+ return alg.lognv(alg.division_element_wisenv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
}
Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.lognm(alg.element_wise_divisionnvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
+ return alg.lognm(alg.division_element_wisenvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
}
real_t MLPPActivation::logit_derivr(real_t z) {
@@ -1357,16 +1357,16 @@ Ref<MLPPVector> MLPPActivation::logit_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.subtractionnv(
- alg.element_wise_divisionnv(alg.onevecnv(z->size()), z),
- alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
+ alg.division_element_wisenv(alg.onevecnv(z->size()), z),
+ alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
return alg.subtractionnm(
- alg.element_wise_divisionnvnm(
+ alg.division_element_wisenvnm(
alg.onematnm(z->size().x, z->size().y), z),
- alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y),
+ alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y),
alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y))));
}
@@ -1487,7 +1487,7 @@ Ref<MLPPVector> MLPPActivation::mish_derivv(const Ref<MLPPVector> &z) {
sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))),
z),
sigmoid_normv(z)),
- alg.element_wise_divisionnv(mish_normv(z), z));
+ alg.division_element_wisenv(mish_normv(z), z));
}
Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@@ -1499,7 +1499,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))),
z),
sigmoid_normm(z)),
- alg.element_wise_divisionnvnm(mish_normm(z), z));
+ alg.division_element_wisenvnm(mish_normm(z), z));
}
//SINC
@@ -1510,12 +1510,12 @@ real_t MLPPActivation::sinc_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.sinnv(z), z);
+ return alg.division_element_wisenv(alg.sinnv(z), z);
}
Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.sinnm(z), z);
+ return alg.division_element_wisenvnm(alg.sinnm(z), z);
}
real_t MLPPActivation::sinc_derivr(real_t z) {
@@ -1524,12 +1524,12 @@ real_t MLPPActivation::sinc_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
+ return alg.division_element_wisenv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
}
Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
+ return alg.division_element_wisenvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
}
//RELU
@@ -2054,12 +2054,12 @@ real_t MLPPActivation::tanh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::tanh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
+ return alg.division_element_wisenv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
}
Ref<MLPPMatrix> MLPPActivation::tanh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
+ return alg.division_element_wisenvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
}
real_t MLPPActivation::tanh_derivr(real_t z) {
@@ -2084,13 +2084,13 @@ real_t MLPPActivation::csch_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), sinh_normv(z));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), sinh_normv(z));
}
Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
}
real_t MLPPActivation::csch_derivr(real_t z) {
@@ -2117,14 +2117,14 @@ real_t MLPPActivation::sech_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), cosh_normv(z));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), cosh_normv(z));
// return activation(z, deriv, static_cast<real_t (*)(real_t)>(&sech));
}
Ref<MLPPMatrix> MLPPActivation::sech_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
// return activation(z, deriv, static_cast<real_t (*)(real_t)>(&sech));
}
@@ -2152,12 +2152,12 @@ real_t MLPPActivation::coth_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), tanh_normv(z));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), tanh_normv(z));
}
Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
}
real_t MLPPActivation::coth_derivr(real_t z) {
@@ -2199,13 +2199,13 @@ real_t MLPPActivation::arsinh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
}
//ARCOSH
@@ -2231,13 +2231,13 @@ real_t MLPPActivation::arcosh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
}
//ARTANH
@@ -2248,13 +2248,13 @@ real_t MLPPActivation::artanh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
+ return alg.scalar_multiplynv(0.5, alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
}
Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.scalar_multiplynm(0.5, alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
+ return alg.scalar_multiplynm(0.5, alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
}
real_t MLPPActivation::artanh_derivr(real_t z) {
@@ -2263,13 +2263,13 @@ real_t MLPPActivation::artanh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
}
Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
}
//ARCSCH
@@ -2285,8 +2285,8 @@ Ref<MLPPVector> MLPPActivation::arcsch_normv(const Ref<MLPPVector> &z) {
alg.sqrtnv(
alg.additionnv(
alg.onevecnv(z->size()),
- alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
- alg.element_wise_divisionnv(alg.onevecnv(z->size()), z)));
+ alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
+ alg.division_element_wisenv(alg.onevecnv(z->size()), z)));
}
Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@@ -2295,8 +2295,8 @@ Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
alg.additionnm(
alg.sqrtnm(
alg.additionnm(alg.onematnm(z->size().x, z->size().y),
- alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
- alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), z)));
+ alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
+ alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), z)));
}
real_t MLPPActivation::arcsch_derivr(real_t z) {
@@ -2305,20 +2305,20 @@ real_t MLPPActivation::arcsch_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(
+ return alg.division_element_wisenv(
alg.fullnv(z->size(), -1),
alg.hadamard_productnm(
alg.hadamard_productnv(z, z),
- alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
+ alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
}
Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(
+ return alg.division_element_wisenvnm(
alg.fullnm(z->size().x, z->size().y, -1),
alg.hadamard_productnm(alg.hadamard_productnm(z, z),
alg.sqrtnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y),
- alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
+ alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
}
//ARSECH
@@ -2332,11 +2332,11 @@ Ref<MLPPVector> MLPPActivation::arsech_normv(const Ref<MLPPVector> &z) {
return alg.lognv(
alg.additionnv(
- alg.element_wise_divisionnv(
+ alg.division_element_wisenv(
alg.onevecnv(z->size()), z),
alg.hadamard_productnv(
- alg.additionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
- alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
+ alg.additionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
+ alg.subtractionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
@@ -2344,15 +2344,15 @@ Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
return alg.lognm(
alg.additionnm(
- alg.element_wise_divisionnvnm(
+ alg.division_element_wisenvnm(
alg.onematnm(z->size().x, z->size().y), z),
alg.hadamard_productnm(
alg.additionnm(
- alg.element_wise_divisionnvnm(
+ alg.division_element_wisenvnm(
alg.onematnm(z->size().x, z->size().y), z),
alg.onematnm(z->size().x, z->size().y)),
alg.subtractionnm(
- alg.element_wise_divisionnvnm(
+ alg.division_element_wisenvnm(
alg.onematnm(z->size().x, z->size().y), z),
alg.onematnm(z->size().x, z->size().y)))));
}
@@ -2364,7 +2364,7 @@ real_t MLPPActivation::arsech_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(
+ return alg.division_element_wisenv(
alg.fullnv(z->size(), -1),
alg.hadamard_productnv(
z,
@@ -2375,7 +2375,7 @@ Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(
+ return alg.division_element_wisenvnm(
alg.fullnm(z->size().x, z->size().y, -1),
alg.hadamard_productnm(
z,
@@ -2392,7 +2392,7 @@ Ref<MLPPVector> MLPPActivation::arcoth_normv(const Ref<MLPPVector> &z) {
return alg.scalar_multiplynv(
0.5,
- alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
+ alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
@@ -2400,7 +2400,7 @@ Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
return alg.scalar_multiplynm(
0.5,
- alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
+ alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
}
real_t MLPPActivation::arcoth_derivr(real_t z) {
@@ -2409,13 +2409,13 @@ real_t MLPPActivation::arcoth_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+ return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
}
Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
- return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+ return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
}
void MLPPActivation::_bind_methods() {
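For reference, the `element_wise_division*` calls renamed above are direct element-wise transcriptions of the textbook definitions; a few representatives (all quotients taken element-wise over the vector or matrix `z`):

```latex
\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad
\tanh(z) = \frac{e^{z} - e^{-z}}{e^{z} + e^{-z}}, \qquad
\operatorname{artanh}(z) = \tfrac{1}{2}\ln\frac{1 + z}{1 - z}
```

These correspond to `sigmoid_normv`/`sigmoid_normm`, `tanh_normv`/`tanh_normm`, and `artanh_normv`/`artanh_normm`; the rename changes call sites only, not the math.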
diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 5499f54..54cc5af 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -314,8 +314,8 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden = alg.additionnvt(v_hidden, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch);
@@ -378,8 +378,8 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden = alg.additionnvt(alg.scalar_multiplynvt(1 - b1, v_hidden), alg.scalar_multiplynvt(b1, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch);
@@ -456,8 +456,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output);
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch);
@@ -529,8 +529,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output)));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, u_output)));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch);
@@ -606,8 +606,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output);
Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad));
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
@@ -686,8 +686,8 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
v_hidden_hat = alg.maxnvt(v_hidden_hat, v_hidden);
v_output_hat = alg.maxnvv(v_output_hat, v_output);
- Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
- Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+ Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+ Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
y_hat = model_set_test(current_input_batch);
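All six optimizer hunks above use the renamed division for the same per-parameter adaptive step. For `adam`, the quantity being assembled is (element-wise, with `e` added before the root, as in `alg.scalar_addnv(e, alg.sqrtnv(...))`):

```latex
\Delta\theta = \frac{\alpha}{n}\,\frac{\hat{m}}{\varepsilon + \sqrt{\hat{v}}},
\qquad
\hat{m} = \frac{m}{1 - \beta_1^{t}}, \qquad
\hat{v} = \frac{v}{1 - \beta_2^{t}}
```

`adagrad`, `adadelta`, `adamax`, `nadam`, and `amsgrad` differ only in how the first and second moments are accumulated; the element-wise division is the shared step.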
diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp
index 5c351fa..2333dff 100644
--- a/mlpp/cost/cost.cpp
+++ b/mlpp/cost/cost.cpp
@@ -250,15 +250,15 @@ real_t MLPPCost::log_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &
Ref<MLPPVector> MLPPCost::log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
MLPPLinAlg alg;
return alg.additionnv(
- alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)),
- alg.element_wise_divisionnv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
+ alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat)),
+ alg.division_element_wisenv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
}
Ref<MLPPMatrix> MLPPCost::log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg;
return alg.additionnm(
- alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)),
- alg.element_wise_divisionnvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
+ alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat)),
+ alg.division_element_wisenvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
}
real_t MLPPCost::cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
@@ -294,11 +294,11 @@ real_t MLPPCost::cross_entropym(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
Ref<MLPPVector> MLPPCost::cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
MLPPLinAlg alg;
- return alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat));
+ return alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat));
}
Ref<MLPPMatrix> MLPPCost::cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg;
- return alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat));
+ return alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat));
}
real_t MLPPCost::huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta) {
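The derivatives above follow from differentiating the losses element-wise; for labels \(y\) and predictions \(\hat{y}\):

```latex
\frac{\partial}{\partial \hat{y}}\bigl[-y \ln \hat{y}\bigr] = -\frac{y}{\hat{y}},
\qquad
\frac{\partial}{\partial \hat{y}}\bigl[-y \ln \hat{y} - (1 - y)\ln(1 - \hat{y})\bigr]
= -\frac{y}{\hat{y}} + \frac{1 - y}{1 - \hat{y}}
```

which is what `cross_entropy_derivv`/`cross_entropy_derivm` and `log_loss_derivv`/`log_loss_derivm` build from the renamed division calls; the code forms \(1 - y\) and \(1 - \hat{y}\) as \(-1 \cdot (y - 1)\) via `scalar_addnv`/`scalar_multiplynv`.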
diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index a964892..d4bc999 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -193,7 +193,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const
return C;
}
-Ref<MLPPMatrix> MLPPLinAlg::element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+Ref<MLPPMatrix> MLPPLinAlg::division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
Size2i a_size = A->size();
ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
@@ -1095,7 +1095,7 @@ void MLPPLinAlg::hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out) {
-Ref<MLPPVector> MLPPLinAlg::element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+Ref<MLPPVector> MLPPLinAlg::division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>());
Ref<MLPPVector> out;
@@ -1783,12 +1783,12 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::additionnvt(const Vector<Ref<MLPPMatrix>> &A
return res;
}
-Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
+Vector<Ref<MLPPMatrix>> MLPPLinAlg::division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
Vector<Ref<MLPPMatrix>> res;
res.resize(A.size());
for (int i = 0; i < A.size(); i++) {
- res.write[i] = element_wise_divisionnvnm(A[i], B[i]);
+ res.write[i] = division_element_wisenvnm(A[i], B[i]);
}
return res;
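A minimal call-site sketch of the renamed vector entry point (hypothetical, not part of the patch; it assumes `MLPPVector::push_back` and `Ref::instance()` as provided by the engine this module targets):

```cpp
#include "mlpp/lin_alg/lin_alg.h"
#include "mlpp/lin_alg/mlpp_vector.h"

void division_element_wise_example() {
	MLPPLinAlg alg;

	Ref<MLPPVector> a;
	a.instance();
	a->push_back(2);
	a->push_back(9);

	Ref<MLPPVector> b;
	b.instance();
	b->push_back(2);
	b->push_back(3);

	// Old name: element_wise_divisionnv. New name, identical semantics:
	// q[i] = a[i] / b[i]  ->  [1, 3]
	Ref<MLPPVector> q = alg.division_element_wisenv(a, b);
}
```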
diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h
index 9f5e8ce..557e05a 100644
--- a/mlpp/lin_alg/lin_alg.h
+++ b/mlpp/lin_alg/lin_alg.h
@@ -37,7 +37,7 @@ public:
Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
- Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+ Ref<MLPPMatrix> division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> transposenm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A);
@@ -144,7 +144,7 @@ public:
Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
- Ref<MLPPVector> element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+ Ref<MLPPVector> division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
@@ -210,7 +210,7 @@ public:
// TENSOR FUNCTIONS
Vector<Ref<MLPPMatrix>> additionnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
- Vector<Ref<MLPPMatrix>> element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+ Vector<Ref<MLPPMatrix>> division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
Vector<Ref<MLPPMatrix>> sqrtnvt(const Vector<Ref<MLPPMatrix>> &A);
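The tensor-level variant declared above maps the matrix operation across a `Vector` of matrices; a hypothetical sketch (assumes `A` and `B` hold pairwise same-sized, valid matrices):

```cpp
MLPPLinAlg alg;

Vector<Ref<MLPPMatrix>> A; // e.g. per-layer weight gradients
Vector<Ref<MLPPMatrix>> B; // e.g. per-layer adaptive denominators

// Applies division_element_wisenvnm(A[i], B[i]) for each i,
// exactly as the loop in lin_alg.cpp above does.
Vector<Ref<MLPPMatrix>> Q = alg.division_element_wisenvnvt(A, B);
```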
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index dd9dbd4..a30c88f 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -999,7 +999,7 @@ void MLPPMatrix::kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
-void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wise(const Ref<MLPPMatrix> &B) {
ERR_FAIL_COND(!B.is_valid());
ERR_FAIL_COND(_size != B->size());
@@ -1012,7 +1012,7 @@ void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
c_ptr[i] /= b_ptr[i];
}
}
-Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) const {
+Ref<MLPPMatrix> MLPPMatrix::division_element_wisen(const Ref<MLPPMatrix> &B) const {
ERR_FAIL_COND_V(!B.is_valid(), Ref<MLPPMatrix>());
ERR_FAIL_COND_V(_size != B->size(), Ref<MLPPMatrix>());
@@ -1032,7 +1032,7 @@ Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) con
return C;
}
-void MLPPMatrix::element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wiseb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
ERR_FAIL_COND(!A.is_valid() || !B.is_valid());
Size2i a_size = A->size();
ERR_FAIL_COND(a_size != B->size());
@@ -3071,9 +3071,9 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("kronecker_productn", "B"), &MLPPMatrix::kronecker_productn);
ClassDB::bind_method(D_METHOD("kronecker_productb", "A", "B"), &MLPPMatrix::kronecker_productb);
- ClassDB::bind_method(D_METHOD("element_wise_division", "B"), &MLPPMatrix::element_wise_division);
- ClassDB::bind_method(D_METHOD("element_wise_divisionn", "B"), &MLPPMatrix::element_wise_divisionn);
- ClassDB::bind_method(D_METHOD("element_wise_divisionb", "A", "B"), &MLPPMatrix::element_wise_divisionb);
+ ClassDB::bind_method(D_METHOD("division_element_wise", "B"), &MLPPMatrix::division_element_wise);
+ ClassDB::bind_method(D_METHOD("division_element_wisen", "B"), &MLPPMatrix::division_element_wisen);
+ ClassDB::bind_method(D_METHOD("division_element_wiseb", "A", "B"), &MLPPMatrix::division_element_wiseb);
ClassDB::bind_method(D_METHOD("transpose"), &MLPPMatrix::transpose);
ClassDB::bind_method(D_METHOD("transposen"), &MLPPMatrix::transposen);
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index c743e68..89e1d14 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -148,9 +148,9 @@ public:
Ref<MLPPMatrix> kronecker_productn(const Ref<MLPPMatrix> &B) const;
void kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
- void element_wise_division(const Ref<MLPPMatrix> &B);
- Ref<MLPPMatrix> element_wise_divisionn(const Ref<MLPPMatrix> &B) const;
- void element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
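The header declarations above follow the module's suffix convention; a hypothetical usage sketch of the three renamed `MLPPMatrix` variants (the in-place/`n`/`b` behavior is inferred from the implementations shown earlier in this patch):

```cpp
Ref<MLPPMatrix> A; // assume valid and the same Size2i as B
Ref<MLPPMatrix> B;

A->division_element_wise(B);                       // in-place: A[i] /= B[i]
Ref<MLPPMatrix> C = A->division_element_wisen(B);  // returns a new matrix A ./ B
Ref<MLPPMatrix> D;
D.instance();
D->division_element_wiseb(A, B);                   // writes A ./ B into D
```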