Mirror of https://github.com/Relintai/pmlpp.git (synced 2024-11-08 13:12:09 +01:00)
Tweaks to Tensor3's API.
This commit is contained in: parent e84e45f6e8, commit 488cdde8c9.
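The diff below renames the element-wise division family on MLPPVector, MLPPMatrix, and MLPPTensor3 (plus the matching MLPPLinAlg helpers and their GDScript bindings) from `element_wise_division*` to `division_element_wise*`, renames `MLPPTensor3::set_shape` to `shape_set`, and fixes the exposed `z_add_slice_mlpp_vector` binding to `z_slice_add_mlpp_vector`, presumably so method names lead with the operation and suffix variants group together. All call sites in the activation, cost, ANN, and linear-regression code are updated to match. A minimal before/after sketch at a hypothetical call site (illustrative only; just the method names and signatures come from this diff, the include path and surrounding function are assumptions):

#include "mlpp/lin_alg/mlpp_matrix.h" // path is an assumption

void example_division_rename(const Ref<MLPPMatrix> &a, const Ref<MLPPMatrix> &b) {
	// Before this commit:
	//   Ref<MLPPMatrix> c = a->element_wise_divisionn(b);
	// After this commit (same semantics, new name):
	Ref<MLPPMatrix> c = a->division_element_wisen(b); // c[i][j] = a[i][j] / b[i][j]
}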
@@ -201,20 +201,20 @@
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_division">
+	<method name="division_element_wise">
 		<return type="void" />
 		<argument index="0" name="B" type="MLPPMatrix" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
 		<return type="void" />
 		<argument index="0" name="A" type="MLPPMatrix" />
 		<argument index="1" name="B" type="MLPPMatrix" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
 		<return type="MLPPMatrix" />
 		<argument index="0" name="B" type="MLPPMatrix" />
 		<description>
@@ -96,20 +96,20 @@
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_division">
+	<method name="division_element_wise">
 		<return type="void" />
 		<argument index="0" name="B" type="MLPPTensor3" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
 		<return type="void" />
 		<argument index="0" name="A" type="MLPPTensor3" />
 		<argument index="1" name="B" type="MLPPTensor3" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
 		<return type="MLPPTensor3" />
 		<argument index="0" name="B" type="MLPPTensor3" />
 		<description>
@@ -480,7 +480,7 @@
 		<description>
 		</description>
 	</method>
-	<method name="set_shape">
+	<method name="shape_set">
 		<return type="void" />
 		<argument index="0" name="size" type="Vector3i" />
 		<description>
@@ -101,20 +101,20 @@
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_division">
+	<method name="division_element_wise">
 		<return type="void" />
 		<argument index="0" name="b" type="MLPPVector" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionb">
+	<method name="division_element_wiseb">
 		<return type="void" />
 		<argument index="0" name="a" type="MLPPVector" />
 		<argument index="1" name="b" type="MLPPVector" />
 		<description>
 		</description>
 	</method>
-	<method name="element_wise_divisionn" qualifiers="const">
+	<method name="division_element_wisen" qualifiers="const">
 		<return type="MLPPVector" />
 		<argument index="0" name="b" type="MLPPVector" />
 		<description>
@@ -854,11 +854,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
 }
 Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
 }
 Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.expnm(alg.scalar_multiplynm(-1, z))));
 }

 real_t MLPPActivation::sigmoid_derivr(real_t z) {
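For orientation: the renamed helper is ordinary element-wise (Hadamard) division, so sigmoid_normv above reads as ones divided by (ones + exp(-z)), i.e. sigma(z) = 1 / (1 + e^(-z)) applied componentwise. A standalone sketch of the same computation on a raw buffer (hypothetical illustration, not library code):

#include <cmath>
#include <cstddef>

// out[i] = 1 / (1 + exp(-z[i])) -- the element-wise reading of
// division_element_wisenv(onevecnv(n), additionnv(onevecnv(n), expnv(scalar_multiplynv(-1, z)))).
void sigmoid_norm_sketch(const double *z, double *out, std::size_t n) {
	for (std::size_t i = 0; i < n; ++i) {
		out[i] = 1.0 / (1.0 + std::exp(-z[i]));
	}
}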
@@ -1248,12 +1248,12 @@ real_t MLPPActivation::softsign_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
+	return alg.division_element_wisenv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
 }
 Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
+	return alg.division_element_wisenvnm(z, alg.additionnv(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)));
 }

 real_t MLPPActivation::softsign_derivr(real_t z) {
@@ -1262,12 +1262,12 @@ real_t MLPPActivation::softsign_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
 }
 Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.exponentiatenv(alg.additionnm(alg.onematnm(z->size().x, z->size().y), alg.absnm(z)), 2));
 }

 //GAUSSIANCDF
@@ -1342,12 +1342,12 @@ real_t MLPPActivation::logit_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
+	return alg.lognv(alg.division_element_wisenv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
 }
 Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.lognm(alg.element_wise_divisionnvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
+	return alg.lognm(alg.division_element_wisenvnm(z, alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z)));
 }

 real_t MLPPActivation::logit_derivr(real_t z) {
@@ -1357,16 +1357,16 @@ Ref<MLPPVector> MLPPActivation::logit_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

 	return alg.subtractionnv(
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), z),
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
+			alg.division_element_wisenv(alg.onevecnv(z->size()), z),
+			alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
 }
 Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

 	return alg.subtractionnm(
-			alg.element_wise_divisionnvnm(
+			alg.division_element_wisenvnm(
 					alg.onematnm(z->size().x, z->size().y), z),
-			alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y),
+			alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y),
 					alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y))));
 }

@@ -1487,7 +1487,7 @@ Ref<MLPPVector> MLPPActivation::mish_derivv(const Ref<MLPPVector> &z) {
 							sech_normv(softplus_normv(z)), sech_normv(softplus_normv(z))),
 					z),
 			sigmoid_normv(z)),
-			alg.element_wise_divisionnv(mish_normv(z), z));
+			alg.division_element_wisenv(mish_normv(z), z));
 }
 Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
@@ -1499,7 +1499,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
 							sech_normm(softplus_normm(z)), sech_normm(softplus_normm(z))),
 					z),
 			sigmoid_normm(z)),
-			alg.element_wise_divisionnvnm(mish_normm(z), z));
+			alg.division_element_wisenvnm(mish_normm(z), z));
 }

 //SINC
@@ -1510,12 +1510,12 @@ real_t MLPPActivation::sinc_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.sinnv(z), z);
+	return alg.division_element_wisenv(alg.sinnv(z), z);
 }
 Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.sinnm(z), z);
+	return alg.division_element_wisenvnm(alg.sinnm(z), z);
 }

 real_t MLPPActivation::sinc_derivr(real_t z) {
@@ -1524,12 +1524,12 @@ real_t MLPPActivation::sinc_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
+	return alg.division_element_wisenv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
 }
 Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
+	return alg.division_element_wisenvnm(alg.subtractionnm(alg.hadamard_productnm(z, alg.cosnm(z)), alg.sinnm(z)), alg.hadamard_productnm(z, z));
 }

 //RELU
@@ -2054,12 +2054,12 @@ real_t MLPPActivation::tanh_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::tanh_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
+	return alg.division_element_wisenv(alg.subtractionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))), alg.additionnv(alg.expnv(z), alg.expnv(alg.scalar_multiplynv(-1, z))));
 }
 Ref<MLPPMatrix> MLPPActivation::tanh_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
+	return alg.division_element_wisenvnm(alg.subtractionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))), alg.additionnm(alg.expnm(z), alg.expnm(alg.scalar_multiplynm(-1, z))));
 }

 real_t MLPPActivation::tanh_derivr(real_t z) {
@@ -2084,13 +2084,13 @@ real_t MLPPActivation::csch_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), sinh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), sinh_normv(z));
 }

 Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), sinh_normm(z));
 }

 real_t MLPPActivation::csch_derivr(real_t z) {
@@ -2117,14 +2117,14 @@ real_t MLPPActivation::sech_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), cosh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), cosh_normv(z));

 	// return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech));
 }
 Ref<MLPPMatrix> MLPPActivation::sech_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), cosh_normm(z));

 	// return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech));
 }
@@ -2152,12 +2152,12 @@ real_t MLPPActivation::coth_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), tanh_normv(z));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), tanh_normv(z));
 }
 Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), tanh_normm(z));
 }

 real_t MLPPActivation::coth_derivr(real_t z) {
@@ -2199,13 +2199,13 @@ real_t MLPPActivation::arsinh_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
 }

 Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.additionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
 }

 //ARCOSH
@@ -2231,13 +2231,13 @@ real_t MLPPActivation::arcosh_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
 }

 Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.sqrtnm(alg.subtractionnm(alg.hadamard_productnm(z, z), alg.onematnm(z->size().x, z->size().y))));
 }

 //ARTANH
@@ -2248,13 +2248,13 @@ real_t MLPPActivation::artanh_normr(real_t z) {
 Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
+	return alg.scalar_multiplynv(0.5, alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
 }

 Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.scalar_multiplynm(0.5, alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
+	return alg.scalar_multiplynm(0.5, alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), z))));
 }

 real_t MLPPActivation::artanh_derivr(real_t z) {
@@ -2263,13 +2263,13 @@ real_t MLPPActivation::artanh_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
 }

 Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnv(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
 }

 //ARCSCH
@@ -2285,8 +2285,8 @@ Ref<MLPPVector> MLPPActivation::arcsch_normv(const Ref<MLPPVector> &z) {
 					alg.sqrtnv(
 							alg.additionnv(
 									alg.onevecnv(z->size()),
-									alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
-			alg.element_wise_divisionnv(alg.onevecnv(z->size()), z)));
+									alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
+			alg.division_element_wisenv(alg.onevecnv(z->size()), z)));
 }
 Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
@@ -2295,8 +2295,8 @@ Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
 			alg.additionnm(
 					alg.sqrtnm(
 							alg.additionnm(alg.onematnm(z->size().x, z->size().y),
-									alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
-			alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), z)));
+									alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)))),
+			alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), z)));
 }

 real_t MLPPActivation::arcsch_derivr(real_t z) {
@@ -2305,20 +2305,20 @@ real_t MLPPActivation::arcsch_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(
+	return alg.division_element_wisenv(
 			alg.fullnv(z->size(), -1),
 			alg.hadamard_productnm(
 					alg.hadamard_productnv(z, z),
-					alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
+					alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.division_element_wisenv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
 }
 Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(
+	return alg.division_element_wisenvnm(
 			alg.fullnm(z->size().x, z->size().y, -1),
 			alg.hadamard_productnm(alg.hadamard_productnm(z, z),
 					alg.sqrtnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y),
-							alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
+							alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z))))));
 }

 //ARSECH
@@ -2332,11 +2332,11 @@ Ref<MLPPVector> MLPPActivation::arsech_normv(const Ref<MLPPVector> &z) {

 	return alg.lognv(
 			alg.additionnv(
-					alg.element_wise_divisionnv(
+					alg.division_element_wisenv(
 							alg.onevecnv(z->size()), z),
 					alg.hadamard_productnv(
-							alg.additionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
-							alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
+							alg.additionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
+							alg.subtractionnv(alg.division_element_wisenv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
 }

 Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
@@ -2344,15 +2344,15 @@ Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {

 	return alg.lognm(
 			alg.additionnm(
-					alg.element_wise_divisionnvnm(
+					alg.division_element_wisenvnm(
 							alg.onematnm(z->size().x, z->size().y), z),
 					alg.hadamard_productnm(
 							alg.additionnm(
-									alg.element_wise_divisionnvnm(
+									alg.division_element_wisenvnm(
 											alg.onematnm(z->size().x, z->size().y), z),
 									alg.onematnm(z->size().x, z->size().y)),
 							alg.subtractionnm(
-									alg.element_wise_divisionnvnm(
+									alg.division_element_wisenvnm(
 											alg.onematnm(z->size().x, z->size().y), z),
 									alg.onematnm(z->size().x, z->size().y)))));
 }
@@ -2364,7 +2364,7 @@ real_t MLPPActivation::arsech_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(
+	return alg.division_element_wisenv(
 			alg.fullnv(z->size(), -1),
 			alg.hadamard_productnv(
 					z,
@@ -2375,7 +2375,7 @@ Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
 Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(
+	return alg.division_element_wisenvnm(
 			alg.fullnm(z->size().x, z->size().y, -1),
 			alg.hadamard_productnm(
 					z,
@@ -2392,7 +2392,7 @@ Ref<MLPPVector> MLPPActivation::arcoth_normv(const Ref<MLPPVector> &z) {

 	return alg.scalar_multiplynv(
 			0.5,
-			alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
+			alg.lognv(alg.division_element_wisenv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
 }

 Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
@@ -2400,7 +2400,7 @@ Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {

 	return alg.scalar_multiplynm(
 			0.5,
-			alg.lognm(alg.element_wise_divisionnvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
+			alg.lognm(alg.division_element_wisenvnm(alg.additionnm(alg.onematnm(z->size().x, z->size().y), z), alg.subtractionnm(z, alg.onematnm(z->size().x, z->size().y)))));
 }

 real_t MLPPActivation::arcoth_derivr(real_t z) {
@@ -2409,13 +2409,13 @@ real_t MLPPActivation::arcoth_derivr(real_t z) {
 Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
+	return alg.division_element_wisenv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
 }

 Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.element_wise_divisionnvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
+	return alg.division_element_wisenvnm(alg.onematnm(z->size().x, z->size().y), alg.subtractionnm(alg.onematnm(z->size().x, z->size().y), alg.hadamard_productnm(z, z)));
 }

 void MLPPActivation::_bind_methods() {
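The hunks that follow are the ANN optimizer call sites. Each adaptive method scales its gradient statistics element-wise by a root-mean-square term; Adam's update, for instance, is w = w - (lr / n) * m_hat / (e + sqrt(v_hat)), which is exactly the division_element_wise* call in each hunk. A minimal scalar sketch of that step (hypothetical illustration; the variable names mirror the code, everything else is assumed):

#include <cmath>
#include <cstddef>

// One Adam-style parameter update on raw arrays:
// w[i] -= lr * m_hat[i] / (e + sqrt(v_hat[i])), mirroring
// division_element_wisenv(m_output_hat, scalar_addnv(e, sqrtnv(v_output_hat))).
void adam_step_sketch(double *w, const double *m_hat, const double *v_hat,
		double lr, double e, std::size_t n) {
	for (std::size_t i = 0; i < n; ++i) {
		w[i] -= lr * m_hat[i] / (e + std::sqrt(v_hat[i]));
	}
}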
@@ -314,8 +314,8 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size,
 	v_hidden = alg.additionnvt(v_hidden, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2));
 	v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 	y_hat = model_set_test(current_input_batch);
@@ -378,8 +378,8 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size,
 	v_hidden = alg.additionnvt(alg.scalar_multiplynvt(1 - b1, v_hidden), alg.scalar_multiplynvt(b1, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
 	v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 	y_hat = model_set_test(current_input_batch);
@@ -456,8 +456,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
 	Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
 	Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output);

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 	y_hat = model_set_test(current_input_batch);
@@ -529,8 +529,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r

 	Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output)));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output_hat, alg.scalar_addnv(e, u_output)));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 	y_hat = model_set_test(current_input_batch);
@@ -606,8 +606,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
 	Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output);
 	Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad));

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.

@@ -686,8 +686,8 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
 	v_hidden_hat = alg.maxnvt(v_hidden_hat, v_hidden);
 	v_output_hat = alg.maxnvv(v_output_hat, v_output);

-	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
-	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
+	Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.division_element_wisenvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
+	Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.division_element_wisenv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));

 	update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
 	y_hat = model_set_test(current_input_batch);
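In the cost hunks below, log_loss_derivv encodes the derivative of binary cross-entropy, d/dy_hat of (-y*log(y_hat) - (1-y)*log(1-y_hat)) = -y/y_hat + (1-y)/(1-y_hat); both fractions are the element-wise divisions being renamed, with the second built from scalar_addnv(-1, ...) and sign flips. A scalar sketch of the same expression (hypothetical illustration, not library code):

#include <cstddef>

// d[i] = -y[i] / y_hat[i] + (1 - y[i]) / (1 - y_hat[i]),
// matching the two division_element_wisenv terms in log_loss_derivv.
void log_loss_deriv_sketch(const double *y_hat, const double *y, double *d, std::size_t n) {
	for (std::size_t i = 0; i < n; ++i) {
		d[i] = -y[i] / y_hat[i] + (1.0 - y[i]) / (1.0 - y_hat[i]);
	}
}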
@@ -250,15 +250,15 @@ real_t MLPPCost::log_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &
 Ref<MLPPVector> MLPPCost::log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPLinAlg alg;
 	return alg.additionnv(
-			alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat)),
-			alg.element_wise_divisionnv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
+			alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat)),
+			alg.division_element_wisenv(alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y)), alg.scalar_multiplynv(-1, alg.scalar_addnv(-1, y_hat))));
 }

 Ref<MLPPMatrix> MLPPCost::log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 	MLPPLinAlg alg;
 	return alg.additionnm(
-			alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat)),
-			alg.element_wise_divisionnvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
+			alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat)),
+			alg.division_element_wisenvnm(alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y)), alg.scalar_multiplynm(-1, alg.scalar_addnm(-1, y_hat))));
 }

 real_t MLPPCost::cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
@@ -294,11 +294,11 @@ real_t MLPPCost::cross_entropym(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatr

 Ref<MLPPVector> MLPPCost::cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPLinAlg alg;
-	return alg.scalar_multiplynv(-1, alg.element_wise_divisionnv(y, y_hat));
+	return alg.scalar_multiplynv(-1, alg.division_element_wisenv(y, y_hat));
 }
 Ref<MLPPMatrix> MLPPCost::cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
 	MLPPLinAlg alg;
-	return alg.scalar_multiplynm(-1, alg.element_wise_divisionnvnm(y, y_hat));
+	return alg.scalar_multiplynm(-1, alg.division_element_wisenvnm(y, y_hat));
 }

 real_t MLPPCost::huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta) {
@@ -193,7 +193,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const

 	return C;
 }
-Ref<MLPPMatrix> MLPPLinAlg::element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+Ref<MLPPMatrix> MLPPLinAlg::division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
 	Size2i a_size = A->size();
 	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
@@ -1095,7 +1095,7 @@ void MLPPLinAlg::hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVecto
 	}
 }

-Ref<MLPPVector> MLPPLinAlg::element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+Ref<MLPPVector> MLPPLinAlg::division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>());

 	Ref<MLPPVector> out;
@@ -1783,12 +1783,12 @@ Vector<Ref<MLPPMatrix>> MLPPLinAlg::additionnvt(const Vector<Ref<MLPPMatrix>> &A
 	return res;
 }

-Vector<Ref<MLPPMatrix>> MLPPLinAlg::element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
+Vector<Ref<MLPPMatrix>> MLPPLinAlg::division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B) {
 	Vector<Ref<MLPPMatrix>> res;
 	res.resize(A.size());

 	for (int i = 0; i < A.size(); i++) {
-		res.write[i] = element_wise_divisionnvnm(A[i], B[i]);
+		res.write[i] = division_element_wisenvnm(A[i], B[i]);
 	}

 	return res;
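The diff context above shows only the heads of the renamed MLPPLinAlg functions, so their loop bodies are elided. For reference, a minimal sketch of what division_element_wisenv presumably does under those guards (an assumed shape, based on the in-place variants visible later in this commit where c_ptr[i] /= b_ptr[i]; the ptr()/ptrw()/instance() accessors are assumed Godot-style APIs):

// Assumed shape only: allocate an output vector of a's size and divide
// component-wise. The guard macro is the one visible in the hunk above.
Ref<MLPPVector> division_element_wisenv_sketch(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
	ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref<MLPPVector>());

	Ref<MLPPVector> out;
	out.instance();
	out->resize(a->size());

	const real_t *a_ptr = a->ptr();
	const real_t *b_ptr = b->ptr();
	real_t *out_ptr = out->ptrw();

	for (int i = 0; i < a->size(); ++i) {
		out_ptr[i] = a_ptr[i] / b_ptr[i]; // element-wise a / b
	}

	return out;
}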
@@ -37,7 +37,7 @@ public:

 	Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
 	Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
-	Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> division_element_wisenvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);

 	Ref<MLPPMatrix> transposenm(const Ref<MLPPMatrix> &A);
 	Ref<MLPPMatrix> scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A);
@@ -144,7 +144,7 @@ public:
 	Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 	void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);

-	Ref<MLPPVector> element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	Ref<MLPPVector> division_element_wisenv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);

 	Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
 	void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
@@ -210,7 +210,7 @@ public:
 	// TENSOR FUNCTIONS
 	Vector<Ref<MLPPMatrix>> additionnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

-	Vector<Ref<MLPPMatrix>> element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+	Vector<Ref<MLPPMatrix>> division_element_wisenvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);

 	Vector<Ref<MLPPMatrix>> sqrtnvt(const Vector<Ref<MLPPMatrix>> &A);

@@ -999,7 +999,7 @@ void MLPPMatrix::kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatr
 	}
 }

-void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wise(const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND(!B.is_valid());
 	ERR_FAIL_COND(_size != B->size());

@@ -1012,7 +1012,7 @@ void MLPPMatrix::element_wise_division(const Ref<MLPPMatrix> &B) {
 		c_ptr[i] /= b_ptr[i];
 	}
 }
-Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) const {
+Ref<MLPPMatrix> MLPPMatrix::division_element_wisen(const Ref<MLPPMatrix> &B) const {
 	ERR_FAIL_COND_V(!B.is_valid(), Ref<MLPPMatrix>());
 	ERR_FAIL_COND_V(_size != B->size(), Ref<MLPPMatrix>());

@@ -1032,7 +1032,7 @@ Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionn(const Ref<MLPPMatrix> &B) con

 	return C;
 }
-void MLPPMatrix::element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+void MLPPMatrix::division_element_wiseb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 	ERR_FAIL_COND(!A.is_valid() || !B.is_valid());
 	Size2i a_size = A->size();
 	ERR_FAIL_COND(a_size != B->size());
@@ -3071,9 +3071,9 @@ void MLPPMatrix::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("kronecker_productn", "B"), &MLPPMatrix::kronecker_productn);
 	ClassDB::bind_method(D_METHOD("kronecker_productb", "A", "B"), &MLPPMatrix::kronecker_productb);

-	ClassDB::bind_method(D_METHOD("element_wise_division", "B"), &MLPPMatrix::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "B"), &MLPPMatrix::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "A", "B"), &MLPPMatrix::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "B"), &MLPPMatrix::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "B"), &MLPPMatrix::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "A", "B"), &MLPPMatrix::division_element_wiseb);

 	ClassDB::bind_method(D_METHOD("transpose"), &MLPPMatrix::transpose);
 	ClassDB::bind_method(D_METHOD("transposen"), &MLPPMatrix::transposen);
@@ -148,9 +148,9 @@ public:
 	Ref<MLPPMatrix> kronecker_productn(const Ref<MLPPMatrix> &B) const;
 	void kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);

-	void element_wise_division(const Ref<MLPPMatrix> &B);
-	Ref<MLPPMatrix> element_wise_divisionn(const Ref<MLPPMatrix> &B) const;
-	void element_wise_divisionb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	void division_element_wise(const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> division_element_wisen(const Ref<MLPPMatrix> &B) const;
+	void division_element_wiseb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);

 	void transpose();
 	Ref<MLPPMatrix> transposen() const;
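The header above shows the full trio and the naming convention the rename preserves: judging by the signatures, the bare name mutates in place, the "n" suffix is const and returns a new object, and the "b" suffix writes the result of its two operands into the receiver. A hypothetical caller (illustrative only):

void division_trio_sketch(Ref<MLPPMatrix> m, const Ref<MLPPMatrix> &a, const Ref<MLPPMatrix> &b) {
	m->division_element_wise(b);                       // in place: m /= b, element-wise
	Ref<MLPPMatrix> q = m->division_element_wisen(b);  // returns a new matrix; m unchanged
	m->division_element_wiseb(a, b);                   // m = a / b element-wise, overwriting m
}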
@@ -231,7 +231,7 @@ void MLPPTensor3::resize(const Size3i &p_size) {
 	CRASH_COND_MSG(!_data, "Out of memory");
 }

-void MLPPTensor3::set_shape(const Size3i &p_size) {
+void MLPPTensor3::shape_set(const Size3i &p_size) {
 	int ds = data_size();
 	int new_data_size = p_size.x * p_size.y * p_size.z;

@@ -1437,7 +1437,7 @@ void MLPPTensor3::subb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
 	}
 }

-void MLPPTensor3::element_wise_division(const Ref<MLPPTensor3> &B) {
+void MLPPTensor3::division_element_wise(const Ref<MLPPTensor3> &B) {
 	ERR_FAIL_COND(!B.is_valid());
 	ERR_FAIL_COND(_size != B->size());

@@ -1450,7 +1450,7 @@ void MLPPTensor3::element_wise_division(const Ref<MLPPTensor3> &B) {
 		c_ptr[i] /= b_ptr[i];
 	}
 }
-Ref<MLPPTensor3> MLPPTensor3::element_wise_divisionn(const Ref<MLPPTensor3> &B) const {
+Ref<MLPPTensor3> MLPPTensor3::division_element_wisen(const Ref<MLPPTensor3> &B) const {
 	ERR_FAIL_COND_V(!B.is_valid(), Ref<MLPPTensor3>());
 	ERR_FAIL_COND_V(_size != B->size(), Ref<MLPPTensor3>());

@@ -1470,7 +1470,7 @@ Ref<MLPPTensor3> MLPPTensor3::element_wise_divisionn(const Ref<MLPPTensor3> &B)

 	return C;
 }
-void MLPPTensor3::element_wise_divisionb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
+void MLPPTensor3::division_element_wiseb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B) {
 	ERR_FAIL_COND(!A.is_valid() || !B.is_valid());
 	Size3i a_size = A->size();
 	ERR_FAIL_COND(a_size != B->size());
@@ -2278,7 +2278,7 @@ void MLPPTensor3::_bind_methods() {
 	ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");

 	ClassDB::bind_method(D_METHOD("z_slice_add_pool_vector", "row"), &MLPPTensor3::z_slice_add_pool_vector);
-	ClassDB::bind_method(D_METHOD("z_add_slice_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
+	ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
 	ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_matrix", "matrix"), &MLPPTensor3::z_slice_add_mlpp_matrix);

 	ClassDB::bind_method(D_METHOD("z_slice_remove", "index"), &MLPPTensor3::z_slice_remove);
@@ -2298,7 +2298,7 @@ void MLPPTensor3::_bind_methods() {

 	ClassDB::bind_method(D_METHOD("resize", "size"), &MLPPTensor3::resize);

-	ClassDB::bind_method(D_METHOD("set_shape", "size"), &MLPPTensor3::set_shape);
+	ClassDB::bind_method(D_METHOD("shape_set", "size"), &MLPPTensor3::shape_set);
 	ClassDB::bind_method(D_METHOD("calculate_index", "index_y", "index_x", "index_z"), &MLPPTensor3::calculate_index);
 	ClassDB::bind_method(D_METHOD("calculate_z_slice_index", "index_z"), &MLPPTensor3::calculate_z_slice_index);

@@ -2381,9 +2381,9 @@ void MLPPTensor3::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("hadamard_productn", "B"), &MLPPTensor3::hadamard_productn);
 	ClassDB::bind_method(D_METHOD("hadamard_productb", "A", "B"), &MLPPTensor3::hadamard_productb);

-	ClassDB::bind_method(D_METHOD("element_wise_division", "B"), &MLPPTensor3::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "B"), &MLPPTensor3::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "A", "B"), &MLPPTensor3::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "B"), &MLPPTensor3::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "B"), &MLPPTensor3::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "A", "B"), &MLPPTensor3::division_element_wiseb);

 	ClassDB::bind_method(D_METHOD("scalar_multiply", "scalar"), &MLPPTensor3::scalar_multiply);
 	ClassDB::bind_method(D_METHOD("scalar_multiplyn", "scalar"), &MLPPTensor3::scalar_multiplyn);
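MLPPTensor3 keeps both resize and the renamed shape_set; the visible lines of shape_set compare the current data_size() with the requested x * y * z, which suggests it adjusts the backing buffer for a new Size3i shape (the exact semantics are not shown in this hunk). A hypothetical call (illustrative only; the Size3i construction is an assumption):

void tensor3_shape_sketch(Ref<MLPPTensor3> t) {
	t->shape_set(Size3i(4, 3, 2)); // was t->set_shape(...) before this commit
}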
@@ -60,7 +60,7 @@ public:
 	_FORCE_INLINE_ Size3i size() const { return _size; }

 	void resize(const Size3i &p_size);
-	void set_shape(const Size3i &p_size);
+	void shape_set(const Size3i &p_size);

 	_FORCE_INLINE_ int calculate_index(int p_index_y, int p_index_x, int p_index_z) const {
 		return p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z;
@@ -193,9 +193,9 @@ public:
 	Ref<MLPPTensor3> subn(const Ref<MLPPTensor3> &B) const;
 	void subb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);

-	void element_wise_division(const Ref<MLPPTensor3> &B);
-	Ref<MLPPTensor3> element_wise_divisionn(const Ref<MLPPTensor3> &B) const;
-	void element_wise_divisionb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);
+	void division_element_wise(const Ref<MLPPTensor3> &B);
+	Ref<MLPPTensor3> division_element_wisen(const Ref<MLPPTensor3> &B) const;
+	void division_element_wiseb(const Ref<MLPPTensor3> &A, const Ref<MLPPTensor3> &B);

 	void sqrt();
 	Ref<MLPPTensor3> sqrtn() const;
@@ -387,7 +387,7 @@ void MLPPVector::hadamard_productb(const Ref<MLPPVector> &a, const Ref<MLPPVecto
 	}
 }

-void MLPPVector::element_wise_division(const Ref<MLPPVector> &b) {
+void MLPPVector::division_element_wise(const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND(!b.is_valid());

 	Ref<MLPPVector> out;
@@ -404,7 +404,7 @@ void MLPPVector::element_wise_division(const Ref<MLPPVector> &b) {
 	}
 }

-Ref<MLPPVector> MLPPVector::element_wise_divisionn(const Ref<MLPPVector> &b) const {
+Ref<MLPPVector> MLPPVector::division_element_wisen(const Ref<MLPPVector> &b) const {
 	ERR_FAIL_COND_V(!b.is_valid(), Ref<MLPPVector>());

 	Ref<MLPPVector> out;
@@ -425,7 +425,7 @@ Ref<MLPPVector> MLPPVector::element_wise_divisionn(const Ref<MLPPVector> &b) con
 	return out;
 }

-void MLPPVector::element_wise_divisionb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+void MLPPVector::division_element_wiseb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 	ERR_FAIL_COND(!a.is_valid() || !b.is_valid());

 	int s = a->size();
@@ -1396,9 +1396,9 @@ void MLPPVector::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("hadamard_productn", "b"), &MLPPVector::hadamard_productn);
 	ClassDB::bind_method(D_METHOD("hadamard_productb", "a", "b"), &MLPPVector::hadamard_productb);

-	ClassDB::bind_method(D_METHOD("element_wise_division", "b"), &MLPPVector::element_wise_division);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionn", "b"), &MLPPVector::element_wise_divisionn);
-	ClassDB::bind_method(D_METHOD("element_wise_divisionb", "a", "b"), &MLPPVector::element_wise_divisionb);
+	ClassDB::bind_method(D_METHOD("division_element_wise", "b"), &MLPPVector::division_element_wise);
+	ClassDB::bind_method(D_METHOD("division_element_wisen", "b"), &MLPPVector::division_element_wisen);
+	ClassDB::bind_method(D_METHOD("division_element_wiseb", "a", "b"), &MLPPVector::division_element_wiseb);

 	ClassDB::bind_method(D_METHOD("scalar_multiply", "scalar"), &MLPPVector::scalar_multiply);
 	ClassDB::bind_method(D_METHOD("scalar_multiplyn", "scalar"), &MLPPVector::scalar_multiplyn);
@@ -121,9 +121,9 @@ public:
 	Ref<MLPPVector> hadamard_productn(const Ref<MLPPVector> &b) const;
 	void hadamard_productb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);

-	void element_wise_division(const Ref<MLPPVector> &b);
-	Ref<MLPPVector> element_wise_divisionn(const Ref<MLPPVector> &b) const;
-	void element_wise_divisionb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	void division_element_wise(const Ref<MLPPVector> &b);
+	Ref<MLPPVector> division_element_wisen(const Ref<MLPPVector> &b) const;
+	void division_element_wiseb(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);

 	void scalar_multiply(real_t scalar);
 	Ref<MLPPVector> scalar_multiplyn(real_t scalar) const;
@@ -405,7 +405,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz

 	v = alg.hadamard_productnv(weight_grad, weight_grad);

-	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
+	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));

 	// Calculating the bias gradients
 	_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -460,7 +460,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si

 	v = alg.additionnv(alg.scalar_multiplynv(b1, v), alg.scalar_multiplynv(1 - b1, alg.hadamard_productnv(weight_grad, weight_grad)));

-	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));
+	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(weight_grad, alg.sqrtnv(alg.scalar_addnv(e, v)))));

 	// Calculating the bias gradients
 	_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -519,7 +519,7 @@ void MLPPLinReg::adam(real_t learning_rate, int max_epoch, int mini_batch_size,
 	Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
 	Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);

-	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
+	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenvnm(m_hat, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));

 	// Calculating the bias gradients
 	_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -576,7 +576,7 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size

 	Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);

-	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_hat, u)));
+	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(m_hat, u)));

 	// Calculating the bias gradients
 	_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
@@ -637,7 +637,7 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
 	Ref<MLPPVector> m_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m);
 	Ref<MLPPVector> v_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v);

-	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.element_wise_divisionnv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));
+	_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate, alg.division_element_wisenv(m_final, alg.scalar_addnv(e, alg.sqrtnv(v_hat)))));

 	// Calculating the bias gradients
 	_bias -= learning_rate * alg.sum_elementsv(error) / current_output_mini_batch->size(); // As normal
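The linear-regression hunks above follow the same pattern as the ANN ones: Adagrad accumulates squared gradients into v and scales each weight's step by 1 / sqrt(e + v). A scalar sketch of the update the renamed call performs (hypothetical illustration, not library code):

#include <cmath>
#include <cstddef>

// w[i] -= lr * g[i] / sqrt(e + v[i]), with v[i] accumulating g[i]^2 --
// the element-wise reading of division_element_wisenv(weight_grad, sqrtnv(scalar_addnv(e, v))).
void adagrad_step_sketch(double *w, double *v, const double *g,
		double lr, double e, std::size_t n) {
	for (std::size_t i = 0; i < n; ++i) {
		v[i] += g[i] * g[i];
		w[i] -= lr * g[i] / std::sqrt(e + v[i]);
	}
}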