mirror of https://github.com/Relintai/pmlpp.git (synced 2024-12-21 14:56:47 +01:00)
More cleanups.

commit 539167fee9 (parent 5a4ff2f19e)
@@ -407,135 +407,6 @@ public:
    Ref<MLPPVector> arcoth_derivv(const Ref<MLPPVector> &z);
    Ref<MLPPMatrix> arcoth_derivm(const Ref<MLPPMatrix> &z);

-    // ========= OLD ===========
-
-    real_t linear(real_t z, bool deriv = false);
-    std::vector<real_t> linear(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> linear(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t sigmoid(real_t z, bool deriv = false);
-    std::vector<real_t> sigmoid(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> sigmoid(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    std::vector<real_t> softmax(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> softmax(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    std::vector<real_t> adjSoftmax(std::vector<real_t> z);
-    std::vector<std::vector<real_t>> adjSoftmax(std::vector<std::vector<real_t>> z);
-
-    std::vector<std::vector<real_t>> softmaxDeriv(std::vector<real_t> z);
-    std::vector<std::vector<std::vector<real_t>>> softmaxDeriv(std::vector<std::vector<real_t>> z);
-
-    real_t softplus(real_t z, bool deriv = false);
-    std::vector<real_t> softplus(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> softplus(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t softsign(real_t z, bool deriv = false);
-    std::vector<real_t> softsign(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> softsign(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t gaussianCDF(real_t z, bool deriv = false);
-    std::vector<real_t> gaussianCDF(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> gaussianCDF(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t cloglog(real_t z, bool deriv = false);
-    std::vector<real_t> cloglog(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> cloglog(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t logit(real_t z, bool deriv = false);
-    std::vector<real_t> logit(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> logit(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t unitStep(real_t z, bool deriv = false);
-    std::vector<real_t> unitStep(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> unitStep(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t swish(real_t z, bool deriv = false);
-    std::vector<real_t> swish(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> swish(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t mish(real_t z, bool deriv = false);
-    std::vector<real_t> mish(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> mish(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t sinc(real_t z, bool deriv = false);
-    std::vector<real_t> sinc(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> sinc(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t RELU(real_t z, bool deriv = false);
-    std::vector<real_t> RELU(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> RELU(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t leakyReLU(real_t z, real_t c, bool deriv = false);
-    std::vector<real_t> leakyReLU(std::vector<real_t> z, real_t c, bool deriv = false);
-    std::vector<std::vector<real_t>> leakyReLU(std::vector<std::vector<real_t>> z, real_t c, bool deriv = false);
-
-    real_t ELU(real_t z, real_t c, bool deriv = false);
-    std::vector<real_t> ELU(std::vector<real_t> z, real_t c, bool deriv = false);
-    std::vector<std::vector<real_t>> ELU(std::vector<std::vector<real_t>> z, real_t c, bool deriv = false);
-
-    real_t SELU(real_t z, real_t lambda, real_t c, bool deriv = false);
-    std::vector<real_t> SELU(std::vector<real_t> z, real_t lambda, real_t c, bool deriv = false);
-    std::vector<std::vector<real_t>> SELU(std::vector<std::vector<real_t>>, real_t lambda, real_t c, bool deriv = false);
-
-    real_t GELU(real_t z, bool deriv = false);
-    std::vector<real_t> GELU(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> GELU(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t sign(real_t z, bool deriv = false);
-    std::vector<real_t> sign(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> sign(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t sinh(real_t z, bool deriv = false);
-    std::vector<real_t> sinh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> sinh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t cosh(real_t z, bool deriv = false);
-    std::vector<real_t> cosh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> cosh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t tanh(real_t z, bool deriv = false);
-    std::vector<real_t> tanh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> tanh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t csch(real_t z, bool deriv = false);
-    std::vector<real_t> csch(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> csch(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t sech(real_t z, bool deriv = false);
-    std::vector<real_t> sech(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> sech(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t coth(real_t z, bool deriv = false);
-    std::vector<real_t> coth(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> coth(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t arsinh(real_t z, bool deriv = false);
-    std::vector<real_t> arsinh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> arsinh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t arcosh(real_t z, bool deriv = false);
-    std::vector<real_t> arcosh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> arcosh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t artanh(real_t z, bool deriv = false);
-    std::vector<real_t> artanh(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> artanh(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t arcsch(real_t z, bool deriv = false);
-    std::vector<real_t> arcsch(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> arcsch(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t arsech(real_t z, bool deriv = false);
-    std::vector<real_t> arsech(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> arsech(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    real_t arcoth(real_t z, bool deriv = false);
-    std::vector<real_t> arcoth(std::vector<real_t> z, bool deriv = false);
-    std::vector<std::vector<real_t>> arcoth(std::vector<std::vector<real_t>> z, bool deriv = false);
-
-    std::vector<real_t> activation(std::vector<real_t> z, bool deriv, real_t (*function)(real_t, bool));
-
protected:
    static void _bind_methods();
};
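Every activation removed from the header above follows the same overload pattern: a real_t form, a std::vector<real_t> form, and a std::vector<std::vector<real_t>> form, each with a trailing deriv flag that switches the call from the function's value to its derivative. A minimal usage sketch (assuming only that real_t is the project's floating-point typedef and that the class is instantiated directly):

    MLPPActivationOld avn;

    real_t s = avn.sigmoid(0.5);        // value at z = 0.5
    real_t ds = avn.sigmoid(0.5, true); // derivative at the same point

    std::vector<real_t> z = { -1.0, 0.0, 1.0 };
    std::vector<real_t> a = avn.sigmoid(z);        // element-wise values
    std::vector<real_t> da = avn.sigmoid(z, true); // element-wise derivatives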
@@ -5,7 +5,7 @@
//

#include "activation_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"

#include <algorithm>
#include <cmath>
@@ -20,7 +20,7 @@ real_t MLPPActivationOld::linear(real_t z, bool deriv) {

std::vector<real_t> MLPPActivationOld::linear(std::vector<real_t> z, bool deriv) {
    if (deriv) {
-        MLPPLinAlg alg;
+        MLPPLinAlgOld alg;
        return alg.onevec(z.size());
    }
    return z;
@@ -28,7 +28,7 @@ std::vector<real_t> MLPPActivationOld::linear(std::vector<real_t> z, bool deriv)

std::vector<std::vector<real_t>> MLPPActivationOld::linear(std::vector<std::vector<real_t>> z, bool deriv) {
    if (deriv) {
-        MLPPLinAlg alg;
+        MLPPLinAlgOld alg;
        return alg.onemat(z.size(), z[0].size());
    }
    return z;
@@ -42,7 +42,7 @@ real_t MLPPActivationOld::sigmoid(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::sigmoid(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
    }
@@ -50,7 +50,7 @@ std::vector<real_t> MLPPActivationOld::sigmoid(std::vector<real_t> z, bool deriv
}

std::vector<std::vector<real_t>> MLPPActivationOld::sigmoid(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
    }
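Both overloads above compute the derivative through the identity sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)), spelled with the element-wise subtraction and hadamard_product helpers. A scalar sketch of what those calls expand to (assuming real_t is a float typedef; needs <cmath>):

    real_t sigmoid_deriv(real_t z) {
        real_t s = 1.0 / (1.0 + std::exp(-z)); // sigmoid(z)
        return s - s * s;                      // subtraction(sigmoid, hadamard_product(sigmoid, sigmoid))
    }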
@@ -58,7 +58,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::sigmoid(std::vector<std::vec
}

std::vector<real_t> MLPPActivationOld::softmax(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    std::vector<real_t> a;
    a.resize(z.size());
    std::vector<real_t> expZ = alg.exp(z);
@@ -84,7 +84,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::softmax(std::vector<std::vec
}

std::vector<real_t> MLPPActivationOld::adjSoftmax(std::vector<real_t> z) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    std::vector<real_t> a;
    real_t C = -*std::max_element(z.begin(), z.end());
    z = alg.scalarAdd(C, z);
@@ -122,7 +122,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::softmaxDeriv(std::vector<rea
}

std::vector<std::vector<std::vector<real_t>>> MLPPActivationOld::softmaxDeriv(std::vector<std::vector<real_t>> z) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    std::vector<std::vector<std::vector<real_t>>> deriv;
    std::vector<std::vector<real_t>> a = softmax(z);

@@ -153,7 +153,7 @@ std::vector<real_t> MLPPActivationOld::softplus(std::vector<real_t> z, bool deri
    if (deriv) {
        return sigmoid(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z)));
}

@@ -161,7 +161,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::softplus(std::vector<std::ve
    if (deriv) {
        return sigmoid(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.log(alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(z)));
}

@@ -173,7 +173,7 @@ real_t MLPPActivationOld::softsign(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::softsign(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2));
    }
@@ -181,7 +181,7 @@ std::vector<real_t> MLPPActivationOld::softsign(std::vector<real_t> z, bool deri
}

std::vector<std::vector<real_t>> MLPPActivationOld::softsign(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2));
    }
@@ -196,7 +196,7 @@ real_t MLPPActivationOld::gaussianCDF(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::gaussianCDF(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z))));
    }
@@ -204,7 +204,7 @@ std::vector<real_t> MLPPActivationOld::gaussianCDF(std::vector<real_t> z, bool d
}

std::vector<std::vector<real_t>> MLPPActivationOld::gaussianCDF(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z))));
    }
@@ -219,7 +219,7 @@ real_t MLPPActivationOld::cloglog(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::cloglog(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
    }
@@ -227,7 +227,7 @@ std::vector<real_t> MLPPActivationOld::cloglog(std::vector<real_t> z, bool deriv
}

std::vector<std::vector<real_t>> MLPPActivationOld::cloglog(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
    }
@@ -242,7 +242,7 @@ real_t MLPPActivationOld::logit(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::logit(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size()))));
    }
@@ -250,7 +250,7 @@ std::vector<real_t> MLPPActivationOld::logit(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::logit(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size()))));
    }
@@ -308,7 +308,7 @@ real_t MLPPActivationOld::swish(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::swish(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
    }
@@ -316,7 +316,7 @@ std::vector<real_t> MLPPActivationOld::swish(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::swish(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
    }
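Note that in both swish overloads the deriv branch builds alg.addition(...) but never returns it, so execution falls through to the non-derivative path; this commit carries that behavior over unchanged. The discarded expression matches the textbook derivative of swish(z) = z * sigmoid(z), so the branch presumably intends the following scalar result (a sketch, not the project's code; needs <cmath>):

    real_t swish_deriv(real_t z) {
        real_t s = 1.0 / (1.0 + std::exp(-z)); // sigmoid(z)
        real_t sw = z * s;                     // swish(z)
        return sw + s * (1.0 - sw);            // swish + sigmoid - sigmoid * swish
    }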
@@ -331,7 +331,7 @@ real_t MLPPActivationOld::mish(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::mish(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
    }
@@ -339,7 +339,7 @@ std::vector<real_t> MLPPActivationOld::mish(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::mish(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
    }
@@ -354,7 +354,7 @@ real_t MLPPActivationOld::sinc(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::sinc(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
    }
@@ -362,7 +362,7 @@ std::vector<real_t> MLPPActivationOld::sinc(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::sinc(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
    }
@@ -660,7 +660,7 @@ std::vector<real_t> MLPPActivationOld::sinh(std::vector<real_t> z, bool deriv) {
    if (deriv) {
        return cosh(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}

@@ -668,7 +668,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::sinh(std::vector<std::vector
    if (deriv) {
        return cosh(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}

@@ -683,7 +683,7 @@ std::vector<real_t> MLPPActivationOld::cosh(std::vector<real_t> z, bool deriv) {
    if (deriv) {
        return sinh(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}

@@ -691,7 +691,7 @@ std::vector<std::vector<real_t>> MLPPActivationOld::cosh(std::vector<std::vector
    if (deriv) {
        return sinh(z);
    }
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}

@@ -703,7 +703,7 @@ real_t MLPPActivationOld::tanh(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::tanh(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
    }
@@ -711,7 +711,7 @@ std::vector<real_t> MLPPActivationOld::tanh(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::tanh(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
    }
@@ -727,7 +727,7 @@ real_t MLPPActivationOld::csch(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::csch(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
    }
@@ -735,7 +735,7 @@ std::vector<real_t> MLPPActivationOld::csch(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::csch(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
    }
@@ -750,7 +750,7 @@ real_t MLPPActivationOld::sech(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::sech(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
    }
@@ -760,7 +760,7 @@ std::vector<real_t> MLPPActivationOld::sech(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::sech(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
    }
@@ -777,7 +777,7 @@ real_t MLPPActivationOld::coth(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::coth(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
    }
@@ -785,7 +785,7 @@ std::vector<real_t> MLPPActivationOld::coth(std::vector<real_t> z, bool deriv) {
}

std::vector<std::vector<real_t>> MLPPActivationOld::coth(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
    }
@@ -800,7 +800,7 @@ real_t MLPPActivationOld::arsinh(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::arsinh(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size()))));
    }
@@ -808,7 +808,7 @@ std::vector<real_t> MLPPActivationOld::arsinh(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::arsinh(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
    }
@@ -823,7 +823,7 @@ real_t MLPPActivationOld::arcosh(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::arcosh(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size()))));
    }
@@ -831,7 +831,7 @@ std::vector<real_t> MLPPActivationOld::arcosh(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::arcosh(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
    }
@@ -846,7 +846,7 @@ real_t MLPPActivationOld::artanh(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::artanh(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
    }
@@ -854,7 +854,7 @@ std::vector<real_t> MLPPActivationOld::artanh(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::artanh(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
    }
@@ -869,7 +869,7 @@ real_t MLPPActivationOld::arcsch(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::arcsch(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z))))));
    }
@@ -877,7 +877,7 @@ std::vector<real_t> MLPPActivationOld::arcsch(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::arcsch(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))));
    }
@@ -892,7 +892,7 @@ real_t MLPPActivationOld::arsech(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::arsech(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)))));
    }
@@ -900,7 +900,7 @@ std::vector<real_t> MLPPActivationOld::arsech(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::arsech(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))));
    }
@@ -915,7 +915,7 @@ real_t MLPPActivationOld::arcoth(real_t z, bool deriv) {
}

std::vector<real_t> MLPPActivationOld::arcoth(std::vector<real_t> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
    }
@@ -923,7 +923,7 @@ std::vector<real_t> MLPPActivationOld::arcoth(std::vector<real_t> z, bool deriv)
}

std::vector<std::vector<real_t>> MLPPActivationOld::arcoth(std::vector<std::vector<real_t>> z, bool deriv) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    if (deriv) {
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
    }
@@ -5,9 +5,9 @@
//

#include "ann_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

@@ -62,7 +62,7 @@ real_t MLPPANNOld::modelTest(std::vector<real_t> x) {

void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    real_t cost_prev = 0;
    int epoch = 1;
    forwardPass();
@@ -98,7 +98,7 @@ void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {

void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -139,7 +139,7 @@ void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) {

void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -184,7 +184,7 @@ void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size,

void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -246,7 +246,7 @@ void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_si

void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -307,7 +307,7 @@ void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz

void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -368,7 +368,7 @@ void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_si

void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -441,7 +441,7 @@ void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size,

void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -512,7 +512,7 @@ void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size

void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
@@ -587,7 +587,7 @@ void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size,

void MLPPANNOld::AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
    class MLPPCost cost;
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    real_t cost_prev = 0;
    int epoch = 1;
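These hunks only swap the MLPPLinAlg helper for MLPPLinAlgOld; the optimizer bodies themselves are elided from the diff. For orientation, the b1, b2, and e parameters in the Adam-family signatures are the usual exponential-decay rates and epsilon of the textbook Adam update, which per step looks roughly like this (a generic sketch with hypothetical w, g, m, v, and t variables, not the project's exact code; needs <cmath>):

    for (size_t i = 0; i < w.size(); ++i) {
        m[i] = b1 * m[i] + (1 - b1) * g[i];          // first-moment estimate of gradient g
        v[i] = b2 * v[i] + (1 - b2) * g[i] * g[i];   // second-moment estimate
        real_t m_hat = m[i] / (1 - std::pow(b1, t)); // bias correction for step t
        real_t v_hat = v[i] / (1 - std::pow(b2, t));
        w[i] -= learning_rate * m_hat / (std::sqrt(v_hat) + e);
    }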
@@ -757,7 +757,7 @@ void MLPPANNOld::forwardPass() {
}

void MLPPANNOld::updateParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;

    outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
    outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@@ -776,8 +776,8 @@ void MLPPANNOld::updateParameters(std::vector<std::vector<std::vector<real_t>>>
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPANNOld::computeGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
    // std::cout << "BEGIN" << std::endl;
    class MLPPCost cost;
-    MLPPActivation avn;
-    MLPPLinAlg alg;
+    MLPPActivationOld avn;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;

    std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@@ -6,7 +6,7 @@

#include "auto_encoder_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../utilities/utilities.h"
@@ -23,7 +23,7 @@ std::vector<real_t> MLPPAutoEncoderOld::modelTest(std::vector<real_t> x) {
}

void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -76,7 +76,7 @@ void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bo
}

void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -130,7 +130,7 @@ void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPAutoEncoderOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -210,7 +210,7 @@ MLPPAutoEncoderOld::MLPPAutoEncoderOld(std::vector<std::vector<real_t>> pinputSe
    n = inputSet.size();
    k = inputSet[0].size();

-    MLPPActivation avn;
+    MLPPActivationOld avn;
    y_hat.resize(inputSet.size());

    weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
@@ -226,7 +226,7 @@ real_t MLPPAutoEncoderOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vec

std::vector<std::vector<real_t>> MLPPAutoEncoderOld::Evaluate(std::vector<std::vector<real_t>> X) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
    std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
    return alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
@@ -234,7 +234,7 @@ std::vector<std::vector<real_t>> MLPPAutoEncoderOld::Evaluate(std::vector<std::v

std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPAutoEncoderOld::propagate(std::vector<std::vector<real_t>> X) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
    std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
    return { z2, a2 };
@@ -242,7 +242,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M

std::vector<real_t> MLPPAutoEncoderOld::Evaluate(std::vector<real_t> x) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
    std::vector<real_t> a2 = avn.sigmoid(z2);
    return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2);
@@ -250,7 +250,7 @@ std::vector<real_t> MLPPAutoEncoderOld::Evaluate(std::vector<real_t> x) {

std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPAutoEncoderOld::propagate(std::vector<real_t> x) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
    std::vector<real_t> a2 = avn.sigmoid(z2);
    return { z2, a2 };
@@ -258,7 +258,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPAutoEncoderOld::propaga

void MLPPAutoEncoderOld::forwardPass() {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
    a2 = avn.sigmoid(z2);
    y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
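Across these five hunks the data flow is the same two-layer reconstruction: z2 = X*W1 + b1, a2 = sigmoid(z2), y_hat = a2*W2 + b2. A self-contained sketch of that forward pass with plain std::vector math (a hypothetical helper-free version using double for real_t, not the project's implementation):

    #include <cmath>
    #include <vector>

    using Mat = std::vector<std::vector<double>>;
    using Vec = std::vector<double>;

    Mat forward(const Mat &X, const Mat &W1, const Vec &b1, const Mat &W2, const Vec &b2) {
        auto affine = [](const Mat &A, const Mat &W, const Vec &b) {
            Mat Z(A.size(), Vec(W[0].size(), 0.0)); // Z = A*W + b, row by row
            for (size_t i = 0; i < A.size(); ++i)
                for (size_t j = 0; j < W[0].size(); ++j) {
                    for (size_t k = 0; k < W.size(); ++k)
                        Z[i][j] += A[i][k] * W[k][j];
                    Z[i][j] += b[j];
                }
            return Z;
        };
        Mat a2 = affine(X, W1, b1);                // z2 = X*W1 + b1
        for (auto &row : a2)
            for (double &v : row)
                v = 1.0 / (1.0 + std::exp(-v));    // a2 = sigmoid(z2)
        return affine(a2, W2, b2);                 // y_hat = a2*W2 + b2
    }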
@@ -6,7 +6,7 @@

#include "c_log_log_reg_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -31,7 +31,7 @@ real_t MLPPCLogLogRegOld::modelTest(std::vector<real_t> x) {
}

void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
@@ -65,7 +65,7 @@ void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, boo
}

void MLPPCLogLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
@@ -138,7 +138,7 @@ void MLPPCLogLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPCLogLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
@@ -195,7 +195,7 @@ real_t MLPPCLogLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y)

std::vector<real_t> MLPPCLogLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

@@ -206,7 +206,7 @@ std::vector<real_t> MLPPCLogLogRegOld::propagate(std::vector<std::vector<real_t>

real_t MLPPCLogLogRegOld::Evaluate(std::vector<real_t> x) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    return avn.cloglog(alg.dot(weights, x) + bias);
}

@@ -217,7 +217,7 @@ real_t MLPPCLogLogRegOld::propagate(std::vector<real_t> x) {

// cloglog ( wTx + b )
void MLPPCLogLogRegOld::forwardPass() {
-    MLPPActivation avn;
+    MLPPActivationOld avn;

    z = propagate(inputSet);
    y_hat = avn.cloglog(z);
@@ -203,7 +203,7 @@ real_t MLPPDualSVC::cost(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X,

real_t MLPPDualSVC::evaluatev(const Ref<MLPPVector> &x) {
    MLPPActivation avn;
-    return avn.sign(propagatev(x));
+    return avn.sign_normr(propagatev(x));
}

real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
@@ -5,7 +5,7 @@
//

#include "dual_svc_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -38,7 +38,7 @@ real_t MLPPDualSVCOld::modelTest(std::vector<real_t> x) {

void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
    class MLPPCost cost;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
@@ -86,7 +86,7 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U

// void MLPPDualSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI){
//     class MLPPCost cost;
-//     MLPPActivation avn;
+//     MLPPActivationOld avn;
//     MLPPLinAlg alg;
//     MLPPReg regularization;

@@ -119,7 +119,7 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U

// void MLPPDualSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){
//     class MLPPCost cost;
-//     MLPPActivation avn;
+//     MLPPActivationOld avn;
//     MLPPLinAlg alg;
//     MLPPReg regularization;
//     real_t cost_prev = 0;
@@ -173,7 +173,7 @@ real_t MLPPDualSVCOld::Cost(std::vector<real_t> alpha, std::vector<std::vector<r
}

std::vector<real_t> MLPPDualSVCOld::Evaluate(std::vector<std::vector<real_t>> X) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    return avn.sign(propagate(X));
}

@@ -194,7 +194,7 @@ std::vector<real_t> MLPPDualSVCOld::propagate(std::vector<std::vector<real_t>> X
}

real_t MLPPDualSVCOld::Evaluate(std::vector<real_t> x) {
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    return avn.sign(propagate(x));
}

@@ -211,7 +211,7 @@ real_t MLPPDualSVCOld::propagate(std::vector<real_t> x) {
}

void MLPPDualSVCOld::forwardPass() {
-    MLPPActivation avn;
+    MLPPActivationOld avn;

    z = propagate(inputSet);
    y_hat = avn.sign(z);
@@ -5,7 +5,7 @@
//

#include "gan_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -211,7 +211,7 @@ void MLPPGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<r

std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
    class MLPPCost cost;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;

@@ -247,7 +247,7 @@ std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> M

std::vector<std::vector<std::vector<real_t>>> MLPPGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
    class MLPPCost cost;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    MLPPLinAlg alg;
    MLPPReg regularization;

@@ -23,88 +23,88 @@ MLPPOldHiddenLayer::MLPPOldHiddenLayer(int p_n_hidden, std::string p_activation,
    weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit);
    bias = MLPPUtilities::biasInitialization(n_hidden);

-    activation_map["Linear"] = &MLPPActivation::linear;
-    activationTest_map["Linear"] = &MLPPActivation::linear;
+    activation_map["Linear"] = &MLPPActivationOld::linear;
+    activationTest_map["Linear"] = &MLPPActivationOld::linear;

-    activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
-    activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
+    activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid;
+    activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid;

-    activation_map["Swish"] = &MLPPActivation::swish;
-    activationTest_map["Swish"] = &MLPPActivation::swish;
+    activation_map["Swish"] = &MLPPActivationOld::swish;
+    activationTest_map["Swish"] = &MLPPActivationOld::swish;

-    activation_map["Mish"] = &MLPPActivation::mish;
-    activationTest_map["Mish"] = &MLPPActivation::mish;
+    activation_map["Mish"] = &MLPPActivationOld::mish;
+    activationTest_map["Mish"] = &MLPPActivationOld::mish;

-    activation_map["SinC"] = &MLPPActivation::sinc;
-    activationTest_map["SinC"] = &MLPPActivation::sinc;
+    activation_map["SinC"] = &MLPPActivationOld::sinc;
+    activationTest_map["SinC"] = &MLPPActivationOld::sinc;

-    activation_map["Softplus"] = &MLPPActivation::softplus;
-    activationTest_map["Softplus"] = &MLPPActivation::softplus;
+    activation_map["Softplus"] = &MLPPActivationOld::softplus;
+    activationTest_map["Softplus"] = &MLPPActivationOld::softplus;

-    activation_map["Softsign"] = &MLPPActivation::softsign;
-    activationTest_map["Softsign"] = &MLPPActivation::softsign;
+    activation_map["Softsign"] = &MLPPActivationOld::softsign;
+    activationTest_map["Softsign"] = &MLPPActivationOld::softsign;

-    activation_map["CLogLog"] = &MLPPActivation::cloglog;
-    activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
+    activation_map["CLogLog"] = &MLPPActivationOld::cloglog;
+    activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog;

-    activation_map["Logit"] = &MLPPActivation::logit;
-    activationTest_map["Logit"] = &MLPPActivation::logit;
+    activation_map["Logit"] = &MLPPActivationOld::logit;
+    activationTest_map["Logit"] = &MLPPActivationOld::logit;

-    activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
-    activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
+    activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;
+    activationTest_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;

-    activation_map["RELU"] = &MLPPActivation::RELU;
-    activationTest_map["RELU"] = &MLPPActivation::RELU;
+    activation_map["RELU"] = &MLPPActivationOld::RELU;
+    activationTest_map["RELU"] = &MLPPActivationOld::RELU;

-    activation_map["GELU"] = &MLPPActivation::GELU;
-    activationTest_map["GELU"] = &MLPPActivation::GELU;
+    activation_map["GELU"] = &MLPPActivationOld::GELU;
+    activationTest_map["GELU"] = &MLPPActivationOld::GELU;

-    activation_map["Sign"] = &MLPPActivation::sign;
-    activationTest_map["Sign"] = &MLPPActivation::sign;
+    activation_map["Sign"] = &MLPPActivationOld::sign;
+    activationTest_map["Sign"] = &MLPPActivationOld::sign;

-    activation_map["UnitStep"] = &MLPPActivation::unitStep;
-    activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
+    activation_map["UnitStep"] = &MLPPActivationOld::unitStep;
+    activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep;

-    activation_map["Sinh"] = &MLPPActivation::sinh;
-    activationTest_map["Sinh"] = &MLPPActivation::sinh;
+    activation_map["Sinh"] = &MLPPActivationOld::sinh;
+    activationTest_map["Sinh"] = &MLPPActivationOld::sinh;

-    activation_map["Cosh"] = &MLPPActivation::cosh;
-    activationTest_map["Cosh"] = &MLPPActivation::cosh;
+    activation_map["Cosh"] = &MLPPActivationOld::cosh;
+    activationTest_map["Cosh"] = &MLPPActivationOld::cosh;

-    activation_map["Tanh"] = &MLPPActivation::tanh;
-    activationTest_map["Tanh"] = &MLPPActivation::tanh;
+    activation_map["Tanh"] = &MLPPActivationOld::tanh;
+    activationTest_map["Tanh"] = &MLPPActivationOld::tanh;

-    activation_map["Csch"] = &MLPPActivation::csch;
-    activationTest_map["Csch"] = &MLPPActivation::csch;
+    activation_map["Csch"] = &MLPPActivationOld::csch;
+    activationTest_map["Csch"] = &MLPPActivationOld::csch;

-    activation_map["Sech"] = &MLPPActivation::sech;
-    activationTest_map["Sech"] = &MLPPActivation::sech;
+    activation_map["Sech"] = &MLPPActivationOld::sech;
+    activationTest_map["Sech"] = &MLPPActivationOld::sech;

-    activation_map["Coth"] = &MLPPActivation::coth;
-    activationTest_map["Coth"] = &MLPPActivation::coth;
+    activation_map["Coth"] = &MLPPActivationOld::coth;
+    activationTest_map["Coth"] = &MLPPActivationOld::coth;

-    activation_map["Arsinh"] = &MLPPActivation::arsinh;
-    activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
+    activation_map["Arsinh"] = &MLPPActivationOld::arsinh;
+    activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh;

-    activation_map["Arcosh"] = &MLPPActivation::arcosh;
-    activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
+    activation_map["Arcosh"] = &MLPPActivationOld::arcosh;
+    activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh;

-    activation_map["Artanh"] = &MLPPActivation::artanh;
-    activationTest_map["Artanh"] = &MLPPActivation::artanh;
+    activation_map["Artanh"] = &MLPPActivationOld::artanh;
+    activationTest_map["Artanh"] = &MLPPActivationOld::artanh;

-    activation_map["Arcsch"] = &MLPPActivation::arcsch;
-    activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
+    activation_map["Arcsch"] = &MLPPActivationOld::arcsch;
+    activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch;

-    activation_map["Arsech"] = &MLPPActivation::arsech;
-    activationTest_map["Arsech"] = &MLPPActivation::arsech;
+    activation_map["Arsech"] = &MLPPActivationOld::arsech;
+    activationTest_map["Arsech"] = &MLPPActivationOld::arsech;

-    activation_map["Arcoth"] = &MLPPActivation::arcoth;
-    activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
+    activation_map["Arcoth"] = &MLPPActivationOld::arcoth;
+    activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth;
}

void MLPPOldHiddenLayer::forwardPass() {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;

    z = alg.mat_vec_add(alg.matmult(input, weights), bias);
    a = (avn.*activation_map[activation])(z, false);
@@ -112,7 +112,7 @@ void MLPPOldHiddenLayer::forwardPass() {

void MLPPOldHiddenLayer::Test(std::vector<real_t> x) {
    MLPPLinAlg alg;
-    MLPPActivation avn;
+    MLPPActivationOld avn;
    z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
    a_test = (avn.*activationTest_map[activation])(z_test, false);
}
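forwardPass and Test above dispatch through activation_map and activationTest_map, maps from an activation's display name to a pointer-to-member-function, invoked with the (object.*ptr)(args) syntax. A self-contained sketch of that mechanism with stand-in names (not the project's types):

    #include <cmath>
    #include <map>
    #include <string>
    #include <vector>

    struct Activation {
        std::vector<double> sigmoid(std::vector<double> z, bool deriv) {
            for (double &v : z) {
                double s = 1.0 / (1.0 + std::exp(-v));
                v = deriv ? s * (1.0 - s) : s;
            }
            return z;
        }
    };

    int main() {
        // Map of name -> pointer to member function, as in activation_map above.
        std::map<std::string, std::vector<double> (Activation::*)(std::vector<double>, bool)> fns;
        fns["Sigmoid"] = &Activation::sigmoid;

        Activation avn;
        std::vector<double> a = (avn.*fns["Sigmoid"])({ -1.0, 0.0, 1.0 }, false);
        return a.size() == 3 ? 0 : 1;
    }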
@@ -13,7 +13,7 @@

#include "core/object/reference.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

@@ -39,8 +39,8 @@ public:
    std::vector<std::vector<real_t>> z;
    std::vector<std::vector<real_t>> a;

-    std::map<std::string, std::vector<std::vector<real_t>> (MLPPActivation::*)(std::vector<std::vector<real_t>>, bool)> activation_map;
-    std::map<std::string, std::vector<real_t> (MLPPActivation::*)(std::vector<real_t>, bool)> activationTest_map;
+    std::map<std::string, std::vector<std::vector<real_t>> (MLPPActivationOld::*)(std::vector<std::vector<real_t>>, bool)> activation_map;
+    std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activationTest_map;

    std::vector<real_t> z_test;
    std::vector<real_t> a_test;
@@ -2118,59 +2118,6 @@ Ref<MLPPVector> MLPPLinAlg::subtract_matrix_rows(const Ref<MLPPVector> &a, const
    return c;
}

-std::vector<real_t> MLPPLinAlg::log(std::vector<real_t> a) {
-    std::vector<real_t> b;
-    b.resize(a.size());
-    for (uint32_t i = 0; i < a.size(); i++) {
-        b[i] = Math::log(a[i]);
-    }
-    return b;
-}
-
-std::vector<real_t> MLPPLinAlg::log10(std::vector<real_t> a) {
-    std::vector<real_t> b;
-    b.resize(a.size());
-    for (uint32_t i = 0; i < a.size(); i++) {
-        b[i] = Math::log10(a[i]);
-    }
-    return b;
-}
-
-std::vector<real_t> MLPPLinAlg::exp(std::vector<real_t> a) {
-    std::vector<real_t> b;
-    b.resize(a.size());
-    for (uint32_t i = 0; i < a.size(); i++) {
-        b[i] = Math::exp(a[i]);
-    }
-    return b;
-}
-
-std::vector<real_t> MLPPLinAlg::erf(std::vector<real_t> a) {
-    std::vector<real_t> b;
-    b.resize(a.size());
-    for (uint32_t i = 0; i < a.size(); i++) {
-        b[i] = Math::erf(a[i]);
-    }
-    return b;
-}
-
-std::vector<real_t> MLPPLinAlg::exponentiate(std::vector<real_t> a, real_t p) {
-    std::vector<real_t> b;
-    b.resize(a.size());
-    for (uint32_t i = 0; i < b.size(); i++) {
-        b[i] = Math::pow(a[i], p);
-    }
-    return b;
-}
-
-std::vector<real_t> MLPPLinAlg::sqrt(std::vector<real_t> a) {
-    return exponentiate(a, 0.5);
-}
-
-std::vector<real_t> MLPPLinAlg::cbrt(std::vector<real_t> a) {
-    return exponentiate(a, real_t(1) / real_t(3));
-}
-
Ref<MLPPVector> MLPPLinAlg::lognv(const Ref<MLPPVector> &a) {
    ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPVector>());

@@ -227,14 +227,6 @@ public:
    std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
    Ref<MLPPVector> subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);

-    std::vector<real_t> log(std::vector<real_t> a);
-    std::vector<real_t> log10(std::vector<real_t> a);
-    std::vector<real_t> exp(std::vector<real_t> a);
-    std::vector<real_t> erf(std::vector<real_t> a);
-    std::vector<real_t> exponentiate(std::vector<real_t> a, real_t p);
-    std::vector<real_t> sqrt(std::vector<real_t> a);
-    std::vector<real_t> cbrt(std::vector<real_t> a);
-
    Ref<MLPPVector> lognv(const Ref<MLPPVector> &a);
    Ref<MLPPVector> log10nv(const Ref<MLPPVector> &a);
    Ref<MLPPVector> expnv(const Ref<MLPPVector> &a);
@@ -253,6 +253,19 @@ std::vector<std::vector<real_t>> MLPPLinAlgOld::cbrt(std::vector<std::vector<rea
    return exponentiate(A, real_t(1) / real_t(3));
}

+std::vector<std::vector<real_t>> MLPPLinAlgOld::matrixPower(std::vector<std::vector<real_t>> A, int n) {
+    std::vector<std::vector<real_t>> B = identity(A.size());
+    if (n == 0) {
+        return identity(A.size());
+    } else if (n < 0) {
+        A = inverse(A);
+    }
+    for (int i = 0; i < std::abs(n); i++) {
+        B = matmult(B, A);
+    }
+    return B;
+}
+
std::vector<std::vector<real_t>> MLPPLinAlgOld::abs(std::vector<std::vector<real_t>> A) {
    std::vector<std::vector<real_t>> B;
    B.resize(A.size());
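The added matrixPower computes A^n by repeated multiplication, returning the identity for n == 0 and inverting A first for negative n. Hypothetical usage, with names taken from this diff:

    MLPPLinAlgOld alg;
    std::vector<std::vector<real_t>> A = { { 2, 0 }, { 0, 3 } };
    std::vector<std::vector<real_t>> A3 = alg.matrixPower(A, 3);   // { { 8, 0 }, { 0, 27 } }
    std::vector<std::vector<real_t>> Am2 = alg.matrixPower(A, -2); // inverse(A) multiplied by itself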
@@ -1070,6 +1083,14 @@ std::vector<real_t> MLPPLinAlgOld::cbrt(std::vector<real_t> a) {
    return exponentiate(a, real_t(1) / real_t(3));
}

+real_t MLPPLinAlgOld::dot(std::vector<real_t> a, std::vector<real_t> b) {
+    real_t c = 0;
+    for (uint32_t i = 0; i < a.size(); i++) {
+        c += a[i] * b[i];
+    }
+    return c;
+}
+
std::vector<real_t> MLPPLinAlgOld::cross(std::vector<real_t> a, std::vector<real_t> b) {
    // Cross products exist in R^7 also. Though, I will limit it to R^3 as Wolfram does this.
    std::vector<std::vector<real_t>> mat = { onevec(3), a, b };
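The cross implementation beneath the added dot builds a { onevec(3), a, b } matrix, presumably to expand its determinant along the ones row; for R^3 that reduces to the familiar component formula, a plain sketch of which is:

    std::vector<real_t> cross3(const std::vector<real_t> &a, const std::vector<real_t> &b) {
        return {
            a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0],
        };
    }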
@@ -7,7 +7,7 @@
#include "lin_reg_old.h"

#include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h"
#include "../stat/stat.h"
#include "../utilities/utilities.h"
@@ -40,7 +40,7 @@ real_t MLPPLinRegOld::modelTest(std::vector<real_t> x) {
}

void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -72,7 +72,7 @@ void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI)
}

void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -103,7 +103,7 @@ void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI
}

void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -142,7 +142,7 @@ void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -182,7 +182,7 @@ void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_siz
}

void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -229,7 +229,7 @@ void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch
}

void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -278,7 +278,7 @@ void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size
}

void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -326,7 +326,7 @@ void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_

void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
    // Adagrad upgrade. Momentum is applied.
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -373,7 +373,7 @@ void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch
}

void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -426,7 +426,7 @@ void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_siz
}

void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -477,7 +477,7 @@ void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_s
}

void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPReg regularization;
    real_t cost_prev = 0;
    int epoch = 1;
@@ -531,7 +531,7 @@ void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_si
}

void MLPPLinRegOld::normalEquation() {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
    MLPPStat stat;
    std::vector<real_t> x_means;
    std::vector<std::vector<real_t>> inputSetT = alg.transpose(inputSet);
@ -583,12 +583,12 @@ real_t MLPPLinRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
|
||||
}
|
||||
|
||||
std::vector<real_t> MLPPLinRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPLinAlgOld alg;
|
||||
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
|
||||
}
|
||||
|
||||
real_t MLPPLinRegOld::Evaluate(std::vector<real_t> x) {
|
||||
MLPPLinAlg alg;
|
||||
MLPPLinAlgOld alg;
|
||||
return alg.dot(weights, x) + bias;
|
||||
}
|
||||
|
||||
|
@@ -6,7 +6,7 @@

#include "log_reg_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -197,13 +197,13 @@ real_t MLPPLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {

std::vector<real_t> MLPPLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

real_t MLPPLogRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.sigmoid(alg.dot(weights, x) + bias);
}

@@ -6,7 +6,7 @@

#include "mann_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -54,7 +54,7 @@ std::vector<real_t> MLPPMANNOld::modelTest(std::vector<real_t> x) {

void MLPPMANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	class MLPPCost cost;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

@@ -380,7 +380,7 @@ real_t MLPPMLP::evaluatev(const Ref<MLPPVector> &x) {
	Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposem(_weights1), x), _bias1);
	Ref<MLPPVector> pa2 = avn.sigmoid_normv(pz2);

-	return avn.sigmoid(alg.dotv(_weights2, pa2) + _bias2);
+	return avn.sigmoid_normr(alg.dotv(_weights2, pa2) + _bias2);
}
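
// [editor's note: not part of this commit] In math form, the two-layer forward
// pass above computes
//   y_hat = sigmoid( w2 . sigmoid( W1^T x + b1 ) + b2 )
// where pz2 = W1^T x + b1 is the hidden pre-activation and pa2 its sigmoid image.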

void MLPPMLP::propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out) {

@@ -8,7 +8,7 @@

#include "core/log/logger.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -27,7 +27,7 @@ MLPPMLPOld::MLPPMLPOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<
	lambda = p_lambda;
	alpha = p_alpha;

-	MLPPActivation avn;
+	MLPPActivationOld avn;
	y_hat.resize(n);

	weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
@@ -45,7 +45,7 @@ real_t MLPPMLPOld::modelTest(std::vector<real_t> x) {
}

void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -104,7 +104,7 @@ void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -160,7 +160,7 @@ void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPMLPOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -249,7 +249,7 @@ real_t MLPPMLPOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {

std::vector<real_t> MLPPMLPOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
@@ -257,7 +257,7 @@ std::vector<real_t> MLPPMLPOld::Evaluate(std::vector<std::vector<real_t>> X) {

std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPMLPOld::propagate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return { z2, a2 };
@@ -265,7 +265,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M

real_t MLPPMLPOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return avn.sigmoid(alg.dot(weights2, a2) + bias2);
@@ -273,7 +273,7 @@ real_t MLPPMLPOld::Evaluate(std::vector<real_t> x) {

std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPMLPOld::propagate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return { z2, a2 };
@@ -281,7 +281,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPMLPOld::propagate(std::

void MLPPMLPOld::forwardPass() {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
	a2 = avn.sigmoid(z2);
	y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));

@@ -25,86 +25,86 @@ MLPPOldMultiOutputLayer::MLPPOldMultiOutputLayer(int p_n_output, int p_n_hidden,
	weights = MLPPUtilities::weightInitialization(n_hidden, n_output, weightInit);
	bias = MLPPUtilities::biasInitialization(n_output);

-	activation_map["Linear"] = &MLPPActivation::linear;
-	activationTest_map["Linear"] = &MLPPActivation::linear;
+	activation_map["Linear"] = &MLPPActivationOld::linear;
+	activationTest_map["Linear"] = &MLPPActivationOld::linear;

-	activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
-	activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
+	activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid;
+	activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid;

-	activation_map["Softmax"] = &MLPPActivation::softmax;
-	activationTest_map["Softmax"] = &MLPPActivation::softmax;
+	activation_map["Softmax"] = &MLPPActivationOld::softmax;
+	activationTest_map["Softmax"] = &MLPPActivationOld::softmax;

-	activation_map["Swish"] = &MLPPActivation::swish;
-	activationTest_map["Swish"] = &MLPPActivation::swish;
+	activation_map["Swish"] = &MLPPActivationOld::swish;
+	activationTest_map["Swish"] = &MLPPActivationOld::swish;

-	activation_map["Mish"] = &MLPPActivation::mish;
-	activationTest_map["Mish"] = &MLPPActivation::mish;
+	activation_map["Mish"] = &MLPPActivationOld::mish;
+	activationTest_map["Mish"] = &MLPPActivationOld::mish;

-	activation_map["SinC"] = &MLPPActivation::sinc;
-	activationTest_map["SinC"] = &MLPPActivation::sinc;
+	activation_map["SinC"] = &MLPPActivationOld::sinc;
+	activationTest_map["SinC"] = &MLPPActivationOld::sinc;

-	activation_map["Softplus"] = &MLPPActivation::softplus;
-	activationTest_map["Softplus"] = &MLPPActivation::softplus;
+	activation_map["Softplus"] = &MLPPActivationOld::softplus;
+	activationTest_map["Softplus"] = &MLPPActivationOld::softplus;

-	activation_map["Softsign"] = &MLPPActivation::softsign;
-	activationTest_map["Softsign"] = &MLPPActivation::softsign;
+	activation_map["Softsign"] = &MLPPActivationOld::softsign;
+	activationTest_map["Softsign"] = &MLPPActivationOld::softsign;

-	activation_map["CLogLog"] = &MLPPActivation::cloglog;
-	activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
+	activation_map["CLogLog"] = &MLPPActivationOld::cloglog;
+	activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog;

-	activation_map["Logit"] = &MLPPActivation::logit;
-	activationTest_map["Logit"] = &MLPPActivation::logit;
+	activation_map["Logit"] = &MLPPActivationOld::logit;
+	activationTest_map["Logit"] = &MLPPActivationOld::logit;

-	activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
-	activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
+	activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;
+	activationTest_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;

-	activation_map["RELU"] = &MLPPActivation::RELU;
-	activationTest_map["RELU"] = &MLPPActivation::RELU;
+	activation_map["RELU"] = &MLPPActivationOld::RELU;
+	activationTest_map["RELU"] = &MLPPActivationOld::RELU;

-	activation_map["GELU"] = &MLPPActivation::GELU;
-	activationTest_map["GELU"] = &MLPPActivation::GELU;
+	activation_map["GELU"] = &MLPPActivationOld::GELU;
+	activationTest_map["GELU"] = &MLPPActivationOld::GELU;

-	activation_map["Sign"] = &MLPPActivation::sign;
-	activationTest_map["Sign"] = &MLPPActivation::sign;
+	activation_map["Sign"] = &MLPPActivationOld::sign;
+	activationTest_map["Sign"] = &MLPPActivationOld::sign;

-	activation_map["UnitStep"] = &MLPPActivation::unitStep;
-	activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
+	activation_map["UnitStep"] = &MLPPActivationOld::unitStep;
+	activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep;

-	activation_map["Sinh"] = &MLPPActivation::sinh;
-	activationTest_map["Sinh"] = &MLPPActivation::sinh;
+	activation_map["Sinh"] = &MLPPActivationOld::sinh;
+	activationTest_map["Sinh"] = &MLPPActivationOld::sinh;

-	activation_map["Cosh"] = &MLPPActivation::cosh;
-	activationTest_map["Cosh"] = &MLPPActivation::cosh;
+	activation_map["Cosh"] = &MLPPActivationOld::cosh;
+	activationTest_map["Cosh"] = &MLPPActivationOld::cosh;

-	activation_map["Tanh"] = &MLPPActivation::tanh;
-	activationTest_map["Tanh"] = &MLPPActivation::tanh;
+	activation_map["Tanh"] = &MLPPActivationOld::tanh;
+	activationTest_map["Tanh"] = &MLPPActivationOld::tanh;

-	activation_map["Csch"] = &MLPPActivation::csch;
-	activationTest_map["Csch"] = &MLPPActivation::csch;
+	activation_map["Csch"] = &MLPPActivationOld::csch;
+	activationTest_map["Csch"] = &MLPPActivationOld::csch;

-	activation_map["Sech"] = &MLPPActivation::sech;
-	activationTest_map["Sech"] = &MLPPActivation::sech;
+	activation_map["Sech"] = &MLPPActivationOld::sech;
+	activationTest_map["Sech"] = &MLPPActivationOld::sech;

-	activation_map["Coth"] = &MLPPActivation::coth;
-	activationTest_map["Coth"] = &MLPPActivation::coth;
+	activation_map["Coth"] = &MLPPActivationOld::coth;
+	activationTest_map["Coth"] = &MLPPActivationOld::coth;

-	activation_map["Arsinh"] = &MLPPActivation::arsinh;
-	activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
+	activation_map["Arsinh"] = &MLPPActivationOld::arsinh;
+	activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh;

-	activation_map["Arcosh"] = &MLPPActivation::arcosh;
-	activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
+	activation_map["Arcosh"] = &MLPPActivationOld::arcosh;
+	activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh;

-	activation_map["Artanh"] = &MLPPActivation::artanh;
-	activationTest_map["Artanh"] = &MLPPActivation::artanh;
+	activation_map["Artanh"] = &MLPPActivationOld::artanh;
+	activationTest_map["Artanh"] = &MLPPActivationOld::artanh;

-	activation_map["Arcsch"] = &MLPPActivation::arcsch;
-	activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
+	activation_map["Arcsch"] = &MLPPActivationOld::arcsch;
+	activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch;

-	activation_map["Arsech"] = &MLPPActivation::arsech;
-	activationTest_map["Arsech"] = &MLPPActivation::arsech;
+	activation_map["Arsech"] = &MLPPActivationOld::arsech;
+	activationTest_map["Arsech"] = &MLPPActivationOld::arsech;

-	activation_map["Arcoth"] = &MLPPActivation::arcoth;
-	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
+	activation_map["Arcoth"] = &MLPPActivationOld::arcoth;
+	activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth;

	costDeriv_map["MSE"] = &MLPPCost::MSEDeriv;
	cost_map["MSE"] = &MLPPCost::MSE;
@@ -126,14 +126,14 @@ MLPPOldMultiOutputLayer::MLPPOldMultiOutputLayer(int p_n_output, int p_n_hidden,

void MLPPOldMultiOutputLayer::forwardPass() {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
	a = (avn.*activation_map[activation])(z, false);
}

void MLPPOldMultiOutputLayer::Test(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
	a_test = (avn.*activationTest_map[activation])(z_test, false);
}
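
// [editor's sketch: not part of this commit] How the pointer-to-member maps
// above dispatch an activation by name, reduced to a standalone example
// (Foo and twice() are hypothetical; double stands in for real_t):
//
//   #include <map>
//   #include <string>
//
//   struct Foo {
//       double twice(double z, bool deriv) { return deriv ? 2.0 : 2.0 * z; }
//   };
//
//   std::map<std::string, double (Foo::*)(double, bool)> table;
//   table["Twice"] = &Foo::twice;
//   Foo f;
//   double r = (f.*table["Twice"])(3.0, false); // r == 6.0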

@@ -13,7 +13,7 @@

#include "core/object/reference.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"
@@ -42,8 +42,8 @@ public:
	std::vector<std::vector<real_t>> z;
	std::vector<std::vector<real_t>> a;

-	std::map<std::string, std::vector<std::vector<real_t>> (MLPPActivation::*)(std::vector<std::vector<real_t>>, bool)> activation_map;
-	std::map<std::string, std::vector<real_t> (MLPPActivation::*)(std::vector<real_t>, bool)> activationTest_map;
+	std::map<std::string, std::vector<std::vector<real_t>> (MLPPActivationOld::*)(std::vector<std::vector<real_t>>, bool)> activation_map;
+	std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activationTest_map;
	std::map<std::string, real_t (MLPPCost::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> cost_map;
	std::map<std::string, std::vector<std::vector<real_t>> (MLPPCost::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> costDeriv_map;

@@ -24,83 +24,83 @@ MLPPOldOutputLayer::MLPPOldOutputLayer(int p_n_hidden, std::string p_activation,
	weights = MLPPUtilities::weightInitialization(n_hidden, weightInit);
	bias = MLPPUtilities::biasInitialization();

-	activation_map["Linear"] = &MLPPActivation::linear;
-	activationTest_map["Linear"] = &MLPPActivation::linear;
+	activation_map["Linear"] = &MLPPActivationOld::linear;
+	activationTest_map["Linear"] = &MLPPActivationOld::linear;

-	activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
-	activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
+	activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid;
+	activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid;

-	activation_map["Swish"] = &MLPPActivation::swish;
-	activationTest_map["Swish"] = &MLPPActivation::swish;
+	activation_map["Swish"] = &MLPPActivationOld::swish;
+	activationTest_map["Swish"] = &MLPPActivationOld::swish;

-	activation_map["Mish"] = &MLPPActivation::mish;
-	activationTest_map["Mish"] = &MLPPActivation::mish;
+	activation_map["Mish"] = &MLPPActivationOld::mish;
+	activationTest_map["Mish"] = &MLPPActivationOld::mish;

-	activation_map["SinC"] = &MLPPActivation::sinc;
-	activationTest_map["SinC"] = &MLPPActivation::sinc;
+	activation_map["SinC"] = &MLPPActivationOld::sinc;
+	activationTest_map["SinC"] = &MLPPActivationOld::sinc;

-	activation_map["Softplus"] = &MLPPActivation::softplus;
-	activationTest_map["Softplus"] = &MLPPActivation::softplus;
+	activation_map["Softplus"] = &MLPPActivationOld::softplus;
+	activationTest_map["Softplus"] = &MLPPActivationOld::softplus;

-	activation_map["Softsign"] = &MLPPActivation::softsign;
-	activationTest_map["Softsign"] = &MLPPActivation::softsign;
+	activation_map["Softsign"] = &MLPPActivationOld::softsign;
+	activationTest_map["Softsign"] = &MLPPActivationOld::softsign;

-	activation_map["CLogLog"] = &MLPPActivation::cloglog;
-	activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
+	activation_map["CLogLog"] = &MLPPActivationOld::cloglog;
+	activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog;

-	activation_map["Logit"] = &MLPPActivation::logit;
-	activationTest_map["Logit"] = &MLPPActivation::logit;
+	activation_map["Logit"] = &MLPPActivationOld::logit;
+	activationTest_map["Logit"] = &MLPPActivationOld::logit;

-	activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
-	activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
+	activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;
+	activationTest_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF;

-	activation_map["RELU"] = &MLPPActivation::RELU;
-	activationTest_map["RELU"] = &MLPPActivation::RELU;
+	activation_map["RELU"] = &MLPPActivationOld::RELU;
+	activationTest_map["RELU"] = &MLPPActivationOld::RELU;

-	activation_map["GELU"] = &MLPPActivation::GELU;
-	activationTest_map["GELU"] = &MLPPActivation::GELU;
+	activation_map["GELU"] = &MLPPActivationOld::GELU;
+	activationTest_map["GELU"] = &MLPPActivationOld::GELU;

-	activation_map["Sign"] = &MLPPActivation::sign;
-	activationTest_map["Sign"] = &MLPPActivation::sign;
+	activation_map["Sign"] = &MLPPActivationOld::sign;
+	activationTest_map["Sign"] = &MLPPActivationOld::sign;

-	activation_map["UnitStep"] = &MLPPActivation::unitStep;
-	activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
+	activation_map["UnitStep"] = &MLPPActivationOld::unitStep;
+	activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep;

-	activation_map["Sinh"] = &MLPPActivation::sinh;
-	activationTest_map["Sinh"] = &MLPPActivation::sinh;
+	activation_map["Sinh"] = &MLPPActivationOld::sinh;
+	activationTest_map["Sinh"] = &MLPPActivationOld::sinh;

-	activation_map["Cosh"] = &MLPPActivation::cosh;
-	activationTest_map["Cosh"] = &MLPPActivation::cosh;
+	activation_map["Cosh"] = &MLPPActivationOld::cosh;
+	activationTest_map["Cosh"] = &MLPPActivationOld::cosh;

-	activation_map["Tanh"] = &MLPPActivation::tanh;
-	activationTest_map["Tanh"] = &MLPPActivation::tanh;
+	activation_map["Tanh"] = &MLPPActivationOld::tanh;
+	activationTest_map["Tanh"] = &MLPPActivationOld::tanh;

-	activation_map["Csch"] = &MLPPActivation::csch;
-	activationTest_map["Csch"] = &MLPPActivation::csch;
+	activation_map["Csch"] = &MLPPActivationOld::csch;
+	activationTest_map["Csch"] = &MLPPActivationOld::csch;

-	activation_map["Sech"] = &MLPPActivation::sech;
-	activationTest_map["Sech"] = &MLPPActivation::sech;
+	activation_map["Sech"] = &MLPPActivationOld::sech;
+	activationTest_map["Sech"] = &MLPPActivationOld::sech;

-	activation_map["Coth"] = &MLPPActivation::coth;
-	activationTest_map["Coth"] = &MLPPActivation::coth;
+	activation_map["Coth"] = &MLPPActivationOld::coth;
+	activationTest_map["Coth"] = &MLPPActivationOld::coth;

-	activation_map["Arsinh"] = &MLPPActivation::arsinh;
-	activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
+	activation_map["Arsinh"] = &MLPPActivationOld::arsinh;
+	activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh;

-	activation_map["Arcosh"] = &MLPPActivation::arcosh;
-	activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
+	activation_map["Arcosh"] = &MLPPActivationOld::arcosh;
+	activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh;

-	activation_map["Artanh"] = &MLPPActivation::artanh;
-	activationTest_map["Artanh"] = &MLPPActivation::artanh;
+	activation_map["Artanh"] = &MLPPActivationOld::artanh;
+	activationTest_map["Artanh"] = &MLPPActivationOld::artanh;

-	activation_map["Arcsch"] = &MLPPActivation::arcsch;
-	activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
+	activation_map["Arcsch"] = &MLPPActivationOld::arcsch;
+	activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch;

-	activation_map["Arsech"] = &MLPPActivation::arsech;
-	activationTest_map["Arsech"] = &MLPPActivation::arsech;
+	activation_map["Arsech"] = &MLPPActivationOld::arsech;
+	activationTest_map["Arsech"] = &MLPPActivationOld::arsech;

-	activation_map["Arcoth"] = &MLPPActivation::arcoth;
-	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
+	activation_map["Arcoth"] = &MLPPActivationOld::arcoth;
+	activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth;

	costDeriv_map["MSE"] = &MLPPCost::MSEDeriv;
	cost_map["MSE"] = &MLPPCost::MSE;
@@ -122,14 +122,14 @@ MLPPOldOutputLayer::MLPPOldOutputLayer(int p_n_hidden, std::string p_activation,

void MLPPOldOutputLayer::forwardPass() {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
	a = (avn.*activation_map[activation])(z, false);
}

void MLPPOldOutputLayer::Test(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z_test = alg.dot(weights, x) + bias;
	a_test = (avn.*activationTest_map[activation])(z_test, false);
}

@@ -13,7 +13,7 @@

#include "core/object/reference.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"
@@ -41,8 +41,8 @@ public:
	std::vector<real_t> z;
	std::vector<real_t> a;

-	std::map<std::string, std::vector<real_t> (MLPPActivation::*)(std::vector<real_t>, bool)> activation_map;
-	std::map<std::string, real_t (MLPPActivation::*)(real_t, bool)> activationTest_map;
+	std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activation_map;
+	std::map<std::string, real_t (MLPPActivationOld::*)(real_t, bool)> activationTest_map;
	std::map<std::string, real_t (MLPPCost::*)(std::vector<real_t>, std::vector<real_t>)> cost_map;
	std::map<std::string, std::vector<real_t> (MLPPCost::*)(std::vector<real_t>, std::vector<real_t>)> costDeriv_map;

@@ -5,7 +5,7 @@
//

#include "probit_reg_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -30,7 +30,7 @@ real_t MLPPProbitRegOld::modelTest(std::vector<real_t> x) {
}

void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -63,7 +63,7 @@ void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool
}

void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -97,7 +97,7 @@ void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {

void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
	// NOTE: ∂y_hat/∂z is sparse
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -138,7 +138,7 @@ void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPProbitRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -216,7 +216,7 @@ real_t MLPPProbitRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y)

std::vector<real_t> MLPPProbitRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

@@ -227,7 +227,7 @@ std::vector<real_t> MLPPProbitRegOld::propagate(std::vector<std::vector<real_t>>

real_t MLPPProbitRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.gaussianCDF(alg.dot(weights, x) + bias);
}

@@ -238,7 +238,7 @@ real_t MLPPProbitRegOld::propagate(std::vector<real_t> x) {

// gaussianCDF ( wTx + b )
void MLPPProbitRegOld::forwardPass() {
-	MLPPActivation avn;
+	MLPPActivationOld avn;

	z = propagate(inputSet);
	y_hat = avn.gaussianCDF(z);
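
	// [editor's note: not part of this commit] Probit model in math form:
	//   y_hat = Phi(w^T x + b),  Phi(z) = 0.5 * (1 + erf(z / sqrt(2))),
	// i.e. the standard normal CDF applied to the linear score.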

@@ -169,9 +169,9 @@ real_t MLPPReg::reg_deriv_termvr(const Ref<MLPPVector> &weights, real_t lambda,
	if (reg == REGULARIZATION_TYPE_RIDGE) {
		return lambda * wj;
	} else if (reg == REGULARIZATION_TYPE_LASSO) {
-		return lambda * act.sign(wj);
+		return lambda * act.sign_normr(wj);
	} else if (reg == REGULARIZATION_TYPE_ELASTIC_NET) {
-		return alpha * lambda * act.sign(wj) + (1 - alpha) * lambda * wj;
+		return alpha * lambda * act.sign_normr(wj) + (1 - alpha) * lambda * wj;
	} else if (reg == REGULARIZATION_TYPE_WEIGHT_CLIPPING) { // Preparation for Wasserstein GANs.
		// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
		// alpha > lambda.
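
	// [editor's note: not part of this commit] The branches above are the
	// per-weight (sub)gradients of the usual penalties:
	//   Ridge (L2):   d/dw_j [ (lambda / 2) * w_j^2 ] = lambda * w_j
	//   Lasso (L1):   d/dw_j [ lambda * |w_j| ]       = lambda * sign(w_j), taking sign(0) = 0
	//   ElasticNet:   alpha * lambda * sign(w_j) + (1 - alpha) * lambda * w_j,
	//                 an alpha-weighted blend of the two.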
@@ -194,9 +194,9 @@ real_t MLPPReg::reg_deriv_termmr(const Ref<MLPPMatrix> &weights, real_t lambda,
	if (reg == REGULARIZATION_TYPE_RIDGE) {
		return lambda * wj;
	} else if (reg == REGULARIZATION_TYPE_LASSO) {
-		return lambda * act.sign(wj);
+		return lambda * act.sign_normr(wj);
	} else if (reg == REGULARIZATION_TYPE_ELASTIC_NET) {
-		return alpha * lambda * act.sign(wj) + (1 - alpha) * lambda * wj;
+		return alpha * lambda * act.sign_normr(wj) + (1 - alpha) * lambda * wj;
	} else if (reg == REGULARIZATION_TYPE_WEIGHT_CLIPPING) { // Preparation for Wasserstein GANs.
		// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
		// alpha > lambda.
@@ -322,9 +322,9 @@ real_t MLPPReg::regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t
	if (reg == "Ridge") {
		return lambda * weights[j];
	} else if (reg == "Lasso") {
-		return lambda * act.sign(weights[j]);
+		return lambda * act.sign_normr(weights[j]);
	} else if (reg == "ElasticNet") {
-		return alpha * lambda * act.sign(weights[j]) + (1 - alpha) * lambda * weights[j];
+		return alpha * lambda * act.sign_normr(weights[j]) + (1 - alpha) * lambda * weights[j];
	} else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs.
		// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
		// alpha > lambda.
@@ -345,9 +345,9 @@ real_t MLPPReg::regDerivTerm(std::vector<std::vector<real_t>> weights, real_t la
	if (reg == "Ridge") {
		return lambda * weights[i][j];
	} else if (reg == "Lasso") {
-		return lambda * act.sign(weights[i][j]);
+		return lambda * act.sign_normr(weights[i][j]);
	} else if (reg == "ElasticNet") {
-		return alpha * lambda * act.sign(weights[i][j]) + (1 - alpha) * lambda * weights[i][j];
+		return alpha * lambda * act.sign_normr(weights[i][j]) + (1 - alpha) * lambda * weights[i][j];
	} else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs.
		// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
		// alpha > lambda.

@@ -8,7 +8,7 @@

#include "core/math/math_defs.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../lin_alg/lin_alg.h"

#include <iostream>
@@ -66,7 +66,7 @@ std::vector<std::vector<real_t>> MLPPRegOld::regDerivTerm(std::vector<std::vecto
}

real_t MLPPRegOld::regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg, int j) {
-	MLPPActivation act;
+	MLPPActivationOld act;
	if (reg == "Ridge") {
		return lambda * weights[j];
	} else if (reg == "Lasso") {
@@ -89,7 +89,7 @@ real_t MLPPRegOld::regDerivTerm(std::vector<real_t> weights, real_t lambda, real
}

real_t MLPPRegOld::regDerivTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg, int i, int j) {
-	MLPPActivation act;
+	MLPPActivationOld act;
	if (reg == "Ridge") {
		return lambda * weights[i][j];
	} else if (reg == "Lasso") {

@@ -6,7 +6,7 @@

#include "softmax_net_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../data/data.h"
#include "../lin_alg/lin_alg.h"
@@ -44,7 +44,7 @@ std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::modelSetTest(std::vector<std
}

void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -100,7 +100,7 @@ void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, boo
}

void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -158,7 +158,7 @@ void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPSoftmaxNetOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -270,7 +270,7 @@ real_t MLPPSoftmaxNetOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vect

std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
@@ -278,7 +278,7 @@ std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::ve

std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPSoftmaxNetOld::propagate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
	return { z2, a2 };
@@ -286,7 +286,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M

std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2));
@@ -294,7 +294,7 @@ std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) {

std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
	std::vector<real_t> a2 = avn.sigmoid(z2);
	return { z2, a2 };
@@ -302,7 +302,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagat

void MLPPSoftmaxNetOld::forwardPass() {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
	a2 = avn.sigmoid(z2);
	y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));

@@ -5,7 +5,7 @@
//

#include "softmax_reg_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -173,13 +173,13 @@ real_t MLPPSoftmaxRegOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vect

std::vector<real_t> MLPPSoftmaxRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x)));
}

std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;

	return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias));
}
@@ -187,7 +187,7 @@ std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::Evaluate(std::vector<std::ve

// softmax ( wTx + b )
void MLPPSoftmaxRegOld::forwardPass() {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;

	y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
}

@@ -247,7 +247,7 @@ real_t MLPPStat::heinzMean(const real_t A, const real_t B, const real_t x) {

real_t MLPPStat::neumanSandorMean(const real_t a, const real_t b) {
	MLPPActivation avn;
-	return (a - b) / 2 * avn.arsinh((a - b) / (a + b));
+	return (a - b) / 2 * avn.arsinh_normr((a - b) / (a + b));
}
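
// [editor's note: not part of this commit] For reference, the Neuman-Sandor mean
// of a != b (a, b > 0) is usually defined with the arsinh term dividing:
//   M(a, b) = (a - b) / (2 * arsinh((a - b) / (a + b)))
// The expression above keeps the library's existing form and only swaps in the
// renamed arsinh_normr().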

real_t MLPPStat::stolarskyMean(const real_t x, const real_t y, const real_t p) {

@@ -5,7 +5,7 @@
//

#include "stat_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../data/data.h"
#include "../lin_alg/lin_alg.h"
#include <algorithm>
@@ -189,7 +189,7 @@ real_t MLPPStatOld::heinzMean(const real_t A, const real_t B, const real_t x) {
}

real_t MLPPStatOld::neumanSandorMean(const real_t a, const real_t b) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return (a - b) / 2 * avn.arsinh((a - b) / (a + b));
}

@@ -5,7 +5,7 @@
//

#include "svc_old.h"
-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -24,7 +24,6 @@ real_t MLPPSVCOld::modelTest(std::vector<real_t> x) {

void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	class MLPPCost cost;
-	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -57,7 +56,6 @@ void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {

void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
	class MLPPCost cost;
-	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

@@ -101,7 +99,6 @@ void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) {

void MLPPSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	class MLPPCost cost;
-	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -172,31 +169,29 @@ real_t MLPPSVCOld::Cost(std::vector<real_t> z, std::vector<real_t> y, std::vecto

std::vector<real_t> MLPPSVCOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

std::vector<real_t> MLPPSVCOld::propagate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}

real_t MLPPSVCOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.sign(alg.dot(weights, x) + bias);
}

real_t MLPPSVCOld::propagate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
	return alg.dot(weights, x) + bias;
}

// sign ( wTx + b )
void MLPPSVCOld::forwardPass() {
-	MLPPActivation avn;
+	MLPPActivationOld avn;

	z = propagate(inputSet);
	y_hat = avn.sign(z);
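
	// [editor's note: not part of this commit] In math form the classifier is
	//   y_hat = sign(w^T x + b),
	// with propagate() supplying the raw margin z = w^T x + b that Cost() scores.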

@@ -6,7 +6,7 @@

#include "tanh_reg_old.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -31,7 +31,7 @@ real_t MLPPTanhRegOld::modelTest(std::vector<real_t> x) {
}

void MLPPTanhRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
@@ -104,7 +104,7 @@ void MLPPTanhRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}

void MLPPTanhRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

@@ -167,7 +167,7 @@ real_t MLPPTanhRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {

std::vector<real_t> MLPPTanhRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

@@ -178,7 +178,7 @@ std::vector<real_t> MLPPTanhRegOld::propagate(std::vector<std::vector<real_t>> X

real_t MLPPTanhRegOld::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	return avn.tanh(alg.dot(weights, x) + bias);
}

@@ -189,7 +189,7 @@ real_t MLPPTanhRegOld::propagate(std::vector<real_t> x) {

// Tanh ( wTx + b )
void MLPPTanhRegOld::forwardPass() {
-	MLPPActivation avn;
+	MLPPActivationOld avn;

	z = propagate(inputSet);
	y_hat = avn.tanh(z);

@@ -8,7 +8,7 @@

#include "core/log/logger.h"

-#include "../activation/activation.h"
+#include "../activation/activation_old.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
@@ -228,7 +228,7 @@ void MLPPWGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<

std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPWGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
	class MLPPCost cost;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

@@ -263,7 +263,7 @@ std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> M

std::vector<std::vector<std::vector<real_t>>> MLPPWGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
	class MLPPCost cost;
-	MLPPActivation avn;
+	MLPPActivationOld avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

@@ -47,6 +47,7 @@
#include "../mlpp/uni_lin_reg/uni_lin_reg.h"
#include "../mlpp/wgan/wgan.h"

+#include "../mlpp/activation/activation_old.h"
#include "../mlpp/ann/ann_old.h"
#include "../mlpp/auto_encoder/auto_encoder_old.h"
#include "../mlpp/bernoulli_nb/bernoulli_nb_old.h"
@@ -56,6 +57,7 @@
#include "../mlpp/gan/gan_old.h"
#include "../mlpp/gaussian_nb/gaussian_nb_old.h"
#include "../mlpp/hidden_layer/hidden_layer_old.h"
+#include "../mlpp/lin_alg/lin_alg_old.h"
#include "../mlpp/lin_reg/lin_reg_old.h"
#include "../mlpp/log_reg/log_reg_old.h"
#include "../mlpp/mann/mann_old.h"
@@ -1067,8 +1069,8 @@ void MLPPTests::test_outlier_finder(bool ui) {
	PLOG_MSG(Variant(outlier_finder.model_test(input_set)));
}
void MLPPTests::test_new_math_functions() {
-	MLPPLinAlg alg;
-	MLPPActivation avn;
+	MLPPLinAlgOld alg;
+	MLPPActivationOld avn;
	MLPPData data;

	// Testing new Functions
@@ -1107,11 +1109,9 @@ void MLPPTests::test_new_math_functions() {

	alg.printMatrix(alg.gramSchmidtProcess(P));

-	MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
-
-	alg.printMatrix(qrd_result.Q);
-
-	alg.printMatrix(qrd_result.R);
+	//MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
+	//alg.printMatrix(qrd_result.Q);
+	//alg.printMatrix(qrd_result.R);
}
void MLPPTests::test_positive_definiteness_checker() {
	//MLPPStat stat;