Renamed Activation to MLPPActivation.

Relintai 2023-01-24 19:23:30 +01:00
parent ab0d41203b
commit 18c4ae6ea1
25 changed files with 338 additions and 338 deletions
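The rename is purely mechanical: every declaration, pointer-to-member reference, and comment that used the old Activation class name now reads MLPPActivation, with no behavioural change. A before/after sketch of typical calling code (the avn object name and the sigmoid call are taken from the hunks below; the surrounding snippet is illustrative, not part of this commit):

// Before this commit:
//     Activation avn;
//     double a = avn.sigmoid(0.5, false); // forward value
// After this commit only the type name changes; call sites are otherwise identical:
//     MLPPActivation avn;
//     double a = avn.sigmoid(0.5, false);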

View File

@ -122,7 +122,7 @@ int main() {
// // OBJECTS
Stat stat;
LinAlg alg;
Activation avn;
MLPPActivation avn;
Cost cost;
Data data;
Convolutions conv;

View File

@ -10,14 +10,14 @@
#include <cmath>
#include <iostream>
double Activation::linear(double z, bool deriv) {
double MLPPActivation::linear(double z, bool deriv) {
if (deriv) {
return 1;
}
return z;
}
std::vector<double> Activation::linear(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::linear(std::vector<double> z, bool deriv) {
if (deriv) {
LinAlg alg;
return alg.onevec(z.size());
@ -25,7 +25,7 @@ std::vector<double> Activation::linear(std::vector<double> z, bool deriv) {
return z;
}
std::vector<std::vector<double>> Activation::linear(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::linear(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
LinAlg alg;
return alg.onemat(z.size(), z[0].size());
@ -33,14 +33,14 @@ std::vector<std::vector<double>> Activation::linear(std::vector<std::vector<doub
return z;
}
double Activation::sigmoid(double z, bool deriv) {
double MLPPActivation::sigmoid(double z, bool deriv) {
if (deriv) {
return sigmoid(z) * (1 - sigmoid(z));
}
return 1 / (1 + exp(-z));
}
std::vector<double> Activation::sigmoid(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::sigmoid(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
@ -48,7 +48,7 @@ std::vector<double> Activation::sigmoid(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.addition(alg.onevec(z.size()), alg.exp(alg.scalarMultiply(-1, z))));
}
std::vector<std::vector<double>> Activation::sigmoid(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::sigmoid(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z)));
@ -56,7 +56,7 @@ std::vector<std::vector<double>> Activation::sigmoid(std::vector<std::vector<dou
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(alg.scalarMultiply(-1, z))));
}
std::vector<double> Activation::softmax(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::softmax(std::vector<double> z, bool deriv) {
LinAlg alg;
std::vector<double> a;
a.resize(z.size());
@ -72,7 +72,7 @@ std::vector<double> Activation::softmax(std::vector<double> z, bool deriv) {
return a;
}
std::vector<std::vector<double>> Activation::softmax(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::softmax(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
std::vector<std::vector<double>> a;
a.resize(z.size());
@ -83,7 +83,7 @@ std::vector<std::vector<double>> Activation::softmax(std::vector<std::vector<dou
return a;
}
std::vector<double> Activation::adjSoftmax(std::vector<double> z) {
std::vector<double> MLPPActivation::adjSoftmax(std::vector<double> z) {
LinAlg alg;
std::vector<double> a;
double C = -*std::max_element(z.begin(), z.end());
@ -92,7 +92,7 @@ std::vector<double> Activation::adjSoftmax(std::vector<double> z) {
return softmax(z);
}
std::vector<std::vector<double>> Activation::adjSoftmax(std::vector<std::vector<double>> z) {
std::vector<std::vector<double>> MLPPActivation::adjSoftmax(std::vector<std::vector<double>> z) {
LinAlg alg;
std::vector<std::vector<double>> a;
a.resize(z.size());
@ -103,7 +103,7 @@ std::vector<std::vector<double>> Activation::adjSoftmax(std::vector<std::vector<
return a;
}
std::vector<std::vector<double>> Activation::softmaxDeriv(std::vector<double> z) {
std::vector<std::vector<double>> MLPPActivation::softmaxDeriv(std::vector<double> z) {
LinAlg alg;
std::vector<std::vector<double>> deriv;
std::vector<double> a = softmax(z);
@ -123,7 +123,7 @@ std::vector<std::vector<double>> Activation::softmaxDeriv(std::vector<double> z)
return deriv;
}
std::vector<std::vector<std::vector<double>>> Activation::softmaxDeriv(std::vector<std::vector<double>> z) {
std::vector<std::vector<std::vector<double>>> MLPPActivation::softmaxDeriv(std::vector<std::vector<double>> z) {
LinAlg alg;
std::vector<std::vector<std::vector<double>>> deriv;
std::vector<std::vector<double>> a = softmax(z);
@ -144,14 +144,14 @@ std::vector<std::vector<std::vector<double>>> Activation::softmaxDeriv(std::vect
return deriv;
}
double Activation::softplus(double z, bool deriv) {
double MLPPActivation::softplus(double z, bool deriv) {
if (deriv) {
return sigmoid(z);
}
return std::log(1 + exp(z));
}
std::vector<double> Activation::softplus(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::softplus(std::vector<double> z, bool deriv) {
if (deriv) {
return sigmoid(z);
}
@ -159,7 +159,7 @@ std::vector<double> Activation::softplus(std::vector<double> z, bool deriv) {
return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z)));
}
std::vector<std::vector<double>> Activation::softplus(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::softplus(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
return sigmoid(z);
}
@ -167,14 +167,14 @@ std::vector<std::vector<double>> Activation::softplus(std::vector<std::vector<do
return alg.log(alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(z)));
}
double Activation::softsign(double z, bool deriv) {
double MLPPActivation::softsign(double z, bool deriv) {
if (deriv) {
return 1 / ((1 + abs(z)) * (1 + abs(z)));
}
return z / (1 + abs(z));
}
std::vector<double> Activation::softsign(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::softsign(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2));
@ -182,7 +182,7 @@ std::vector<double> Activation::softsign(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(z, alg.addition(alg.onevec(z.size()), alg.abs(z)));
}
std::vector<std::vector<double>> Activation::softsign(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::softsign(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2));
@ -190,14 +190,14 @@ std::vector<std::vector<double>> Activation::softsign(std::vector<std::vector<do
return alg.elementWiseDivision(z, alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)));
}
double Activation::gaussianCDF(double z, bool deriv) {
double MLPPActivation::gaussianCDF(double z, bool deriv) {
if (deriv) {
return (1 / sqrt(2 * M_PI)) * exp(-z * z / 2);
}
return 0.5 * (1 + erf(z / sqrt(2)));
}
std::vector<double> Activation::gaussianCDF(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::gaussianCDF(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z))));
@ -205,7 +205,7 @@ std::vector<double> Activation::gaussianCDF(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(0.5, alg.addition(alg.onevec(z.size()), alg.erf(alg.scalarMultiply(1 / sqrt(2), z))));
}
std::vector<std::vector<double>> Activation::gaussianCDF(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::gaussianCDF(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z))));
@ -213,14 +213,14 @@ std::vector<std::vector<double>> Activation::gaussianCDF(std::vector<std::vector
return alg.scalarMultiply(0.5, alg.addition(alg.onemat(z.size(), z[0].size()), alg.erf(alg.scalarMultiply(1 / sqrt(2), z))));
}
double Activation::cloglog(double z, bool deriv) {
double MLPPActivation::cloglog(double z, bool deriv) {
if (deriv) {
return exp(z - exp(z));
}
return 1 - exp(-exp(z));
}
std::vector<double> Activation::cloglog(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::cloglog(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
@ -228,7 +228,7 @@ std::vector<double> Activation::cloglog(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.exp(alg.scalarMultiply(-1, alg.exp(z)))));
}
std::vector<std::vector<double>> Activation::cloglog(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::cloglog(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.exp(alg.scalarMultiply(-1, alg.exp(z)));
@ -236,14 +236,14 @@ std::vector<std::vector<double>> Activation::cloglog(std::vector<std::vector<dou
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.exp(alg.scalarMultiply(-1, alg.exp(z)))));
}
double Activation::logit(double z, bool deriv) {
double MLPPActivation::logit(double z, bool deriv) {
if (deriv) {
return 1 / z - 1 / (z - 1);
}
return std::log(z / (1 - z));
}
std::vector<double> Activation::logit(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::logit(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size()))));
@ -251,7 +251,7 @@ std::vector<double> Activation::logit(std::vector<double> z, bool deriv) {
return alg.log(alg.elementWiseDivision(z, alg.subtraction(alg.onevec(z.size()), z)));
}
std::vector<std::vector<double>> Activation::logit(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::logit(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size()))));
@ -259,14 +259,14 @@ std::vector<std::vector<double>> Activation::logit(std::vector<std::vector<doubl
return alg.log(alg.elementWiseDivision(z, alg.subtraction(alg.onemat(z.size(), z[0].size()), z)));
}
double Activation::unitStep(double z, bool deriv) {
double MLPPActivation::unitStep(double z, bool deriv) {
if (deriv) {
return 0;
}
return z < 0 ? 0 : 1;
}
std::vector<double> Activation::unitStep(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::unitStep(std::vector<double> z, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -284,7 +284,7 @@ std::vector<double> Activation::unitStep(std::vector<double> z, bool deriv) {
return a;
}
std::vector<std::vector<double>> Activation::unitStep(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::unitStep(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -302,14 +302,14 @@ std::vector<std::vector<double>> Activation::unitStep(std::vector<std::vector<do
return a;
}
double Activation::swish(double z, bool deriv) {
double MLPPActivation::swish(double z, bool deriv) {
if (deriv) {
return swish(z) + sigmoid(z) * (1 - swish(z));
}
return z * sigmoid(z);
}
std::vector<double> Activation::swish(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::swish(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
@ -317,7 +317,7 @@ std::vector<double> Activation::swish(std::vector<double> z, bool deriv) {
return alg.hadamard_product(z, sigmoid(z));
}
std::vector<std::vector<double>> Activation::swish(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::swish(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z))));
@ -325,14 +325,14 @@ std::vector<std::vector<double>> Activation::swish(std::vector<std::vector<doubl
return alg.hadamard_product(z, sigmoid(z));
}
double Activation::mish(double z, bool deriv) {
double MLPPActivation::mish(double z, bool deriv) {
if (deriv) {
return sech(softplus(z)) * sech(softplus(z)) * z * sigmoid(z) + mish(z) / z;
}
return z * tanh(softplus(z));
}
std::vector<double> Activation::mish(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::mish(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
@ -340,7 +340,7 @@ std::vector<double> Activation::mish(std::vector<double> z, bool deriv) {
return alg.hadamard_product(z, tanh(softplus(z)));
}
std::vector<std::vector<double>> Activation::mish(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::mish(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z));
@ -348,14 +348,14 @@ std::vector<std::vector<double>> Activation::mish(std::vector<std::vector<double
return alg.hadamard_product(z, tanh(softplus(z)));
}
double Activation::sinc(double z, bool deriv) {
double MLPPActivation::sinc(double z, bool deriv) {
if (deriv) {
return (z * std::cos(z) - std::sin(z)) / (z * z);
}
return std::sin(z) / z;
}
std::vector<double> Activation::sinc(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::sinc(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
@ -363,7 +363,7 @@ std::vector<double> Activation::sinc(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(alg.sin(z), z);
}
std::vector<std::vector<double>> Activation::sinc(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::sinc(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z));
@ -371,7 +371,7 @@ std::vector<std::vector<double>> Activation::sinc(std::vector<std::vector<double
return alg.elementWiseDivision(alg.sin(z), z);
}
double Activation::RELU(double z, bool deriv) {
double MLPPActivation::RELU(double z, bool deriv) {
if (deriv) {
if (z <= 0) {
return 0;
@ -382,7 +382,7 @@ double Activation::RELU(double z, bool deriv) {
return fmax(0, z);
}
std::vector<double> Activation::RELU(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::RELU(std::vector<double> z, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -400,7 +400,7 @@ std::vector<double> Activation::RELU(std::vector<double> z, bool deriv) {
return a;
}
std::vector<std::vector<double>> Activation::RELU(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::RELU(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -418,7 +418,7 @@ std::vector<std::vector<double>> Activation::RELU(std::vector<std::vector<double
return a;
}
double Activation::leakyReLU(double z, double c, bool deriv) {
double MLPPActivation::leakyReLU(double z, double c, bool deriv) {
if (deriv) {
if (z <= 0) {
return c;
@ -429,7 +429,7 @@ double Activation::leakyReLU(double z, double c, bool deriv) {
return fmax(c * z, z);
}
std::vector<double> Activation::leakyReLU(std::vector<double> z, double c, bool deriv) {
std::vector<double> MLPPActivation::leakyReLU(std::vector<double> z, double c, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -447,7 +447,7 @@ std::vector<double> Activation::leakyReLU(std::vector<double> z, double c, bool
return a;
}
std::vector<std::vector<double>> Activation::leakyReLU(std::vector<std::vector<double>> z, double c, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::leakyReLU(std::vector<std::vector<double>> z, double c, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -465,7 +465,7 @@ std::vector<std::vector<double>> Activation::leakyReLU(std::vector<std::vector<d
return a;
}
double Activation::ELU(double z, double c, bool deriv) {
double MLPPActivation::ELU(double z, double c, bool deriv) {
if (deriv) {
if (z <= 0) {
return c * exp(z);
@ -480,7 +480,7 @@ double Activation::ELU(double z, double c, bool deriv) {
}
}
std::vector<double> Activation::ELU(std::vector<double> z, double c, bool deriv) {
std::vector<double> MLPPActivation::ELU(std::vector<double> z, double c, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -498,7 +498,7 @@ std::vector<double> Activation::ELU(std::vector<double> z, double c, bool deriv)
return a;
}
std::vector<std::vector<double>> Activation::ELU(std::vector<std::vector<double>> z, double c, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::ELU(std::vector<std::vector<double>> z, double c, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -516,14 +516,14 @@ std::vector<std::vector<double>> Activation::ELU(std::vector<std::vector<double>
return a;
}
double Activation::SELU(double z, double lambda, double c, bool deriv) {
double MLPPActivation::SELU(double z, double lambda, double c, bool deriv) {
if (deriv) {
return ELU(z, c, 1);
}
return lambda * ELU(z, c);
}
std::vector<double> Activation::SELU(std::vector<double> z, double lambda, double c, bool deriv) {
std::vector<double> MLPPActivation::SELU(std::vector<double> z, double lambda, double c, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -541,7 +541,7 @@ std::vector<double> Activation::SELU(std::vector<double> z, double lambda, doubl
return a;
}
std::vector<std::vector<double>> Activation::SELU(std::vector<std::vector<double>> z, double lambda, double c, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::SELU(std::vector<std::vector<double>> z, double lambda, double c, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -559,14 +559,14 @@ std::vector<std::vector<double>> Activation::SELU(std::vector<std::vector<double
return a;
}
double Activation::GELU(double z, bool deriv) {
double MLPPActivation::GELU(double z, bool deriv) {
if (deriv) {
return 0.5 * tanh(0.0356774 * std::pow(z, 3) + 0.797885 * z) + (0.0535161 * std::pow(z, 3) + 0.398942 * z) * std::pow(sech(0.0356774 * std::pow(z, 3) + 0.797885 * z), 2) + 0.5;
}
return 0.5 * z * (1 + tanh(sqrt(2 / M_PI) * (z + 0.044715 * std::pow(z, 3))));
}
std::vector<double> Activation::GELU(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::GELU(std::vector<double> z, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -584,7 +584,7 @@ std::vector<double> Activation::GELU(std::vector<double> z, bool deriv) {
return a;
}
std::vector<std::vector<double>> Activation::GELU(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::GELU(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -602,7 +602,7 @@ std::vector<std::vector<double>> Activation::GELU(std::vector<std::vector<double
return a;
}
double Activation::sign(double z, bool deriv) {
double MLPPActivation::sign(double z, bool deriv) {
if (deriv) {
return 0;
}
@ -615,7 +615,7 @@ double Activation::sign(double z, bool deriv) {
}
}
std::vector<double> Activation::sign(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::sign(std::vector<double> z, bool deriv) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
@ -633,7 +633,7 @@ std::vector<double> Activation::sign(std::vector<double> z, bool deriv) {
return a;
}
std::vector<std::vector<double>> Activation::sign(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::sign(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
std::vector<std::vector<double>> deriv;
deriv.resize(z.size());
@ -651,14 +651,14 @@ std::vector<std::vector<double>> Activation::sign(std::vector<std::vector<double
return a;
}
double Activation::sinh(double z, bool deriv) {
double MLPPActivation::sinh(double z, bool deriv) {
if (deriv) {
return cosh(z);
}
return 0.5 * (exp(z) - exp(-z));
}
std::vector<double> Activation::sinh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::sinh(std::vector<double> z, bool deriv) {
if (deriv) {
return cosh(z);
}
@ -666,7 +666,7 @@ std::vector<double> Activation::sinh(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
std::vector<std::vector<double>> Activation::sinh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::sinh(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
return cosh(z);
}
@ -674,14 +674,14 @@ std::vector<std::vector<double>> Activation::sinh(std::vector<std::vector<double
return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
double Activation::cosh(double z, bool deriv) {
double MLPPActivation::cosh(double z, bool deriv) {
if (deriv) {
return sinh(z);
}
return 0.5 * (exp(z) + exp(-z));
}
std::vector<double> Activation::cosh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::cosh(std::vector<double> z, bool deriv) {
if (deriv) {
return sinh(z);
}
@ -689,7 +689,7 @@ std::vector<double> Activation::cosh(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
std::vector<std::vector<double>> Activation::cosh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::cosh(std::vector<std::vector<double>> z, bool deriv) {
if (deriv) {
return sinh(z);
}
@ -697,14 +697,14 @@ std::vector<std::vector<double>> Activation::cosh(std::vector<std::vector<double
return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
double Activation::tanh(double z, bool deriv) {
double MLPPActivation::tanh(double z, bool deriv) {
if (deriv) {
return 1 - tanh(z) * tanh(z);
}
return (exp(z) - exp(-z)) / (exp(z) + exp(-z));
}
std::vector<double> Activation::tanh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::tanh(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
@ -712,7 +712,7 @@ std::vector<double> Activation::tanh(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))), alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
std::vector<std::vector<double>> Activation::tanh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::tanh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z))));
@ -721,14 +721,14 @@ std::vector<std::vector<double>> Activation::tanh(std::vector<std::vector<double
return alg.elementWiseDivision(alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))), alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))));
}
double Activation::csch(double z, bool deriv) {
double MLPPActivation::csch(double z, bool deriv) {
if (deriv) {
return -csch(z) * coth(z);
}
return 1 / sinh(z);
}
std::vector<double> Activation::csch(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::csch(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
@ -736,7 +736,7 @@ std::vector<double> Activation::csch(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), sinh(z));
}
std::vector<std::vector<double>> Activation::csch(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::csch(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z));
@ -744,14 +744,14 @@ std::vector<std::vector<double>> Activation::csch(std::vector<std::vector<double
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), sinh(z));
}
double Activation::sech(double z, bool deriv) {
double MLPPActivation::sech(double z, bool deriv) {
if (deriv) {
return -sech(z) * tanh(z);
}
return 1 / cosh(z);
}
std::vector<double> Activation::sech(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::sech(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
@ -761,7 +761,7 @@ std::vector<double> Activation::sech(std::vector<double> z, bool deriv) {
// return activation(z, deriv, static_cast<void (*)(double, bool)>(&sech));
}
std::vector<std::vector<double>> Activation::sech(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::sech(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z));
@ -771,14 +771,14 @@ std::vector<std::vector<double>> Activation::sech(std::vector<std::vector<double
// return activation(z, deriv, static_cast<void (*)(double, bool)>(&sech));
}
double Activation::coth(double z, bool deriv) {
double MLPPActivation::coth(double z, bool deriv) {
if (deriv) {
return -csch(z) * csch(z);
}
return 1 / tanh(z);
}
std::vector<double> Activation::coth(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::coth(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
@ -786,7 +786,7 @@ std::vector<double> Activation::coth(std::vector<double> z, bool deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), tanh(z));
}
std::vector<std::vector<double>> Activation::coth(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::coth(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z));
@ -794,14 +794,14 @@ std::vector<std::vector<double>> Activation::coth(std::vector<std::vector<double
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), tanh(z));
}
double Activation::arsinh(double z, bool deriv) {
double MLPPActivation::arsinh(double z, bool deriv) {
if (deriv) {
return 1 / sqrt(z * z + 1);
}
return std::log(z + sqrt(z * z + 1));
}
std::vector<double> Activation::arsinh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::arsinh(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size()))));
@ -809,7 +809,7 @@ std::vector<double> Activation::arsinh(std::vector<double> z, bool deriv) {
return alg.log(alg.addition(z, alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size())))));
}
std::vector<std::vector<double>> Activation::arsinh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::arsinh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
@ -817,14 +817,14 @@ std::vector<std::vector<double>> Activation::arsinh(std::vector<std::vector<doub
return alg.log(alg.addition(z, alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))));
}
double Activation::arcosh(double z, bool deriv) {
double MLPPActivation::arcosh(double z, bool deriv) {
if (deriv) {
return 1 / sqrt(z * z - 1);
}
return std::log(z + sqrt(z * z - 1));
}
std::vector<double> Activation::arcosh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::arcosh(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size()))));
@ -832,7 +832,7 @@ std::vector<double> Activation::arcosh(std::vector<double> z, bool deriv) {
return alg.log(alg.addition(z, alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size())))));
}
std::vector<std::vector<double>> Activation::arcosh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::arcosh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))));
@ -840,14 +840,14 @@ std::vector<std::vector<double>> Activation::arcosh(std::vector<std::vector<doub
return alg.log(alg.addition(z, alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))));
}
double Activation::artanh(double z, bool deriv) {
double MLPPActivation::artanh(double z, bool deriv) {
if (deriv) {
return 1 / (1 - z * z);
}
return 0.5 * std::log((1 + z) / (1 - z));
}
std::vector<double> Activation::artanh(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::artanh(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
@ -855,7 +855,7 @@ std::vector<double> Activation::artanh(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(alg.onevec(z.size()), z))));
}
std::vector<std::vector<double>> Activation::artanh(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::artanh(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
@ -863,14 +863,14 @@ std::vector<std::vector<double>> Activation::artanh(std::vector<std::vector<doub
return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onemat(z.size(), z[0].size()), z), alg.subtraction(alg.onemat(z.size(), z[0].size()), z))));
}
double Activation::arcsch(double z, bool deriv) {
double MLPPActivation::arcsch(double z, bool deriv) {
if (deriv) {
return -1 / ((z * z) * sqrt(1 + (1 / (z * z))));
}
return std::log(sqrt(1 + (1 / (z * z))) + (1 / z));
}
std::vector<double> Activation::arcsch(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::arcsch(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z))))));
@ -878,7 +878,7 @@ std::vector<double> Activation::arcsch(std::vector<double> z, bool deriv) {
return alg.log(alg.addition(alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))), alg.elementWiseDivision(alg.onevec(z.size()), z)));
}
std::vector<std::vector<double>> Activation::arcsch(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::arcsch(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))));
@ -886,14 +886,14 @@ std::vector<std::vector<double>> Activation::arcsch(std::vector<std::vector<doub
return alg.log(alg.addition(alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z)));
}
double Activation::arsech(double z, bool deriv) {
double MLPPActivation::arsech(double z, bool deriv) {
if (deriv) {
return -1 / (z * sqrt(1 - z * z));
}
return std::log((1 / z) + ((1 / z) + 1) * ((1 / z) - 1));
}
std::vector<double> Activation::arsech(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::arsech(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)))));
@ -901,7 +901,7 @@ std::vector<double> Activation::arsech(std::vector<double> z, bool deriv) {
return alg.log(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.hadamard_product(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size())), alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size())))));
}
std::vector<std::vector<double>> Activation::arsech(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::arsech(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))));
@ -909,14 +909,14 @@ std::vector<std::vector<double>> Activation::arsech(std::vector<std::vector<doub
return alg.log(alg.addition(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.hadamard_product(alg.addition(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.onemat(z.size(), z[0].size())), alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.onemat(z.size(), z[0].size())))));
}
double Activation::arcoth(double z, bool deriv) {
double MLPPActivation::arcoth(double z, bool deriv) {
if (deriv) {
return 1 / (1 - z * z);
}
return 0.5 * std::log((1 + z) / (z - 1));
}
std::vector<double> Activation::arcoth(std::vector<double> z, bool deriv) {
std::vector<double> MLPPActivation::arcoth(std::vector<double> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z)));
@ -924,7 +924,7 @@ std::vector<double> Activation::arcoth(std::vector<double> z, bool deriv) {
return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(z, alg.onevec(z.size())))));
}
std::vector<std::vector<double>> Activation::arcoth(std::vector<std::vector<double>> z, bool deriv) {
std::vector<std::vector<double>> MLPPActivation::arcoth(std::vector<std::vector<double>> z, bool deriv) {
LinAlg alg;
if (deriv) {
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)));
@ -933,7 +933,7 @@ std::vector<std::vector<double>> Activation::arcoth(std::vector<std::vector<doub
}
// TO DO: Implement this template activation
std::vector<double> Activation::activation(std::vector<double> z, bool deriv, double (*function)(double, bool)) {
std::vector<double> MLPPActivation::activation(std::vector<double> z, bool deriv, double (*function)(double, bool)) {
if (deriv) {
std::vector<double> deriv;
deriv.resize(z.size());
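Every activation in this file follows the same overload pattern: a scalar version, a std::vector<double> version, and a std::vector<std::vector<double>> version, each taking a deriv flag that switches the return value from the function to its derivative. A minimal self-contained sketch of that pattern for sigmoid, re-implemented here for illustration without the library's LinAlg helpers (the sigmoid_demo name is made up):

#include <cmath>
#include <vector>

// Scalar case: s = 1 / (1 + e^(-z)); the derivative is s * (1 - s).
double sigmoid_demo(double z, bool deriv = false) {
    double s = 1.0 / (1.0 + std::exp(-z));
    return deriv ? s * (1.0 - s) : s;
}

// Vector overload: apply the scalar version element-wise.
std::vector<double> sigmoid_demo(std::vector<double> z, bool deriv = false) {
    for (double &v : z) {
        v = sigmoid_demo(v, deriv);
    }
    return z;
}

// Matrix overload: apply the vector version row by row.
std::vector<std::vector<double>> sigmoid_demo(std::vector<std::vector<double>> z, bool deriv = false) {
    for (std::vector<double> &row : z) {
        row = sigmoid_demo(row, deriv);
    }
    return z;
}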

View File

@ -10,7 +10,7 @@
#include <vector>
class Activation {
class MLPPActivation {
public:
double linear(double z, bool deriv = 0);
std::vector<double> linear(std::vector<double> z, bool deriv = 0);

View File

@ -720,7 +720,7 @@ void ANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenL
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
// std::cout << "BEGIN" << std::endl;
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;

View File

@ -15,7 +15,7 @@
AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
Activation avn;
MLPPActivation avn;
y_hat.resize(inputSet.size());
weights1 = Utilities::weightInitialization(k, n_hidden);
@ -33,7 +33,7 @@ std::vector<double> AutoEncoder::modelTest(std::vector<double> x) {
}
void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -86,7 +86,7 @@ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI)
}
void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -137,7 +137,7 @@ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
}
void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
int epoch = 1;
@ -214,7 +214,7 @@ double AutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std
std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
@ -222,7 +222,7 @@ std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<d
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> AutoEncoder::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -230,7 +230,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> A
std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2);
@ -238,7 +238,7 @@ std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -246,7 +246,7 @@ std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std:
void AutoEncoder::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);
y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2);

View File

@ -30,7 +30,7 @@ double CLogLogReg::modelTest(std::vector<double> x) {
}
void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -64,7 +64,7 @@ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -137,7 +137,7 @@ void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -192,7 +192,7 @@ double CLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
std::vector<double> CLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
@ -203,7 +203,7 @@ std::vector<double> CLogLogReg::propagate(std::vector<std::vector<double>> X) {
double CLogLogReg::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.cloglog(alg.dot(weights, x) + bias);
}
@ -215,7 +215,7 @@ double CLogLogReg::propagate(std::vector<double> x) {
// cloglog ( wTx + b )
void CLogLogReg::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = propagate(inputSet);
y_hat = avn.cloglog(z);
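As the comment above notes, the model computes cloglog(wTx + b), where cloglog(z) = 1 - exp(-exp(z)) and its derivative is exp(z - exp(z)) (see MLPPActivation::cloglog earlier in this commit). A minimal scalar sketch of that forward pass, with made-up weights and inputs for illustration:

#include <cmath>
#include <iostream>
#include <vector>

// cloglog(z) = 1 - exp(-exp(z)), matching MLPPActivation::cloglog with deriv = false.
double cloglog(double z) {
    return 1.0 - std::exp(-std::exp(z));
}

int main() {
    std::vector<double> weights = { 0.4, -0.2 }; // illustrative values
    std::vector<double> x = { 1.0, 2.0 };
    double bias = 0.1;

    // z = w . x + b, then y_hat = cloglog(z), as in CLogLogReg::Evaluate(std::vector<double> x).
    double z = bias;
    for (std::size_t i = 0; i < x.size(); i++) {
        z += weights[i] * x[i];
    }
    std::cout << cloglog(z) << std::endl; // y_hat lies in (0, 1)
    return 0;
}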

View File

@ -33,7 +33,7 @@ double DualSVC::modelTest(std::vector<double> x) {
void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -81,7 +81,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// void DualSVC::SGD(double learning_rate, int max_epoch, bool UI){
// class Cost cost;
// Activation avn;
// MLPPActivation avn;
// LinAlg alg;
// Reg regularization;
@ -114,7 +114,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// void DualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
// class Cost cost;
// Activation avn;
// MLPPActivation avn;
// LinAlg alg;
// Reg regularization;
// double cost_prev = 0;
@ -168,7 +168,7 @@ double DualSVC::Cost(std::vector<double> alpha, std::vector<std::vector<double>>
}
std::vector<double> DualSVC::Evaluate(std::vector<std::vector<double>> X) {
Activation avn;
MLPPActivation avn;
return avn.sign(propagate(X));
}
@ -189,7 +189,7 @@ std::vector<double> DualSVC::propagate(std::vector<std::vector<double>> X) {
}
double DualSVC::Evaluate(std::vector<double> x) {
Activation avn;
MLPPActivation avn;
return avn.sign(propagate(x));
}
@ -207,7 +207,7 @@ double DualSVC::propagate(std::vector<double> x) {
void DualSVC::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = propagate(inputSet);
y_hat = avn.sign(z);

View File

@ -209,7 +209,7 @@ void GAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> GAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@ -245,7 +245,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> G
std::vector<std::vector<std::vector<double>>> GAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;

View File

@ -18,95 +18,95 @@ HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector<std::
weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
bias = Utilities::biasInitialization(n_hidden);
activation_map["Linear"] = &Activation::linear;
activationTest_map["Linear"] = &Activation::linear;
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;
activation_map["Sigmoid"] = &Activation::sigmoid;
activationTest_map["Sigmoid"] = &Activation::sigmoid;
activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
activation_map["Swish"] = &Activation::swish;
activationTest_map["Swish"] = &Activation::swish;
activation_map["Swish"] = &MLPPActivation::swish;
activationTest_map["Swish"] = &MLPPActivation::swish;
activation_map["Mish"] = &Activation::mish;
activationTest_map["Mish"] = &Activation::mish;
activation_map["Mish"] = &MLPPActivation::mish;
activationTest_map["Mish"] = &MLPPActivation::mish;
activation_map["SinC"] = &Activation::sinc;
activationTest_map["SinC"] = &Activation::sinc;
activation_map["SinC"] = &MLPPActivation::sinc;
activationTest_map["SinC"] = &MLPPActivation::sinc;
activation_map["Softplus"] = &Activation::softplus;
activationTest_map["Softplus"] = &Activation::softplus;
activation_map["Softplus"] = &MLPPActivation::softplus;
activationTest_map["Softplus"] = &MLPPActivation::softplus;
activation_map["Softsign"] = &Activation::softsign;
activationTest_map["Softsign"] = &Activation::softsign;
activation_map["Softsign"] = &MLPPActivation::softsign;
activationTest_map["Softsign"] = &MLPPActivation::softsign;
activation_map["CLogLog"] = &Activation::cloglog;
activationTest_map["CLogLog"] = &Activation::cloglog;
activation_map["CLogLog"] = &MLPPActivation::cloglog;
activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
activation_map["Logit"] = &Activation::logit;
activationTest_map["Logit"] = &Activation::logit;
activation_map["Logit"] = &MLPPActivation::logit;
activationTest_map["Logit"] = &MLPPActivation::logit;
activation_map["GaussianCDF"] = &Activation::gaussianCDF;
activationTest_map["GaussianCDF"] = &Activation::gaussianCDF;
activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activation_map["RELU"] = &Activation::RELU;
activationTest_map["RELU"] = &Activation::RELU;
activation_map["RELU"] = &MLPPActivation::RELU;
activationTest_map["RELU"] = &MLPPActivation::RELU;
activation_map["GELU"] = &Activation::GELU;
activationTest_map["GELU"] = &Activation::GELU;
activation_map["GELU"] = &MLPPActivation::GELU;
activationTest_map["GELU"] = &MLPPActivation::GELU;
activation_map["Sign"] = &Activation::sign;
activationTest_map["Sign"] = &Activation::sign;
activation_map["Sign"] = &MLPPActivation::sign;
activationTest_map["Sign"] = &MLPPActivation::sign;
activation_map["UnitStep"] = &Activation::unitStep;
activationTest_map["UnitStep"] = &Activation::unitStep;
activation_map["UnitStep"] = &MLPPActivation::unitStep;
activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
activation_map["Sinh"] = &Activation::sinh;
activationTest_map["Sinh"] = &Activation::sinh;
activation_map["Sinh"] = &MLPPActivation::sinh;
activationTest_map["Sinh"] = &MLPPActivation::sinh;
activation_map["Cosh"] = &Activation::cosh;
activationTest_map["Cosh"] = &Activation::cosh;
activation_map["Cosh"] = &MLPPActivation::cosh;
activationTest_map["Cosh"] = &MLPPActivation::cosh;
activation_map["Tanh"] = &Activation::tanh;
activationTest_map["Tanh"] = &Activation::tanh;
activation_map["Tanh"] = &MLPPActivation::tanh;
activationTest_map["Tanh"] = &MLPPActivation::tanh;
activation_map["Csch"] = &Activation::csch;
activationTest_map["Csch"] = &Activation::csch;
activation_map["Csch"] = &MLPPActivation::csch;
activationTest_map["Csch"] = &MLPPActivation::csch;
activation_map["Sech"] = &Activation::sech;
activationTest_map["Sech"] = &Activation::sech;
activation_map["Sech"] = &MLPPActivation::sech;
activationTest_map["Sech"] = &MLPPActivation::sech;
activation_map["Coth"] = &Activation::coth;
activationTest_map["Coth"] = &Activation::coth;
activation_map["Coth"] = &MLPPActivation::coth;
activationTest_map["Coth"] = &MLPPActivation::coth;
activation_map["Arsinh"] = &Activation::arsinh;
activationTest_map["Arsinh"] = &Activation::arsinh;
activation_map["Arsinh"] = &MLPPActivation::arsinh;
activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
activation_map["Arcosh"] = &Activation::arcosh;
activationTest_map["Arcosh"] = &Activation::arcosh;
activation_map["Arcosh"] = &MLPPActivation::arcosh;
activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
activation_map["Artanh"] = &Activation::artanh;
activationTest_map["Artanh"] = &Activation::artanh;
activation_map["Artanh"] = &MLPPActivation::artanh;
activationTest_map["Artanh"] = &MLPPActivation::artanh;
activation_map["Arcsch"] = &Activation::arcsch;
activationTest_map["Arcsch"] = &Activation::arcsch;
activation_map["Arcsch"] = &MLPPActivation::arcsch;
activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
activation_map["Arsech"] = &Activation::arsech;
activationTest_map["Arsech"] = &Activation::arsech;
activation_map["Arsech"] = &MLPPActivation::arsech;
activationTest_map["Arsech"] = &MLPPActivation::arsech;
activation_map["Arcoth"] = &Activation::arcoth;
activationTest_map["Arcoth"] = &Activation::arcoth;
activation_map["Arcoth"] = &MLPPActivation::arcoth;
activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
}
void HiddenLayer::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = alg.mat_vec_add(alg.matmult(input, weights), bias);
a = (avn.*activation_map[activation])(z, 0);
}
void HiddenLayer::Test(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}
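HiddenLayer (and MultiOutputLayer further down) select the activation at runtime through a std::map from the activation's name to a pointer to the corresponding MLPPActivation member function, then invoke it with the .* operator, as in (avn.*activation_map[activation])(z, 0). A self-contained sketch of that dispatch mechanism, using a stand-in Demo class instead of the library types:

#include <cmath>
#include <iostream>
#include <map>
#include <string>

// Stand-in for MLPPActivation: two members sharing the (double, bool) signature.
struct Demo {
    double sigmoid(double z, bool deriv) {
        double s = 1.0 / (1.0 + std::exp(-z));
        return deriv ? s * (1.0 - s) : s;
    }
    double relu(double z, bool deriv) {
        if (deriv) {
            return z <= 0 ? 0.0 : 1.0;
        }
        return z < 0 ? 0.0 : z;
    }
};

int main() {
    // Name-to-member-pointer map, analogous to activation_map in HiddenLayer.
    std::map<std::string, double (Demo::*)(double, bool)> activation_map;
    activation_map["Sigmoid"] = &Demo::sigmoid;
    activation_map["RELU"] = &Demo::relu;

    Demo avn;
    std::string activation = "RELU";
    // Dispatch through the object with .*, mirroring HiddenLayer::forwardPass().
    double a = (avn.*activation_map[activation])(2.0, false);
    std::cout << a << std::endl; // prints 2
    return 0;
}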

View File

@ -30,8 +30,8 @@ public:
std::vector<std::vector<double>> z;
std::vector<std::vector<double>> a;
std::map<std::string, std::vector<std::vector<double>> (Activation::*)(std::vector<std::vector<double>>, bool)> activation_map;
std::map<std::string, std::vector<double> (Activation::*)(std::vector<double>, bool)> activationTest_map;
std::map<std::string, std::vector<std::vector<double>> (MLPPActivation::*)(std::vector<std::vector<double>>, bool)> activation_map;
std::map<std::string, std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)> activationTest_map;
std::vector<double> z_test;
std::vector<double> a_test;

View File

@ -188,13 +188,13 @@ double LogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
std::vector<double> LogReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
double LogReg::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.sigmoid(alg.dot(weights, x) + bias);
}

View File

@ -54,7 +54,7 @@ std::vector<double> MANN::modelTest(std::vector<double> x) {
void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;

View File

@ -18,7 +18,7 @@
MLP::MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
Activation avn;
MLPPActivation avn;
y_hat.resize(n);
weights1 = Utilities::weightInitialization(k, n_hidden);
@ -36,7 +36,7 @@ double MLP::modelTest(std::vector<double> x) {
}
void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -95,7 +95,7 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -149,7 +149,7 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
}
void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -233,7 +233,7 @@ double MLP::Cost(std::vector<double> y_hat, std::vector<double> y) {
std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
@ -241,7 +241,7 @@ std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLP::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -249,7 +249,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> M
double MLP::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return avn.sigmoid(alg.dot(weights2, a2) + bias2);
@ -257,7 +257,7 @@ double MLP::Evaluate(std::vector<double> x) {
std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -265,7 +265,7 @@ std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<
void MLP::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);
y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
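MLP::forwardPass above is two affine layers with a sigmoid after each: z2 = inputSet * weights1 + bias1, a2 = sigmoid(z2), y_hat = sigmoid(a2 * weights2 + bias2). A single-sample sketch of that computation without the LinAlg helpers; the dimensions and weight values are invented for illustration:

#include <cmath>
#include <iostream>
#include <vector>

double sigmoid(double z) {
    return 1.0 / (1.0 + std::exp(-z));
}

int main() {
    // One sample with k = 2 features and n_hidden = 3, as in MLP::Evaluate(std::vector<double> x).
    std::vector<double> x = { 1.0, -0.5 };
    std::vector<std::vector<double>> weights1 = { { 0.2, -0.1, 0.4 },
                                                  { 0.3, 0.8, -0.6 } }; // k x n_hidden
    std::vector<double> bias1 = { 0.0, 0.1, -0.1 };
    std::vector<double> weights2 = { 0.5, -0.7, 0.2 }; // n_hidden
    double bias2 = 0.05;

    // Hidden layer: z2 = weights1^T x + bias1, a2 = sigmoid(z2).
    std::vector<double> a2(3);
    for (int j = 0; j < 3; j++) {
        double z2 = bias1[j];
        for (int i = 0; i < 2; i++) {
            z2 += weights1[i][j] * x[i];
        }
        a2[j] = sigmoid(z2);
    }

    // Output: y_hat = sigmoid(weights2 . a2 + bias2).
    double z3 = bias2;
    for (int j = 0; j < 3; j++) {
        z3 += weights2[j] * a2[j];
    }
    std::cout << sigmoid(z3) << std::endl;
    return 0;
}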

View File

@ -17,86 +17,86 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ
weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
bias = Utilities::biasInitialization(n_output);
activation_map["Linear"] = &Activation::linear;
activationTest_map["Linear"] = &Activation::linear;
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;
activation_map["Sigmoid"] = &Activation::sigmoid;
activationTest_map["Sigmoid"] = &Activation::sigmoid;
activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
activation_map["Softmax"] = &Activation::softmax;
activationTest_map["Softmax"] = &Activation::softmax;
activation_map["Softmax"] = &MLPPActivation::softmax;
activationTest_map["Softmax"] = &MLPPActivation::softmax;
activation_map["Swish"] = &Activation::swish;
activationTest_map["Swish"] = &Activation::swish;
activation_map["Swish"] = &MLPPActivation::swish;
activationTest_map["Swish"] = &MLPPActivation::swish;
activation_map["Mish"] = &Activation::mish;
activationTest_map["Mish"] = &Activation::mish;
activation_map["Mish"] = &MLPPActivation::mish;
activationTest_map["Mish"] = &MLPPActivation::mish;
activation_map["SinC"] = &Activation::sinc;
activationTest_map["SinC"] = &Activation::sinc;
activation_map["SinC"] = &MLPPActivation::sinc;
activationTest_map["SinC"] = &MLPPActivation::sinc;
activation_map["Softplus"] = &Activation::softplus;
activationTest_map["Softplus"] = &Activation::softplus;
activation_map["Softplus"] = &MLPPActivation::softplus;
activationTest_map["Softplus"] = &MLPPActivation::softplus;
activation_map["Softsign"] = &Activation::softsign;
activationTest_map["Softsign"] = &Activation::softsign;
activation_map["Softsign"] = &MLPPActivation::softsign;
activationTest_map["Softsign"] = &MLPPActivation::softsign;
activation_map["CLogLog"] = &Activation::cloglog;
activationTest_map["CLogLog"] = &Activation::cloglog;
activation_map["CLogLog"] = &MLPPActivation::cloglog;
activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
activation_map["Logit"] = &Activation::logit;
activationTest_map["Logit"] = &Activation::logit;
activation_map["Logit"] = &MLPPActivation::logit;
activationTest_map["Logit"] = &MLPPActivation::logit;
activation_map["GaussianCDF"] = &Activation::gaussianCDF;
activationTest_map["GaussianCDF"] = &Activation::gaussianCDF;
activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activation_map["RELU"] = &Activation::RELU;
activationTest_map["RELU"] = &Activation::RELU;
activation_map["RELU"] = &MLPPActivation::RELU;
activationTest_map["RELU"] = &MLPPActivation::RELU;
activation_map["GELU"] = &Activation::GELU;
activationTest_map["GELU"] = &Activation::GELU;
activation_map["GELU"] = &MLPPActivation::GELU;
activationTest_map["GELU"] = &MLPPActivation::GELU;
activation_map["Sign"] = &Activation::sign;
activationTest_map["Sign"] = &Activation::sign;
activation_map["Sign"] = &MLPPActivation::sign;
activationTest_map["Sign"] = &MLPPActivation::sign;
activation_map["UnitStep"] = &Activation::unitStep;
activationTest_map["UnitStep"] = &Activation::unitStep;
activation_map["UnitStep"] = &MLPPActivation::unitStep;
activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
activation_map["Sinh"] = &Activation::sinh;
activationTest_map["Sinh"] = &Activation::sinh;
activation_map["Sinh"] = &MLPPActivation::sinh;
activationTest_map["Sinh"] = &MLPPActivation::sinh;
activation_map["Cosh"] = &Activation::cosh;
activationTest_map["Cosh"] = &Activation::cosh;
activation_map["Cosh"] = &MLPPActivation::cosh;
activationTest_map["Cosh"] = &MLPPActivation::cosh;
activation_map["Tanh"] = &Activation::tanh;
activationTest_map["Tanh"] = &Activation::tanh;
activation_map["Tanh"] = &MLPPActivation::tanh;
activationTest_map["Tanh"] = &MLPPActivation::tanh;
activation_map["Csch"] = &Activation::csch;
activationTest_map["Csch"] = &Activation::csch;
activation_map["Csch"] = &MLPPActivation::csch;
activationTest_map["Csch"] = &MLPPActivation::csch;
activation_map["Sech"] = &Activation::sech;
activationTest_map["Sech"] = &Activation::sech;
activation_map["Sech"] = &MLPPActivation::sech;
activationTest_map["Sech"] = &MLPPActivation::sech;
activation_map["Coth"] = &Activation::coth;
activationTest_map["Coth"] = &Activation::coth;
activation_map["Coth"] = &MLPPActivation::coth;
activationTest_map["Coth"] = &MLPPActivation::coth;
activation_map["Arsinh"] = &Activation::arsinh;
activationTest_map["Arsinh"] = &Activation::arsinh;
activation_map["Arsinh"] = &MLPPActivation::arsinh;
activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
activation_map["Arcosh"] = &Activation::arcosh;
activationTest_map["Arcosh"] = &Activation::arcosh;
activation_map["Arcosh"] = &MLPPActivation::arcosh;
activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
activation_map["Artanh"] = &Activation::artanh;
activationTest_map["Artanh"] = &Activation::artanh;
activation_map["Artanh"] = &MLPPActivation::artanh;
activationTest_map["Artanh"] = &MLPPActivation::artanh;
activation_map["Arcsch"] = &Activation::arcsch;
activationTest_map["Arcsch"] = &Activation::arcsch;
activation_map["Arcsch"] = &MLPPActivation::arcsch;
activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
activation_map["Arsech"] = &Activation::arsech;
activationTest_map["Arsech"] = &Activation::arsech;
activation_map["Arsech"] = &MLPPActivation::arsech;
activationTest_map["Arsech"] = &MLPPActivation::arsech;
activation_map["Arcoth"] = &Activation::arcoth;
activationTest_map["Arcoth"] = &Activation::arcoth;
activation_map["Arcoth"] = &MLPPActivation::arcoth;
activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
costDeriv_map["MSE"] = &Cost::MSEDeriv;
cost_map["MSE"] = &Cost::MSE;
@ -118,14 +118,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ
void MultiOutputLayer::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = alg.mat_vec_add(alg.matmult(input, weights), bias);
a = (avn.*activation_map[activation])(z, 0);
}
void MultiOutputLayer::Test(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}
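The (avn.*activation_map[activation])(z, 0) calls above dispatch through a std::map keyed by activation name whose values are pointer-to-member functions. A minimal, self-contained sketch of that pattern follows (Act, Fn, and the two sample activations are hypothetical stand-ins, not part of this commit):

#include <cmath>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for MLPPActivation, reduced to two members.
struct Act {
	std::vector<double> sigmoid(std::vector<double> z, bool deriv) {
		for (double &v : z) {
			double s = 1.0 / (1.0 + std::exp(-v));
			v = deriv ? s * (1.0 - s) : s;
		}
		return z;
	}
	std::vector<double> linear(std::vector<double> z, bool deriv) {
		return deriv ? std::vector<double>(z.size(), 1.0) : z;
	}
};

int main() {
	// Same shape as activation_map: name -> pointer to member function.
	using Fn = std::vector<double> (Act::*)(std::vector<double>, bool);
	std::map<std::string, Fn> activation_map;
	activation_map["Sigmoid"] = &Act::sigmoid;
	activation_map["Linear"] = &Act::linear;

	Act avn;
	std::vector<double> z = { -1.0, 0.0, 1.0 };
	// (object.*member_pointer)(args) invokes the activation selected by name.
	std::vector<double> a = (avn.*activation_map["Sigmoid"])(z, false);
	for (double v : a) {
		std::cout << v << " ";
	}
	std::cout << "\n";
	return 0;
}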

View File

@ -33,8 +33,8 @@ public:
std::vector<std::vector<double>> z;
std::vector<std::vector<double>> a;
std::map<std::string, std::vector<std::vector<double>> (Activation::*)(std::vector<std::vector<double>>, bool)> activation_map;
std::map<std::string, std::vector<double> (Activation::*)(std::vector<double>, bool)> activationTest_map;
std::map<std::string, std::vector<std::vector<double>> (MLPPActivation::*)(std::vector<std::vector<double>>, bool)> activation_map;
std::map<std::string, std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)> activationTest_map;
std::map<std::string, double (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> cost_map;
std::map<std::string, std::vector<std::vector<double>> (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> costDeriv_map;

View File

@ -17,83 +17,83 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost,
weights = Utilities::weightInitialization(n_hidden, weightInit);
bias = Utilities::biasInitialization();
activation_map["Linear"] = &Activation::linear;
activationTest_map["Linear"] = &Activation::linear;
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;
activation_map["Sigmoid"] = &Activation::sigmoid;
activationTest_map["Sigmoid"] = &Activation::sigmoid;
activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid;
activation_map["Swish"] = &Activation::swish;
activationTest_map["Swish"] = &Activation::swish;
activation_map["Swish"] = &MLPPActivation::swish;
activationTest_map["Swish"] = &MLPPActivation::swish;
activation_map["Mish"] = &Activation::mish;
activationTest_map["Mish"] = &Activation::mish;
activation_map["Mish"] = &MLPPActivation::mish;
activationTest_map["Mish"] = &MLPPActivation::mish;
activation_map["SinC"] = &Activation::sinc;
activationTest_map["SinC"] = &Activation::sinc;
activation_map["SinC"] = &MLPPActivation::sinc;
activationTest_map["SinC"] = &MLPPActivation::sinc;
activation_map["Softplus"] = &Activation::softplus;
activationTest_map["Softplus"] = &Activation::softplus;
activation_map["Softplus"] = &MLPPActivation::softplus;
activationTest_map["Softplus"] = &MLPPActivation::softplus;
activation_map["Softsign"] = &Activation::softsign;
activationTest_map["Softsign"] = &Activation::softsign;
activation_map["Softsign"] = &MLPPActivation::softsign;
activationTest_map["Softsign"] = &MLPPActivation::softsign;
activation_map["CLogLog"] = &Activation::cloglog;
activationTest_map["CLogLog"] = &Activation::cloglog;
activation_map["CLogLog"] = &MLPPActivation::cloglog;
activationTest_map["CLogLog"] = &MLPPActivation::cloglog;
activation_map["Logit"] = &Activation::logit;
activationTest_map["Logit"] = &Activation::logit;
activation_map["Logit"] = &MLPPActivation::logit;
activationTest_map["Logit"] = &MLPPActivation::logit;
activation_map["GaussianCDF"] = &Activation::gaussianCDF;
activationTest_map["GaussianCDF"] = &Activation::gaussianCDF;
activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF;
activation_map["RELU"] = &Activation::RELU;
activationTest_map["RELU"] = &Activation::RELU;
activation_map["RELU"] = &MLPPActivation::RELU;
activationTest_map["RELU"] = &MLPPActivation::RELU;
activation_map["GELU"] = &Activation::GELU;
activationTest_map["GELU"] = &Activation::GELU;
activation_map["GELU"] = &MLPPActivation::GELU;
activationTest_map["GELU"] = &MLPPActivation::GELU;
activation_map["Sign"] = &Activation::sign;
activationTest_map["Sign"] = &Activation::sign;
activation_map["Sign"] = &MLPPActivation::sign;
activationTest_map["Sign"] = &MLPPActivation::sign;
activation_map["UnitStep"] = &Activation::unitStep;
activationTest_map["UnitStep"] = &Activation::unitStep;
activation_map["UnitStep"] = &MLPPActivation::unitStep;
activationTest_map["UnitStep"] = &MLPPActivation::unitStep;
activation_map["Sinh"] = &Activation::sinh;
activationTest_map["Sinh"] = &Activation::sinh;
activation_map["Sinh"] = &MLPPActivation::sinh;
activationTest_map["Sinh"] = &MLPPActivation::sinh;
activation_map["Cosh"] = &Activation::cosh;
activationTest_map["Cosh"] = &Activation::cosh;
activation_map["Cosh"] = &MLPPActivation::cosh;
activationTest_map["Cosh"] = &MLPPActivation::cosh;
activation_map["Tanh"] = &Activation::tanh;
activationTest_map["Tanh"] = &Activation::tanh;
activation_map["Tanh"] = &MLPPActivation::tanh;
activationTest_map["Tanh"] = &MLPPActivation::tanh;
activation_map["Csch"] = &Activation::csch;
activationTest_map["Csch"] = &Activation::csch;
activation_map["Csch"] = &MLPPActivation::csch;
activationTest_map["Csch"] = &MLPPActivation::csch;
activation_map["Sech"] = &Activation::sech;
activationTest_map["Sech"] = &Activation::sech;
activation_map["Sech"] = &MLPPActivation::sech;
activationTest_map["Sech"] = &MLPPActivation::sech;
activation_map["Coth"] = &Activation::coth;
activationTest_map["Coth"] = &Activation::coth;
activation_map["Coth"] = &MLPPActivation::coth;
activationTest_map["Coth"] = &MLPPActivation::coth;
activation_map["Arsinh"] = &Activation::arsinh;
activationTest_map["Arsinh"] = &Activation::arsinh;
activation_map["Arsinh"] = &MLPPActivation::arsinh;
activationTest_map["Arsinh"] = &MLPPActivation::arsinh;
activation_map["Arcosh"] = &Activation::arcosh;
activationTest_map["Arcosh"] = &Activation::arcosh;
activation_map["Arcosh"] = &MLPPActivation::arcosh;
activationTest_map["Arcosh"] = &MLPPActivation::arcosh;
activation_map["Artanh"] = &Activation::artanh;
activationTest_map["Artanh"] = &Activation::artanh;
activation_map["Artanh"] = &MLPPActivation::artanh;
activationTest_map["Artanh"] = &MLPPActivation::artanh;
activation_map["Arcsch"] = &Activation::arcsch;
activationTest_map["Arcsch"] = &Activation::arcsch;
activation_map["Arcsch"] = &MLPPActivation::arcsch;
activationTest_map["Arcsch"] = &MLPPActivation::arcsch;
activation_map["Arsech"] = &Activation::arsech;
activationTest_map["Arsech"] = &Activation::arsech;
activation_map["Arsech"] = &MLPPActivation::arsech;
activationTest_map["Arsech"] = &MLPPActivation::arsech;
activation_map["Arcoth"] = &Activation::arcoth;
activationTest_map["Arcoth"] = &Activation::arcoth;
activation_map["Arcoth"] = &MLPPActivation::arcoth;
activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
costDeriv_map["MSE"] = &Cost::MSEDeriv;
cost_map["MSE"] = &Cost::MSE;
@ -115,14 +115,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost,
void OutputLayer::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
a = (avn.*activation_map[activation])(z, 0);
}
void OutputLayer::Test(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z_test = alg.dot(weights, x) + bias;
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}

View File

@ -32,8 +32,8 @@ public:
std::vector<double> z;
std::vector<double> a;
std::map<std::string, std::vector<double> (Activation::*)(std::vector<double>, bool)> activation_map;
std::map<std::string, double (Activation::*)(double, bool)> activationTest_map;
std::map<std::string, std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)> activation_map;
std::map<std::string, double (MLPPActivation::*)(double, bool)> activationTest_map;
std::map<std::string, double (Cost::*)(std::vector<double>, std::vector<double>)> cost_map;
std::map<std::string, std::vector<double> (Cost::*)(std::vector<double>, std::vector<double>)> costDeriv_map;

View File

@ -31,7 +31,7 @@ double ProbitReg::modelTest(std::vector<double> x) {
}
void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -64,7 +64,7 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -98,7 +98,7 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
// NOTE: ∂y_hat/∂z is sparse
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -139,7 +139,7 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -215,7 +215,7 @@ double ProbitReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
std::vector<double> ProbitReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
@ -226,7 +226,7 @@ std::vector<double> ProbitReg::propagate(std::vector<std::vector<double>> X) {
double ProbitReg::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.gaussianCDF(alg.dot(weights, x) + bias);
}
@ -238,7 +238,7 @@ double ProbitReg::propagate(std::vector<double> x) {
// gaussianCDF ( wTx + b )
void ProbitReg::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = propagate(inputSet);
y_hat = avn.gaussianCDF(z);
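As the comment above indicates, the probit forward pass is, in my notation, \( \hat{y} = \Phi(Xw + b) \), where \( \Phi \) is the standard Gaussian CDF implemented by gaussianCDF.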

View File

@ -118,7 +118,7 @@ std::vector<std::vector<double>> Reg::regDerivTerm(std::vector<std::vector<doubl
}
double Reg::regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg, int j) {
Activation act;
MLPPActivation act;
if (reg == "Ridge") {
return lambda * weights[j];
} else if (reg == "Lasso") {
@ -141,7 +141,7 @@ double Reg::regDerivTerm(std::vector<double> weights, double lambda, double alph
}
double Reg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg, int i, int j) {
Activation act;
MLPPActivation act;
if (reg == "Ridge") {
return lambda * weights[i][j];
} else if (reg == "Lasso") {

View File

@ -35,7 +35,7 @@ std::vector<std::vector<double>> SoftmaxNet::modelSetTest(std::vector<std::vecto
}
void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -91,7 +91,7 @@ void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -145,7 +145,7 @@ void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
}
void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -252,7 +252,7 @@ double SoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std:
std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
@ -260,7 +260,7 @@ std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<do
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> SoftmaxNet::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<double>> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -268,7 +268,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> S
std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2));
@ -276,7 +276,7 @@ std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) {
std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<double> a2 = avn.sigmoid(z2);
return { z2, a2 };
@ -284,7 +284,7 @@ std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::
void SoftmaxNet::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);
y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
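Written out, the SoftmaxNet forward pass above is (notation mine): \( z_2 = X W_1 + b_1,\; a_2 = \sigma(z_2),\; \hat{y} = \mathrm{adjSoftmax}(a_2 W_2 + b_2) \), i.e. a sigmoid hidden layer followed by the adjSoftmax output activation.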

View File

@ -171,13 +171,13 @@ double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std:
std::vector<double> SoftmaxReg::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x)));
}
std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias));
}
@ -185,7 +185,7 @@ std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<do
// softmax ( wTx + b )
void SoftmaxReg::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
}

View File

@ -190,7 +190,7 @@ double Stat::heinzMean(const double A, const double B, const double x) {
}
double Stat::neumanSandorMean(const double a, const double b) {
Activation avn;
MLPPActivation avn;
return (a - b) / (2 * avn.arsinh((a - b) / (a + b)));
}
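For reference, the Neuman–Sándor mean computed above is conventionally defined, for a ≠ b, as \( \mathrm{NS}(a, b) = \dfrac{a - b}{2\,\operatorname{arsinh}\!\left(\frac{a - b}{a + b}\right)} \), which is the parenthesized expression returned here.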

View File

@ -32,7 +32,7 @@ double SVC::modelTest(std::vector<double> x) {
void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -65,7 +65,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@ -108,7 +108,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -165,32 +165,32 @@ double SVC::Cost(std::vector<double> z, std::vector<double> y, std::vector<doubl
std::vector<double> SVC::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> SVC::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double SVC::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.sign(alg.dot(weights, x) + bias);
}
double SVC::propagate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return alg.dot(weights, x) + bias;
}
// sign ( wTx + b )
void SVC::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = propagate(inputSet);
y_hat = avn.sign(z);

View File

@ -31,7 +31,7 @@ double TanhReg::modelTest(std::vector<double> x) {
}
void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -104,7 +104,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
}
void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@ -164,7 +164,7 @@ double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
std::vector<double> TanhReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
@ -175,7 +175,7 @@ std::vector<double> TanhReg::propagate(std::vector<std::vector<double>> X) {
double TanhReg::Evaluate(std::vector<double> x) {
LinAlg alg;
Activation avn;
MLPPActivation avn;
return avn.tanh(alg.dot(weights, x) + bias);
}
@ -187,7 +187,7 @@ double TanhReg::propagate(std::vector<double> x) {
// Tanh ( wTx + b )
void TanhReg::forwardPass() {
LinAlg alg;
Activation avn;
MLPPActivation avn;
z = propagate(inputSet);
y_hat = avn.tanh(z);

View File

@ -218,7 +218,7 @@ void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> WGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@ -254,7 +254,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> W
std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class Cost cost;
Activation avn;
MLPPActivation avn;
LinAlg alg;
Reg regularization;