diff --git a/main.cpp b/main.cpp index db813fb..040e031 100644 --- a/main.cpp +++ b/main.cpp @@ -122,7 +122,7 @@ int main() { // // OBJECTS Stat stat; LinAlg alg; - Activation avn; + MLPPActivation avn; Cost cost; Data data; Convolutions conv; diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp index 2bcf859..9f78d70 100644 --- a/mlpp/activation/activation.cpp +++ b/mlpp/activation/activation.cpp @@ -10,14 +10,14 @@ #include #include -double Activation::linear(double z, bool deriv) { +double MLPPActivation::linear(double z, bool deriv) { if (deriv) { return 1; } return z; } -std::vector Activation::linear(std::vector z, bool deriv) { +std::vector MLPPActivation::linear(std::vector z, bool deriv) { if (deriv) { LinAlg alg; return alg.onevec(z.size()); @@ -25,7 +25,7 @@ std::vector Activation::linear(std::vector z, bool deriv) { return z; } -std::vector> Activation::linear(std::vector> z, bool deriv) { +std::vector> MLPPActivation::linear(std::vector> z, bool deriv) { if (deriv) { LinAlg alg; return alg.onemat(z.size(), z[0].size()); @@ -33,14 +33,14 @@ std::vector> Activation::linear(std::vector Activation::sigmoid(std::vector z, bool deriv) { +std::vector MLPPActivation::sigmoid(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); @@ -48,7 +48,7 @@ std::vector Activation::sigmoid(std::vector z, bool deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.addition(alg.onevec(z.size()), alg.exp(alg.scalarMultiply(-1, z)))); } -std::vector> Activation::sigmoid(std::vector> z, bool deriv) { +std::vector> MLPPActivation::sigmoid(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); @@ -56,7 +56,7 @@ std::vector> Activation::sigmoid(std::vector Activation::softmax(std::vector z, bool deriv) { +std::vector MLPPActivation::softmax(std::vector z, bool deriv) { LinAlg alg; std::vector a; a.resize(z.size()); @@ -72,7 +72,7 @@ std::vector Activation::softmax(std::vector z, bool deriv) { return a; } -std::vector> Activation::softmax(std::vector> z, bool deriv) { +std::vector> MLPPActivation::softmax(std::vector> z, bool deriv) { LinAlg alg; std::vector> a; a.resize(z.size()); @@ -83,7 +83,7 @@ std::vector> Activation::softmax(std::vector Activation::adjSoftmax(std::vector z) { +std::vector MLPPActivation::adjSoftmax(std::vector z) { LinAlg alg; std::vector a; double C = -*std::max_element(z.begin(), z.end()); @@ -92,7 +92,7 @@ std::vector Activation::adjSoftmax(std::vector z) { return softmax(z); } -std::vector> Activation::adjSoftmax(std::vector> z) { +std::vector> MLPPActivation::adjSoftmax(std::vector> z) { LinAlg alg; std::vector> a; a.resize(z.size()); @@ -103,7 +103,7 @@ std::vector> Activation::adjSoftmax(std::vector> Activation::softmaxDeriv(std::vector z) { +std::vector> MLPPActivation::softmaxDeriv(std::vector z) { LinAlg alg; std::vector> deriv; std::vector a = softmax(z); @@ -123,7 +123,7 @@ std::vector> Activation::softmaxDeriv(std::vector z) return deriv; } -std::vector>> Activation::softmaxDeriv(std::vector> z) { +std::vector>> MLPPActivation::softmaxDeriv(std::vector> z) { LinAlg alg; std::vector>> deriv; std::vector> a = softmax(z); @@ -144,14 +144,14 @@ std::vector>> Activation::softmaxDeriv(std::vect return deriv; } -double Activation::softplus(double z, bool deriv) { +double MLPPActivation::softplus(double z, bool deriv) { if (deriv) { return sigmoid(z); } return std::log(1 + exp(z)); 
} -std::vector Activation::softplus(std::vector z, bool deriv) { +std::vector MLPPActivation::softplus(std::vector z, bool deriv) { if (deriv) { return sigmoid(z); } @@ -159,7 +159,7 @@ std::vector Activation::softplus(std::vector z, bool deriv) { return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z))); } -std::vector> Activation::softplus(std::vector> z, bool deriv) { +std::vector> MLPPActivation::softplus(std::vector> z, bool deriv) { if (deriv) { return sigmoid(z); } @@ -167,14 +167,14 @@ std::vector> Activation::softplus(std::vector Activation::softsign(std::vector z, bool deriv) { +std::vector MLPPActivation::softsign(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2)); @@ -182,7 +182,7 @@ std::vector Activation::softsign(std::vector z, bool deriv) { return alg.elementWiseDivision(z, alg.addition(alg.onevec(z.size()), alg.abs(z))); } -std::vector> Activation::softsign(std::vector> z, bool deriv) { +std::vector> MLPPActivation::softsign(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2)); @@ -190,14 +190,14 @@ std::vector> Activation::softsign(std::vector Activation::gaussianCDF(std::vector z, bool deriv) { +std::vector MLPPActivation::gaussianCDF(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); @@ -205,7 +205,7 @@ std::vector Activation::gaussianCDF(std::vector z, bool deriv) { return alg.scalarMultiply(0.5, alg.addition(alg.onevec(z.size()), alg.erf(alg.scalarMultiply(1 / sqrt(2), z)))); } -std::vector> Activation::gaussianCDF(std::vector> z, bool deriv) { +std::vector> MLPPActivation::gaussianCDF(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); @@ -213,14 +213,14 @@ std::vector> Activation::gaussianCDF(std::vector Activation::cloglog(std::vector z, bool deriv) { +std::vector MLPPActivation::cloglog(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); @@ -228,7 +228,7 @@ std::vector Activation::cloglog(std::vector z, bool deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.exp(alg.scalarMultiply(-1, alg.exp(z))))); } -std::vector> Activation::cloglog(std::vector> z, bool deriv) { +std::vector> MLPPActivation::cloglog(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); @@ -236,14 +236,14 @@ std::vector> Activation::cloglog(std::vector Activation::logit(std::vector z, bool deriv) { +std::vector MLPPActivation::logit(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size())))); @@ -251,7 +251,7 @@ std::vector Activation::logit(std::vector z, bool deriv) { return alg.log(alg.elementWiseDivision(z, alg.subtraction(alg.onevec(z.size()), z))); } -std::vector> Activation::logit(std::vector> z, bool deriv) { +std::vector> MLPPActivation::logit(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), 
alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size())))); @@ -259,14 +259,14 @@ std::vector> Activation::logit(std::vector Activation::unitStep(std::vector z, bool deriv) { +std::vector MLPPActivation::unitStep(std::vector z, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -284,7 +284,7 @@ std::vector Activation::unitStep(std::vector z, bool deriv) { return a; } -std::vector> Activation::unitStep(std::vector> z, bool deriv) { +std::vector> MLPPActivation::unitStep(std::vector> z, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -302,14 +302,14 @@ std::vector> Activation::unitStep(std::vector Activation::swish(std::vector z, bool deriv) { +std::vector MLPPActivation::swish(std::vector z, bool deriv) { LinAlg alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); @@ -317,7 +317,7 @@ std::vector Activation::swish(std::vector z, bool deriv) { return alg.hadamard_product(z, sigmoid(z)); } -std::vector> Activation::swish(std::vector> z, bool deriv) { +std::vector> MLPPActivation::swish(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); @@ -325,14 +325,14 @@ std::vector> Activation::swish(std::vector Activation::mish(std::vector z, bool deriv) { +std::vector MLPPActivation::mish(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); @@ -340,7 +340,7 @@ std::vector Activation::mish(std::vector z, bool deriv) { return alg.hadamard_product(z, tanh(softplus(z))); } -std::vector> Activation::mish(std::vector> z, bool deriv) { +std::vector> MLPPActivation::mish(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); @@ -348,14 +348,14 @@ std::vector> Activation::mish(std::vector Activation::sinc(std::vector z, bool deriv) { +std::vector MLPPActivation::sinc(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); @@ -363,7 +363,7 @@ std::vector Activation::sinc(std::vector z, bool deriv) { return alg.elementWiseDivision(alg.sin(z), z); } -std::vector> Activation::sinc(std::vector> z, bool deriv) { +std::vector> MLPPActivation::sinc(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); @@ -371,7 +371,7 @@ std::vector> Activation::sinc(std::vector Activation::RELU(std::vector z, bool deriv) { +std::vector MLPPActivation::RELU(std::vector z, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -400,7 +400,7 @@ std::vector Activation::RELU(std::vector z, bool deriv) { return a; } -std::vector> Activation::RELU(std::vector> z, bool deriv) { +std::vector> MLPPActivation::RELU(std::vector> z, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -418,7 +418,7 @@ std::vector> Activation::RELU(std::vector Activation::leakyReLU(std::vector z, double c, bool deriv) { +std::vector MLPPActivation::leakyReLU(std::vector z, 
double c, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -447,7 +447,7 @@ std::vector Activation::leakyReLU(std::vector z, double c, bool return a; } -std::vector> Activation::leakyReLU(std::vector> z, double c, bool deriv) { +std::vector> MLPPActivation::leakyReLU(std::vector> z, double c, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -465,7 +465,7 @@ std::vector> Activation::leakyReLU(std::vector Activation::ELU(std::vector z, double c, bool deriv) { +std::vector MLPPActivation::ELU(std::vector z, double c, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -498,7 +498,7 @@ std::vector Activation::ELU(std::vector z, double c, bool deriv) return a; } -std::vector> Activation::ELU(std::vector> z, double c, bool deriv) { +std::vector> MLPPActivation::ELU(std::vector> z, double c, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -516,14 +516,14 @@ std::vector> Activation::ELU(std::vector return a; } -double Activation::SELU(double z, double lambda, double c, bool deriv) { +double MLPPActivation::SELU(double z, double lambda, double c, bool deriv) { if (deriv) { return ELU(z, c, 1); } return lambda * ELU(z, c); } -std::vector Activation::SELU(std::vector z, double lambda, double c, bool deriv) { +std::vector MLPPActivation::SELU(std::vector z, double lambda, double c, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -541,7 +541,7 @@ std::vector Activation::SELU(std::vector z, double lambda, doubl return a; } -std::vector> Activation::SELU(std::vector> z, double lambda, double c, bool deriv) { +std::vector> MLPPActivation::SELU(std::vector> z, double lambda, double c, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -559,14 +559,14 @@ std::vector> Activation::SELU(std::vector Activation::GELU(std::vector z, bool deriv) { +std::vector MLPPActivation::GELU(std::vector z, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -584,7 +584,7 @@ std::vector Activation::GELU(std::vector z, bool deriv) { return a; } -std::vector> Activation::GELU(std::vector> z, bool deriv) { +std::vector> MLPPActivation::GELU(std::vector> z, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -602,7 +602,7 @@ std::vector> Activation::GELU(std::vector Activation::sign(std::vector z, bool deriv) { +std::vector MLPPActivation::sign(std::vector z, bool deriv) { if (deriv) { std::vector deriv; deriv.resize(z.size()); @@ -633,7 +633,7 @@ std::vector Activation::sign(std::vector z, bool deriv) { return a; } -std::vector> Activation::sign(std::vector> z, bool deriv) { +std::vector> MLPPActivation::sign(std::vector> z, bool deriv) { if (deriv) { std::vector> deriv; deriv.resize(z.size()); @@ -651,14 +651,14 @@ std::vector> Activation::sign(std::vector Activation::sinh(std::vector z, bool deriv) { +std::vector MLPPActivation::sinh(std::vector z, bool deriv) { if (deriv) { return cosh(z); } @@ -666,7 +666,7 @@ std::vector Activation::sinh(std::vector z, bool deriv) { return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } -std::vector> Activation::sinh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::sinh(std::vector> z, bool deriv) { if (deriv) { return cosh(z); } @@ -674,14 +674,14 @@ std::vector> Activation::sinh(std::vector Activation::cosh(std::vector z, bool deriv) { +std::vector MLPPActivation::cosh(std::vector z, bool deriv) { if (deriv) { return sinh(z); } @@ -689,7 +689,7 @@ 
std::vector Activation::cosh(std::vector z, bool deriv) { return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } -std::vector> Activation::cosh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::cosh(std::vector> z, bool deriv) { if (deriv) { return sinh(z); } @@ -697,14 +697,14 @@ std::vector> Activation::cosh(std::vector Activation::tanh(std::vector z, bool deriv) { +std::vector MLPPActivation::tanh(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); @@ -712,7 +712,7 @@ std::vector Activation::tanh(std::vector z, bool deriv) { return alg.elementWiseDivision(alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))), alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } -std::vector> Activation::tanh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::tanh(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); @@ -721,14 +721,14 @@ std::vector> Activation::tanh(std::vector Activation::csch(std::vector z, bool deriv) { +std::vector MLPPActivation::csch(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); @@ -736,7 +736,7 @@ std::vector Activation::csch(std::vector z, bool deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), sinh(z)); } -std::vector> Activation::csch(std::vector> z, bool deriv) { +std::vector> MLPPActivation::csch(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); @@ -744,14 +744,14 @@ std::vector> Activation::csch(std::vector Activation::sech(std::vector z, bool deriv) { +std::vector MLPPActivation::sech(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); @@ -761,7 +761,7 @@ std::vector Activation::sech(std::vector z, bool deriv) { // return activation(z, deriv, static_cast(&sech)); } -std::vector> Activation::sech(std::vector> z, bool deriv) { +std::vector> MLPPActivation::sech(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); @@ -771,14 +771,14 @@ std::vector> Activation::sech(std::vector(&sech)); } -double Activation::coth(double z, bool deriv) { +double MLPPActivation::coth(double z, bool deriv) { if (deriv) { return -csch(z) * csch(z); } return 1 / tanh(z); } -std::vector Activation::coth(std::vector z, bool deriv) { +std::vector MLPPActivation::coth(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); @@ -786,7 +786,7 @@ std::vector Activation::coth(std::vector z, bool deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), tanh(z)); } -std::vector> Activation::coth(std::vector> z, bool deriv) { +std::vector> MLPPActivation::coth(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); @@ -794,14 +794,14 @@ std::vector> Activation::coth(std::vector Activation::arsinh(std::vector z, bool deriv) { +std::vector MLPPActivation::arsinh(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size())))); @@ -809,7 +809,7 @@ std::vector Activation::arsinh(std::vector 
z, bool deriv) { return alg.log(alg.addition(z, alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size()))))); } -std::vector> Activation::arsinh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::arsinh(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); @@ -817,14 +817,14 @@ std::vector> Activation::arsinh(std::vector Activation::arcosh(std::vector z, bool deriv) { +std::vector MLPPActivation::arcosh(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size())))); @@ -832,7 +832,7 @@ std::vector Activation::arcosh(std::vector z, bool deriv) { return alg.log(alg.addition(z, alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size()))))); } -std::vector> Activation::arcosh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::arcosh(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); @@ -840,14 +840,14 @@ std::vector> Activation::arcosh(std::vector Activation::artanh(std::vector z, bool deriv) { +std::vector MLPPActivation::artanh(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); @@ -855,7 +855,7 @@ std::vector Activation::artanh(std::vector z, bool deriv) { return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(alg.onevec(z.size()), z)))); } -std::vector> Activation::artanh(std::vector> z, bool deriv) { +std::vector> MLPPActivation::artanh(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); @@ -863,14 +863,14 @@ std::vector> Activation::artanh(std::vector Activation::arcsch(std::vector z, bool deriv) { +std::vector MLPPActivation::arcsch(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))))); @@ -878,7 +878,7 @@ std::vector Activation::arcsch(std::vector z, bool deriv) { return alg.log(alg.addition(alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))), alg.elementWiseDivision(alg.onevec(z.size()), z))); } -std::vector> Activation::arcsch(std::vector> z, bool deriv) { +std::vector> MLPPActivation::arcsch(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))))); @@ -886,14 +886,14 @@ std::vector> Activation::arcsch(std::vector Activation::arsech(std::vector z, bool deriv) { +std::vector MLPPActivation::arsech(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, 
alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))))); @@ -901,7 +901,7 @@ std::vector Activation::arsech(std::vector z, bool deriv) { return alg.log(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.hadamard_product(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size())), alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size()))))); } -std::vector> Activation::arsech(std::vector> z, bool deriv) { +std::vector> MLPPActivation::arsech(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))); @@ -909,14 +909,14 @@ std::vector> Activation::arsech(std::vector Activation::arcoth(std::vector z, bool deriv) { +std::vector MLPPActivation::arcoth(std::vector z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); @@ -924,7 +924,7 @@ std::vector Activation::arcoth(std::vector z, bool deriv) { return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(z, alg.onevec(z.size()))))); } -std::vector> Activation::arcoth(std::vector> z, bool deriv) { +std::vector> MLPPActivation::arcoth(std::vector> z, bool deriv) { LinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); @@ -933,7 +933,7 @@ std::vector> Activation::arcoth(std::vector Activation::activation(std::vector z, bool deriv, double (*function)(double, bool)) { +std::vector MLPPActivation::activation(std::vector z, bool deriv, double (*function)(double, bool)) { if (deriv) { std::vector deriv; deriv.resize(z.size()); diff --git a/mlpp/activation/activation.h b/mlpp/activation/activation.h index 65f6cec..380faf5 100644 --- a/mlpp/activation/activation.h +++ b/mlpp/activation/activation.h @@ -10,7 +10,7 @@ #include -class Activation { +class MLPPActivation { public: double linear(double z, bool deriv = 0); std::vector linear(std::vector z, bool deriv = 0); diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index a560f2f..efda628 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -720,7 +720,7 @@ void ANN::updateParameters(std::vector>> hiddenL std::tuple>>, std::vector> ANN::computeGradients(std::vector y_hat, std::vector outputSet) { // std::cout << "BEGIN" << std::endl; class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp index d04bc58..1f77f07 100644 --- a/mlpp/auto_encoder/auto_encoder.cpp +++ b/mlpp/auto_encoder/auto_encoder.cpp @@ -15,7 +15,7 @@ AutoEncoder::AutoEncoder(std::vector> inputSet, int n_hidden) : inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) { - Activation avn; + MLPPActivation avn; y_hat.resize(inputSet.size()); weights1 = Utilities::weightInitialization(k, n_hidden); @@ -33,7 +33,7 @@ std::vector AutoEncoder::modelTest(std::vector x) { } void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -86,7 +86,7 @@ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) } void 
AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -137,7 +137,7 @@ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { } void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -214,7 +214,7 @@ double AutoEncoder::Cost(std::vector> y_hat, std::vector> AutoEncoder::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return alg.mat_vec_add(alg.matmult(a2, weights2), bias2); @@ -222,7 +222,7 @@ std::vector> AutoEncoder::Evaluate(std::vector>, std::vector>> AutoEncoder::propagate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -230,7 +230,7 @@ std::tuple>, std::vector>> A std::vector AutoEncoder::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2); @@ -238,7 +238,7 @@ std::vector AutoEncoder::Evaluate(std::vector x) { std::tuple, std::vector> AutoEncoder::propagate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -246,7 +246,7 @@ std::tuple, std::vector> AutoEncoder::propagate(std: void AutoEncoder::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2); diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index b90b232..616a15c 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -30,7 +30,7 @@ double CLogLogReg::modelTest(std::vector x) { } void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -64,7 +64,7 @@ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -137,7 +137,7 @@ void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { } void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -192,7 +192,7 @@ double CLogLogReg::Cost(std::vector y_hat, std::vector y) { std::vector CLogLogReg::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -203,7 +203,7 @@ std::vector CLogLogReg::propagate(std::vector> X) { double CLogLogReg::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.cloglog(alg.dot(weights, x) + bias); } @@ -215,7 +215,7 @@ double CLogLogReg::propagate(std::vector x) { 
// cloglog ( wTx + b ) void CLogLogReg::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = propagate(inputSet); y_hat = avn.cloglog(z); diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index 3e6396b..d33c194 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -33,7 +33,7 @@ double DualSVC::modelTest(std::vector x) { void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -81,7 +81,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { // void DualSVC::SGD(double learning_rate, int max_epoch, bool UI){ // class Cost cost; -// Activation avn; +// MLPPActivation avn; // LinAlg alg; // Reg regularization; @@ -114,7 +114,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { // void DualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){ // class Cost cost; -// Activation avn; +// MLPPActivation avn; // LinAlg alg; // Reg regularization; // double cost_prev = 0; @@ -168,7 +168,7 @@ double DualSVC::Cost(std::vector alpha, std::vector> } std::vector DualSVC::Evaluate(std::vector> X) { - Activation avn; + MLPPActivation avn; return avn.sign(propagate(X)); } @@ -189,7 +189,7 @@ std::vector DualSVC::propagate(std::vector> X) { } double DualSVC::Evaluate(std::vector x) { - Activation avn; + MLPPActivation avn; return avn.sign(propagate(x)); } @@ -207,7 +207,7 @@ double DualSVC::propagate(std::vector x) { void DualSVC::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = propagate(inputSet); y_hat = avn.sign(z); diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp index a2f2ac0..809a54d 100644 --- a/mlpp/gan/gan.cpp +++ b/mlpp/gan/gan.cpp @@ -209,7 +209,7 @@ void GAN::updateGeneratorParameters(std::vector> std::tuple>>, std::vector> GAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; @@ -245,7 +245,7 @@ std::tuple>>, std::vector> G std::vector>> GAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp index a283116..af512eb 100644 --- a/mlpp/hidden_layer/hidden_layer.cpp +++ b/mlpp/hidden_layer/hidden_layer.cpp @@ -18,95 +18,95 @@ HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, 0); } diff --git a/mlpp/hidden_layer/hidden_layer.h b/mlpp/hidden_layer/hidden_layer.h index 3ce5963..cfab1e6 100644 --- a/mlpp/hidden_layer/hidden_layer.h +++ b/mlpp/hidden_layer/hidden_layer.h @@ -30,8 +30,8 @@ public: std::vector> z; std::vector> a; - std::map> (Activation::*)(std::vector>, bool)> activation_map; - std::map (Activation::*)(std::vector, bool)> activationTest_map; + std::map> (MLPPActivation::*)(std::vector>, bool)> activation_map; + std::map (MLPPActivation::*)(std::vector, bool)> activationTest_map; std::vector z_test; std::vector a_test; diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp index 93df77d..5aacf3a 100644 --- a/mlpp/log_reg/log_reg.cpp +++ b/mlpp/log_reg/log_reg.cpp @@ -188,13 
+188,13 @@ double LogReg::Cost(std::vector y_hat, std::vector y) { std::vector LogReg::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } double LogReg::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.sigmoid(alg.dot(weights, x) + bias); } diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp index 134aa25..d42f471 100644 --- a/mlpp/mann/mann.cpp +++ b/mlpp/mann/mann.cpp @@ -54,7 +54,7 @@ std::vector MANN::modelTest(std::vector x) { void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index 68b534e..833b280 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -18,7 +18,7 @@ MLP::MLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { - Activation avn; + MLPPActivation avn; y_hat.resize(n); weights1 = Utilities::weightInitialization(k, n_hidden); @@ -36,7 +36,7 @@ double MLP::modelTest(std::vector x) { } void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -95,7 +95,7 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void MLP::SGD(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -149,7 +149,7 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) { } void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -233,7 +233,7 @@ double MLP::Cost(std::vector y_hat, std::vector y) { std::vector MLP::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2))); @@ -241,7 +241,7 @@ std::vector MLP::Evaluate(std::vector> X) { std::tuple>, std::vector>> MLP::propagate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -249,7 +249,7 @@ std::tuple>, std::vector>> M double MLP::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return avn.sigmoid(alg.dot(weights2, a2) + bias2); @@ -257,7 +257,7 @@ double MLP::Evaluate(std::vector x) { std::tuple, std::vector> MLP::propagate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -265,7 +265,7 @@ std::tuple, std::vector> MLP::propagate(std::vector< void MLP::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, 
weights2))); diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp index 370ae0c..2a6af1e 100644 --- a/mlpp/multi_output_layer/multi_output_layer.cpp +++ b/mlpp/multi_output_layer/multi_output_layer.cpp @@ -17,86 +17,86 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ weights = Utilities::weightInitialization(n_hidden, n_output, weightInit); bias = Utilities::biasInitialization(n_output); - activation_map["Linear"] = &Activation::linear; - activationTest_map["Linear"] = &Activation::linear; + activation_map["Linear"] = &MLPPActivation::linear; + activationTest_map["Linear"] = &MLPPActivation::linear; - activation_map["Sigmoid"] = &Activation::sigmoid; - activationTest_map["Sigmoid"] = &Activation::sigmoid; + activation_map["Sigmoid"] = &MLPPActivation::sigmoid; + activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid; - activation_map["Softmax"] = &Activation::softmax; - activationTest_map["Softmax"] = &Activation::softmax; + activation_map["Softmax"] = &MLPPActivation::softmax; + activationTest_map["Softmax"] = &MLPPActivation::softmax; - activation_map["Swish"] = &Activation::swish; - activationTest_map["Swish"] = &Activation::swish; + activation_map["Swish"] = &MLPPActivation::swish; + activationTest_map["Swish"] = &MLPPActivation::swish; - activation_map["Mish"] = &Activation::mish; - activationTest_map["Mish"] = &Activation::mish; + activation_map["Mish"] = &MLPPActivation::mish; + activationTest_map["Mish"] = &MLPPActivation::mish; - activation_map["SinC"] = &Activation::sinc; - activationTest_map["SinC"] = &Activation::sinc; + activation_map["SinC"] = &MLPPActivation::sinc; + activationTest_map["SinC"] = &MLPPActivation::sinc; - activation_map["Softplus"] = &Activation::softplus; - activationTest_map["Softplus"] = &Activation::softplus; + activation_map["Softplus"] = &MLPPActivation::softplus; + activationTest_map["Softplus"] = &MLPPActivation::softplus; - activation_map["Softsign"] = &Activation::softsign; - activationTest_map["Softsign"] = &Activation::softsign; + activation_map["Softsign"] = &MLPPActivation::softsign; + activationTest_map["Softsign"] = &MLPPActivation::softsign; - activation_map["CLogLog"] = &Activation::cloglog; - activationTest_map["CLogLog"] = &Activation::cloglog; + activation_map["CLogLog"] = &MLPPActivation::cloglog; + activationTest_map["CLogLog"] = &MLPPActivation::cloglog; - activation_map["Logit"] = &Activation::logit; - activationTest_map["Logit"] = &Activation::logit; + activation_map["Logit"] = &MLPPActivation::logit; + activationTest_map["Logit"] = &MLPPActivation::logit; - activation_map["GaussianCDF"] = &Activation::gaussianCDF; - activationTest_map["GaussianCDF"] = &Activation::gaussianCDF; + activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; + activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; - activation_map["RELU"] = &Activation::RELU; - activationTest_map["RELU"] = &Activation::RELU; + activation_map["RELU"] = &MLPPActivation::RELU; + activationTest_map["RELU"] = &MLPPActivation::RELU; - activation_map["GELU"] = &Activation::GELU; - activationTest_map["GELU"] = &Activation::GELU; + activation_map["GELU"] = &MLPPActivation::GELU; + activationTest_map["GELU"] = &MLPPActivation::GELU; - activation_map["Sign"] = &Activation::sign; - activationTest_map["Sign"] = &Activation::sign; + activation_map["Sign"] = &MLPPActivation::sign; + activationTest_map["Sign"] = &MLPPActivation::sign; - activation_map["UnitStep"] = 
&Activation::unitStep; - activationTest_map["UnitStep"] = &Activation::unitStep; + activation_map["UnitStep"] = &MLPPActivation::unitStep; + activationTest_map["UnitStep"] = &MLPPActivation::unitStep; - activation_map["Sinh"] = &Activation::sinh; - activationTest_map["Sinh"] = &Activation::sinh; + activation_map["Sinh"] = &MLPPActivation::sinh; + activationTest_map["Sinh"] = &MLPPActivation::sinh; - activation_map["Cosh"] = &Activation::cosh; - activationTest_map["Cosh"] = &Activation::cosh; + activation_map["Cosh"] = &MLPPActivation::cosh; + activationTest_map["Cosh"] = &MLPPActivation::cosh; - activation_map["Tanh"] = &Activation::tanh; - activationTest_map["Tanh"] = &Activation::tanh; + activation_map["Tanh"] = &MLPPActivation::tanh; + activationTest_map["Tanh"] = &MLPPActivation::tanh; - activation_map["Csch"] = &Activation::csch; - activationTest_map["Csch"] = &Activation::csch; + activation_map["Csch"] = &MLPPActivation::csch; + activationTest_map["Csch"] = &MLPPActivation::csch; - activation_map["Sech"] = &Activation::sech; - activationTest_map["Sech"] = &Activation::sech; + activation_map["Sech"] = &MLPPActivation::sech; + activationTest_map["Sech"] = &MLPPActivation::sech; - activation_map["Coth"] = &Activation::coth; - activationTest_map["Coth"] = &Activation::coth; + activation_map["Coth"] = &MLPPActivation::coth; + activationTest_map["Coth"] = &MLPPActivation::coth; - activation_map["Arsinh"] = &Activation::arsinh; - activationTest_map["Arsinh"] = &Activation::arsinh; + activation_map["Arsinh"] = &MLPPActivation::arsinh; + activationTest_map["Arsinh"] = &MLPPActivation::arsinh; - activation_map["Arcosh"] = &Activation::arcosh; - activationTest_map["Arcosh"] = &Activation::arcosh; + activation_map["Arcosh"] = &MLPPActivation::arcosh; + activationTest_map["Arcosh"] = &MLPPActivation::arcosh; - activation_map["Artanh"] = &Activation::artanh; - activationTest_map["Artanh"] = &Activation::artanh; + activation_map["Artanh"] = &MLPPActivation::artanh; + activationTest_map["Artanh"] = &MLPPActivation::artanh; - activation_map["Arcsch"] = &Activation::arcsch; - activationTest_map["Arcsch"] = &Activation::arcsch; + activation_map["Arcsch"] = &MLPPActivation::arcsch; + activationTest_map["Arcsch"] = &MLPPActivation::arcsch; - activation_map["Arsech"] = &Activation::arsech; - activationTest_map["Arsech"] = &Activation::arsech; + activation_map["Arsech"] = &MLPPActivation::arsech; + activationTest_map["Arsech"] = &MLPPActivation::arsech; - activation_map["Arcoth"] = &Activation::arcoth; - activationTest_map["Arcoth"] = &Activation::arcoth; + activation_map["Arcoth"] = &MLPPActivation::arcoth; + activationTest_map["Arcoth"] = &MLPPActivation::arcoth; costDeriv_map["MSE"] = &Cost::MSEDeriv; cost_map["MSE"] = &Cost::MSE; @@ -118,14 +118,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ void MultiOutputLayer::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, 0); } void MultiOutputLayer::Test(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, 0); } diff --git a/mlpp/multi_output_layer/multi_output_layer.h b/mlpp/multi_output_layer/multi_output_layer.h index fea1e71..219efe5 100644 --- a/mlpp/multi_output_layer/multi_output_layer.h +++ b/mlpp/multi_output_layer/multi_output_layer.h @@ -33,8 +33,8 @@ 
public: std::vector> z; std::vector> a; - std::map> (Activation::*)(std::vector>, bool)> activation_map; - std::map (Activation::*)(std::vector, bool)> activationTest_map; + std::map> (MLPPActivation::*)(std::vector>, bool)> activation_map; + std::map (MLPPActivation::*)(std::vector, bool)> activationTest_map; std::map>, std::vector>)> cost_map; std::map> (Cost::*)(std::vector>, std::vector>)> costDeriv_map; diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp index 190c7e6..358d314 100644 --- a/mlpp/output_layer/output_layer.cpp +++ b/mlpp/output_layer/output_layer.cpp @@ -17,83 +17,83 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, weights = Utilities::weightInitialization(n_hidden, weightInit); bias = Utilities::biasInitialization(); - activation_map["Linear"] = &Activation::linear; - activationTest_map["Linear"] = &Activation::linear; + activation_map["Linear"] = &MLPPActivation::linear; + activationTest_map["Linear"] = &MLPPActivation::linear; - activation_map["Sigmoid"] = &Activation::sigmoid; - activationTest_map["Sigmoid"] = &Activation::sigmoid; + activation_map["Sigmoid"] = &MLPPActivation::sigmoid; + activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid; - activation_map["Swish"] = &Activation::swish; - activationTest_map["Swish"] = &Activation::swish; + activation_map["Swish"] = &MLPPActivation::swish; + activationTest_map["Swish"] = &MLPPActivation::swish; - activation_map["Mish"] = &Activation::mish; - activationTest_map["Mish"] = &Activation::mish; + activation_map["Mish"] = &MLPPActivation::mish; + activationTest_map["Mish"] = &MLPPActivation::mish; - activation_map["SinC"] = &Activation::sinc; - activationTest_map["SinC"] = &Activation::sinc; + activation_map["SinC"] = &MLPPActivation::sinc; + activationTest_map["SinC"] = &MLPPActivation::sinc; - activation_map["Softplus"] = &Activation::softplus; - activationTest_map["Softplus"] = &Activation::softplus; + activation_map["Softplus"] = &MLPPActivation::softplus; + activationTest_map["Softplus"] = &MLPPActivation::softplus; - activation_map["Softsign"] = &Activation::softsign; - activationTest_map["Softsign"] = &Activation::softsign; + activation_map["Softsign"] = &MLPPActivation::softsign; + activationTest_map["Softsign"] = &MLPPActivation::softsign; - activation_map["CLogLog"] = &Activation::cloglog; - activationTest_map["CLogLog"] = &Activation::cloglog; + activation_map["CLogLog"] = &MLPPActivation::cloglog; + activationTest_map["CLogLog"] = &MLPPActivation::cloglog; - activation_map["Logit"] = &Activation::logit; - activationTest_map["Logit"] = &Activation::logit; + activation_map["Logit"] = &MLPPActivation::logit; + activationTest_map["Logit"] = &MLPPActivation::logit; - activation_map["GaussianCDF"] = &Activation::gaussianCDF; - activationTest_map["GaussianCDF"] = &Activation::gaussianCDF; + activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; + activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; - activation_map["RELU"] = &Activation::RELU; - activationTest_map["RELU"] = &Activation::RELU; + activation_map["RELU"] = &MLPPActivation::RELU; + activationTest_map["RELU"] = &MLPPActivation::RELU; - activation_map["GELU"] = &Activation::GELU; - activationTest_map["GELU"] = &Activation::GELU; + activation_map["GELU"] = &MLPPActivation::GELU; + activationTest_map["GELU"] = &MLPPActivation::GELU; - activation_map["Sign"] = &Activation::sign; - activationTest_map["Sign"] = &Activation::sign; + activation_map["Sign"] = 
&MLPPActivation::sign; + activationTest_map["Sign"] = &MLPPActivation::sign; - activation_map["UnitStep"] = &Activation::unitStep; - activationTest_map["UnitStep"] = &Activation::unitStep; + activation_map["UnitStep"] = &MLPPActivation::unitStep; + activationTest_map["UnitStep"] = &MLPPActivation::unitStep; - activation_map["Sinh"] = &Activation::sinh; - activationTest_map["Sinh"] = &Activation::sinh; + activation_map["Sinh"] = &MLPPActivation::sinh; + activationTest_map["Sinh"] = &MLPPActivation::sinh; - activation_map["Cosh"] = &Activation::cosh; - activationTest_map["Cosh"] = &Activation::cosh; + activation_map["Cosh"] = &MLPPActivation::cosh; + activationTest_map["Cosh"] = &MLPPActivation::cosh; - activation_map["Tanh"] = &Activation::tanh; - activationTest_map["Tanh"] = &Activation::tanh; + activation_map["Tanh"] = &MLPPActivation::tanh; + activationTest_map["Tanh"] = &MLPPActivation::tanh; - activation_map["Csch"] = &Activation::csch; - activationTest_map["Csch"] = &Activation::csch; + activation_map["Csch"] = &MLPPActivation::csch; + activationTest_map["Csch"] = &MLPPActivation::csch; - activation_map["Sech"] = &Activation::sech; - activationTest_map["Sech"] = &Activation::sech; + activation_map["Sech"] = &MLPPActivation::sech; + activationTest_map["Sech"] = &MLPPActivation::sech; - activation_map["Coth"] = &Activation::coth; - activationTest_map["Coth"] = &Activation::coth; + activation_map["Coth"] = &MLPPActivation::coth; + activationTest_map["Coth"] = &MLPPActivation::coth; - activation_map["Arsinh"] = &Activation::arsinh; - activationTest_map["Arsinh"] = &Activation::arsinh; + activation_map["Arsinh"] = &MLPPActivation::arsinh; + activationTest_map["Arsinh"] = &MLPPActivation::arsinh; - activation_map["Arcosh"] = &Activation::arcosh; - activationTest_map["Arcosh"] = &Activation::arcosh; + activation_map["Arcosh"] = &MLPPActivation::arcosh; + activationTest_map["Arcosh"] = &MLPPActivation::arcosh; - activation_map["Artanh"] = &Activation::artanh; - activationTest_map["Artanh"] = &Activation::artanh; + activation_map["Artanh"] = &MLPPActivation::artanh; + activationTest_map["Artanh"] = &MLPPActivation::artanh; - activation_map["Arcsch"] = &Activation::arcsch; - activationTest_map["Arcsch"] = &Activation::arcsch; + activation_map["Arcsch"] = &MLPPActivation::arcsch; + activationTest_map["Arcsch"] = &MLPPActivation::arcsch; - activation_map["Arsech"] = &Activation::arsech; - activationTest_map["Arsech"] = &Activation::arsech; + activation_map["Arsech"] = &MLPPActivation::arsech; + activationTest_map["Arsech"] = &MLPPActivation::arsech; - activation_map["Arcoth"] = &Activation::arcoth; - activationTest_map["Arcoth"] = &Activation::arcoth; + activation_map["Arcoth"] = &MLPPActivation::arcoth; + activationTest_map["Arcoth"] = &MLPPActivation::arcoth; costDeriv_map["MSE"] = &Cost::MSEDeriv; cost_map["MSE"] = &Cost::MSE; @@ -115,14 +115,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, void OutputLayer::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); a = (avn.*activation_map[activation])(z, 0); } void OutputLayer::Test(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; z_test = alg.dot(weights, x) + bias; a_test = (avn.*activationTest_map[activation])(z_test, 0); } diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h index 1da085a..3e60016 100644 --- a/mlpp/output_layer/output_layer.h +++ b/mlpp/output_layer/output_layer.h @@ 
-32,8 +32,8 @@ public: std::vector z; std::vector a; - std::map (Activation::*)(std::vector, bool)> activation_map; - std::map activationTest_map; + std::map (MLPPActivation::*)(std::vector, bool)> activation_map; + std::map activationTest_map; std::map, std::vector)> cost_map; std::map (Cost::*)(std::vector, std::vector)> costDeriv_map; diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp index ec0018d..84a5b50 100644 --- a/mlpp/probit_reg/probit_reg.cpp +++ b/mlpp/probit_reg/probit_reg.cpp @@ -31,7 +31,7 @@ double ProbitReg::modelTest(std::vector x) { } void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -64,7 +64,7 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -98,7 +98,7 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { // NOTE: ∂y_hat/∂z is sparse - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -139,7 +139,7 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { } void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -215,7 +215,7 @@ double ProbitReg::Cost(std::vector y_hat, std::vector y) { std::vector ProbitReg::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -226,7 +226,7 @@ std::vector ProbitReg::propagate(std::vector> X) { double ProbitReg::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.gaussianCDF(alg.dot(weights, x) + bias); } @@ -238,7 +238,7 @@ double ProbitReg::propagate(std::vector x) { // gaussianCDF ( wTx + b ) void ProbitReg::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = propagate(inputSet); y_hat = avn.gaussianCDF(z); diff --git a/mlpp/regularization/reg.cpp b/mlpp/regularization/reg.cpp index f3d860d..af198ae 100644 --- a/mlpp/regularization/reg.cpp +++ b/mlpp/regularization/reg.cpp @@ -118,7 +118,7 @@ std::vector> Reg::regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg, int j) { - Activation act; + MLPPActivation act; if (reg == "Ridge") { return lambda * weights[j]; } else if (reg == "Lasso") { @@ -141,7 +141,7 @@ double Reg::regDerivTerm(std::vector weights, double lambda, double alph } double Reg::regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg, int i, int j) { - Activation act; + MLPPActivation act; if (reg == "Ridge") { return lambda * weights[i][j]; } else if (reg == "Lasso") { diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp index d6d7061..f3cc3bc 100644 --- a/mlpp/softmax_net/softmax_net.cpp +++ b/mlpp/softmax_net/softmax_net.cpp @@ -35,7 +35,7 @@ std::vector> SoftmaxNet::modelSetTest(std::vector> y_hat, std::vector> SoftmaxNet::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, 
weights2), bias2)); @@ -260,7 +260,7 @@ std::vector> SoftmaxNet::Evaluate(std::vector>, std::vector>> SoftmaxNet::propagate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -268,7 +268,7 @@ std::tuple>, std::vector>> S std::vector SoftmaxNet::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2)); @@ -276,7 +276,7 @@ std::vector SoftmaxNet::Evaluate(std::vector x) { std::tuple, std::vector> SoftmaxNet::propagate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -284,7 +284,7 @@ std::tuple, std::vector> SoftmaxNet::propagate(std:: void SoftmaxNet::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2)); diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp index 52b7ba0..1418f1b 100644 --- a/mlpp/softmax_reg/softmax_reg.cpp +++ b/mlpp/softmax_reg/softmax_reg.cpp @@ -171,13 +171,13 @@ double SoftmaxReg::Cost(std::vector> y_hat, std::vector SoftmaxReg::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x))); } std::vector> SoftmaxReg::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias)); } @@ -185,7 +185,7 @@ std::vector> SoftmaxReg::Evaluate(std::vector x) { void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -65,7 +65,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { void SVC::SGD(double learning_rate, int max_epoch, bool UI) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; @@ -108,7 +108,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) { void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -165,32 +165,32 @@ double SVC::Cost(std::vector z, std::vector y, std::vector SVC::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector SVC::propagate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double SVC::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.sign(alg.dot(weights, x) + bias); } double SVC::propagate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return alg.dot(weights, x) + bias; } // sign ( wTx + b ) void SVC::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = propagate(inputSet); y_hat = avn.sign(z); diff --git a/mlpp/tanh_reg/tanh_reg.cpp 
b/mlpp/tanh_reg/tanh_reg.cpp index db90207..1c7e81c 100644 --- a/mlpp/tanh_reg/tanh_reg.cpp +++ b/mlpp/tanh_reg/tanh_reg.cpp @@ -31,7 +31,7 @@ double TanhReg::modelTest(std::vector x) { } void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -104,7 +104,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { } void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; double cost_prev = 0; @@ -164,7 +164,7 @@ double TanhReg::Cost(std::vector y_hat, std::vector y) { std::vector TanhReg::Evaluate(std::vector> X) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -175,7 +175,7 @@ std::vector TanhReg::propagate(std::vector> X) { double TanhReg::Evaluate(std::vector x) { LinAlg alg; - Activation avn; + MLPPActivation avn; return avn.tanh(alg.dot(weights, x) + bias); } @@ -187,7 +187,7 @@ double TanhReg::propagate(std::vector x) { // Tanh ( wTx + b ) void TanhReg::forwardPass() { LinAlg alg; - Activation avn; + MLPPActivation avn; z = propagate(inputSet); y_hat = avn.tanh(z); diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp index 5fc35c6..988bf04 100644 --- a/mlpp/wgan/wgan.cpp +++ b/mlpp/wgan/wgan.cpp @@ -218,7 +218,7 @@ void WGAN::updateGeneratorParameters(std::vector std::tuple>>, std::vector> WGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization; @@ -254,7 +254,7 @@ std::tuple>>, std::vector> W std::vector>> WGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class Cost cost; - Activation avn; + MLPPActivation avn; LinAlg alg; Reg regularization;
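
For reference, a minimal usage sketch of the renamed class (not part of the patch). It assumes the include path "mlpp/activation/activation.h" from this tree and that the template arguments stripped from the diff text above are double / std::vector<double>; it mirrors both the direct calls made by the regression classes (e.g. avn.sigmoid(...)) and the pointer-to-member activation_map dispatch used by HiddenLayer, MultiOutputLayer, and OutputLayer after the rename.

// usage_sketch.cpp -- illustrative only, assumes element type double and the
// header path shown in this patch.
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "mlpp/activation/activation.h"

int main() {
	// Before this patch this would have been: Activation avn;
	MLPPActivation avn;

	std::vector<double> z = { -1.0, 0.0, 1.0 };

	// Direct call, as in LogReg::Evaluate and similar code paths.
	std::vector<double> a = avn.sigmoid(z, false);

	// Pointer-to-member dispatch, matching the activation_map /
	// activationTest_map declarations updated in hidden_layer.h,
	// multi_output_layer.h, and output_layer.h.
	std::map<std::string,
			std::vector<double> (MLPPActivation::*)(std::vector<double>, bool)>
			activation_map;
	activation_map["Sigmoid"] = &MLPPActivation::sigmoid;
	std::vector<double> a2 = (avn.*activation_map["Sigmoid"])(z, false);

	for (double v : a2) {
		std::cout << v << ' ';
	}
	std::cout << '\n';
	return 0;
}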