From 539167fee92b6c202a2bf01fffb12bba9976ff39 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Sat, 22 Apr 2023 14:11:07 +0200
Subject: [PATCH] More cleanups.

---
 mlpp/activation/activation.cpp           | 971 +-----------------
 mlpp/activation/activation.h             | 129 ---
 mlpp/activation/activation_old.cpp       |  96 +-
 mlpp/ann/ann_old.cpp                     |  30 +-
 mlpp/auto_encoder/auto_encoder_old.cpp   |  20 +-
 mlpp/c_log_log_reg/c_log_log_reg_old.cpp |  14 +-
 mlpp/dual_svc/dual_svc.cpp               |   2 +-
 mlpp/dual_svc/dual_svc_old.cpp           |  14 +-
 mlpp/gan/gan_old.cpp                     |   6 +-
 mlpp/hidden_layer/hidden_layer_old.cpp   | 108 +-
 mlpp/hidden_layer/hidden_layer_old.h     |   6 +-
 mlpp/lin_alg/lin_alg.cpp                 |  53 -
 mlpp/lin_alg/lin_alg.h                   |   8 -
 mlpp/lin_alg/lin_alg_old.cpp             |  21 +
 mlpp/lin_reg/lin_reg_old.cpp             |  30 +-
 mlpp/log_reg/log_reg_old.cpp             |   6 +-
 mlpp/mann/mann_old.cpp                   |   4 +-
 mlpp/mlp/mlp.cpp                         |   2 +-
 mlpp/mlp/mlp_old.cpp                     |  20 +-
 .../multi_output_layer_old.cpp           | 112 +-
 .../multi_output_layer_old.h             |   6 +-
 mlpp/output_layer/output_layer_old.cpp   | 108 +-
 mlpp/output_layer/output_layer_old.h     |   6 +-
 mlpp/probit_reg/probit_reg_old.cpp       |  16 +-
 mlpp/regularization/reg.cpp              |  16 +-
 mlpp/regularization/reg_old.cpp          |   6 +-
 mlpp/softmax_net/softmax_net_old.cpp     |  18 +-
 mlpp/softmax_reg/softmax_reg_old.cpp     |   8 +-
 mlpp/stat/stat.cpp                       |   2 +-
 mlpp/stat/stat_old.cpp                   |   4 +-
 mlpp/svc/svc_old.cpp                     |  13 +-
 mlpp/tanh_reg/tanh_reg_old.cpp           |  12 +-
 mlpp/wgan/wgan_old.cpp                   |   6 +-
 test/mlpp_tests.cpp                      |  14 +-
 34 files changed, 387 insertions(+), 1500 deletions(-)

diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 6d4eb10..82bd40c 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -1214,7 +1214,7 @@ Vector<Ref<MLPPVector>> MLPPActivation::softmax_deriv_derivm(const Ref
 Ref<MLPPVector> MLPPActivation::softplus_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1334,7 +1334,7 @@ Ref<MLPPMatrix> MLPPActivation::cloglog_derivm(const Ref<MLPPMatrix> &z) {
 
 //LOGIT
 real_t MLPPActivation::logit_normr(real_t z) {
-	return std::log(z / (1 - z));
+	return Math::log(z / (1 - z));
 }
 Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1458,7 +1458,7 @@ Ref<MLPPMatrix> MLPPActivation::swish_derivm(const Ref<MLPPMatrix> &z) {
 
 //MISH
 real_t MLPPActivation::mish_normr(real_t z) {
-	return z * tanh(softplus(z));
+	return z * tanh(softplus_normr(z));
 }
 Ref<MLPPVector> MLPPActivation::mish_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1472,7 +1472,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_normm(const Ref<MLPPMatrix> &z) {
 }
 
 real_t MLPPActivation::mish_derivr(real_t z) {
-	return sech(softplus_normr(z)) * sech(softplus_normr(z)) * z * sigmoid_normr(z) + mish_normr(z) / z;
+	return sech_normr(softplus_normr(z)) * sech_normr(softplus_normr(z)) * z * sigmoid_normr(z) + mish_normr(z) / z;
 }
 Ref<MLPPVector> MLPPActivation::mish_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1502,7 +1502,7 @@ Ref<MLPPMatrix> MLPPActivation::mish_derivm(const Ref<MLPPMatrix> &z) {
 
 //SINC
 real_t MLPPActivation::sinc_normr(real_t z) {
-	return std::sin(z) / z;
+	return Math::sin(z) / z;
 }
 Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1516,7 +1516,7 @@ Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
 }
 
 real_t MLPPActivation::sinc_derivr(real_t z) {
-	return (z * std::cos(z) - std::sin(z)) / (z * z);
+	return (z * Math::cos(z) - Math::sin(z)) / (z * z);
 }
 Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
@@ -1770,7 +1770,7 @@ Ref<MLPPMatrix> MLPPActivation::elu_derivm(const Ref<MLPPMatrix> &z, real_t c) {
 
 //SELU
 real_t MLPPActivation::selu_normr(real_t z, real_t lambda, real_t c) {
-	return lambda * ELU(z, c);
+	return lambda * elu_normr(z, c);
 }
 Ref<MLPPVector> MLPPActivation::selu_normv(const Ref<MLPPVector> &z, real_t lambda, real_t c) {
 	Ref<MLPPVector> a;
@@ -1880,7 +1880,7 @@ Ref MLPPActivation::gelu_normm(const Ref &z) { } real_t MLPPActivation::gelu_derivr(real_t z) { - return 0.5 * tanh(0.0356774 * std::pow(z, 3) + 0.797885 * z) + (0.0535161 * std::pow(z, 3) + 0.398942 * z) * std::pow(sech(0.0356774 * std::pow(z, 3) + 0.797885 * z), 2) + 0.5; + return 0.5 * tanh_normr(0.0356774 * Math::pow(z, 3) + 0.797885 * z) + (0.0535161 * Math::pow(z, 3) + 0.398942 * z) * Math::pow(sech_normr(0.0356774 * Math::pow(z, 3) + 0.797885 * z), 2) + 0.5; } Ref MLPPActivation::gelu_derivv(const Ref &z) { Ref a; @@ -2091,7 +2091,7 @@ Ref MLPPActivation::csch_normm(const Ref &z) { } real_t MLPPActivation::csch_derivr(real_t z) { - return -csch(z) * coth(z); + return -csch_normr(z) * coth_normr(z); } Ref MLPPActivation::csch_derivv(const Ref &z) { MLPPLinAlg alg; @@ -2127,7 +2127,7 @@ Ref MLPPActivation::sech_normm(const Ref &z) { } real_t MLPPActivation::sech_derivr(real_t z) { - return -sech(z) * tanh(z); + return -sech_normr(z) * tanh_normr(z); } Ref MLPPActivation::sech_derivv(const Ref &z) { @@ -2174,7 +2174,7 @@ Ref MLPPActivation::coth_derivm(const Ref &z) { //ARSINH real_t MLPPActivation::arsinh_normr(real_t z) { - return std::log(z + sqrt(z * z + 1)); + return Math::log(z + sqrt(z * z + 1)); } Ref MLPPActivation::arsinh_normv(const Ref &z) { @@ -2208,7 +2208,7 @@ Ref MLPPActivation::arsinh_derivm(const Ref &z) { //ARCOSH real_t MLPPActivation::arcosh_normr(real_t z) { - return std::log(z + sqrt(z * z - 1)); + return Math::log(z + sqrt(z * z - 1)); } Ref MLPPActivation::arcosh_normv(const Ref &z) { MLPPLinAlg alg; @@ -2240,7 +2240,7 @@ Ref MLPPActivation::arcosh_derivm(const Ref &z) { //ARTANH real_t MLPPActivation::artanh_normr(real_t z) { - return 0.5 * std::log((1 + z) / (1 - z)); + return 0.5 * Math::log((1 + z) / (1 - z)); } Ref MLPPActivation::artanh_normv(const Ref &z) { MLPPLinAlg alg; @@ -2272,7 +2272,7 @@ Ref MLPPActivation::artanh_derivm(const Ref &z) { //ARCSCH real_t MLPPActivation::arcsch_normr(real_t z) { - return std::log(sqrt(1 + (1 / (z * z))) + (1 / z)); + return Math::log(sqrt(1 + (1 / (z * z))) + (1 / z)); } Ref MLPPActivation::arcsch_normv(const Ref &z) { MLPPLinAlg alg; @@ -2321,7 +2321,7 @@ Ref MLPPActivation::arcsch_derivm(const Ref &z) { //ARSECH real_t MLPPActivation::arsech_normr(real_t z) { - return std::log((1 / z) + ((1 / z) + 1) * ((1 / z) - 1)); + return Math::log((1 / z) + ((1 / z) + 1) * ((1 / z) - 1)); } Ref MLPPActivation::arsech_normv(const Ref &z) { @@ -2382,7 +2382,7 @@ Ref MLPPActivation::arsech_derivm(const Ref &z) { //ARCOTH real_t MLPPActivation::arcoth_normr(real_t z) { - return 0.5 * std::log((1 + z) / (z - 1)); + return 0.5 * Math::log((1 + z) / (z - 1)); } Ref MLPPActivation::arcoth_normv(const Ref &z) { MLPPLinAlg alg; @@ -2771,942 +2771,3 @@ void MLPPActivation::_bind_methods() { BIND_ENUM_CONSTANT(ACTIVATION_FUNCTION_ARSECH); BIND_ENUM_CONSTANT(ACTIVATION_FUNCTION_ARCOTH); } - -//======================== OLD ============================= - -real_t MLPPActivation::linear(real_t z, bool deriv) { - if (deriv) { - return 1; - } - return z; -} - -std::vector MLPPActivation::linear(std::vector z, bool deriv) { - if (deriv) { - MLPPLinAlg alg; - return alg.onevec(z.size()); - } - return z; -} - -std::vector> MLPPActivation::linear(std::vector> z, bool deriv) { - if (deriv) { - MLPPLinAlg alg; - return alg.onemat(z.size(), z[0].size()); - } - return z; -} - -real_t MLPPActivation::sigmoid(real_t z, bool deriv) { - if (deriv) { - return sigmoid(z) * (1 - sigmoid(z)); - } - return 1 / (1 + exp(-z)); -} - 
-std::vector MLPPActivation::sigmoid(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); - } - return alg.elementWiseDivision(alg.onevec(z.size()), alg.addition(alg.onevec(z.size()), alg.exp(alg.scalarMultiply(-1, z)))); -} - -std::vector> MLPPActivation::sigmoid(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); - } - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(alg.scalarMultiply(-1, z)))); -} - -std::vector MLPPActivation::softmax(std::vector z, bool deriv) { - MLPPLinAlg alg; - std::vector a; - a.resize(z.size()); - std::vector expZ = alg.exp(z); - real_t sum = 0; - - for (uint32_t i = 0; i < z.size(); i++) { - sum += expZ[i]; - } - for (uint32_t i = 0; i < z.size(); i++) { - a[i] = expZ[i] / sum; - } - return a; -} - -std::vector> MLPPActivation::softmax(std::vector> z, bool deriv) { - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < z.size(); i++) { - a[i] = softmax(z[i]); - } - return a; -} - -std::vector MLPPActivation::adjSoftmax(std::vector z) { - MLPPLinAlg alg; - std::vector a; - real_t C = -*std::max_element(z.begin(), z.end()); - z = alg.scalarAdd(C, z); - - return softmax(z); -} - -std::vector> MLPPActivation::adjSoftmax(std::vector> z) { - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < z.size(); i++) { - a[i] = adjSoftmax(z[i]); - } - return a; -} - -std::vector> MLPPActivation::softmaxDeriv(std::vector z) { - std::vector> deriv; - std::vector a = softmax(z); - deriv.resize(a.size()); - for (uint32_t i = 0; i < deriv.size(); i++) { - deriv[i].resize(a.size()); - } - for (uint32_t i = 0; i < a.size(); i++) { - for (uint32_t j = 0; j < z.size(); j++) { - if (i == j) { - deriv[i][j] = a[i] * (1 - a[i]); - } else { - deriv[i][j] = -a[i] * a[j]; - } - } - } - return deriv; -} - -std::vector>> MLPPActivation::softmaxDeriv(std::vector> z) { - MLPPLinAlg alg; - std::vector>> deriv; - std::vector> a = softmax(z); - - deriv.resize(a.size()); - for (uint32_t i = 0; i < deriv.size(); i++) { - deriv[i].resize(a.size()); - } - for (uint32_t i = 0; i < a.size(); i++) { - for (uint32_t j = 0; j < z.size(); j++) { - if (i == j) { - deriv[i][j] = alg.subtraction(a[i], alg.hadamard_product(a[i], a[i])); - } else { - deriv[i][j] = alg.scalarMultiply(-1, alg.hadamard_product(a[i], a[j])); - } - } - } - return deriv; -} - -real_t MLPPActivation::softplus(real_t z, bool deriv) { - if (deriv) { - return sigmoid(z); - } - return std::log(1 + exp(z)); -} - -std::vector MLPPActivation::softplus(std::vector z, bool deriv) { - if (deriv) { - return sigmoid(z); - } - MLPPLinAlg alg; - return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z))); -} - -std::vector> MLPPActivation::softplus(std::vector> z, bool deriv) { - if (deriv) { - return sigmoid(z); - } - MLPPLinAlg alg; - return alg.log(alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(z))); -} - -real_t MLPPActivation::softsign(real_t z, bool deriv) { - if (deriv) { - return 1 / ((1 + abs(z)) * (1 + abs(z))); - } - return z / (1 + abs(z)); -} - -std::vector MLPPActivation::softsign(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2)); - } - return alg.elementWiseDivision(z, alg.addition(alg.onevec(z.size()), 
alg.abs(z))); -} - -std::vector> MLPPActivation::softsign(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2)); - } - return alg.elementWiseDivision(z, alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z))); -} - -real_t MLPPActivation::gaussianCDF(real_t z, bool deriv) { - if (deriv) { - return (1 / sqrt(2 * M_PI)) * exp(-z * z / 2); - } - return 0.5 * (1 + erf(z / sqrt(2))); -} - -std::vector MLPPActivation::gaussianCDF(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); - } - return alg.scalarMultiply(0.5, alg.addition(alg.onevec(z.size()), alg.erf(alg.scalarMultiply(1 / sqrt(2), z)))); -} - -std::vector> MLPPActivation::gaussianCDF(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); - } - return alg.scalarMultiply(0.5, alg.addition(alg.onemat(z.size(), z[0].size()), alg.erf(alg.scalarMultiply(1 / sqrt(2), z)))); -} - -real_t MLPPActivation::cloglog(real_t z, bool deriv) { - if (deriv) { - return exp(z - exp(z)); - } - return 1 - exp(-exp(z)); -} - -std::vector MLPPActivation::cloglog(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); - } - return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.exp(alg.scalarMultiply(-1, alg.exp(z))))); -} - -std::vector> MLPPActivation::cloglog(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); - } - return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.exp(alg.scalarMultiply(-1, alg.exp(z))))); -} - -real_t MLPPActivation::logit(real_t z, bool deriv) { - if (deriv) { - return 1 / z - 1 / (z - 1); - } - return std::log(z / (1 - z)); -} - -std::vector MLPPActivation::logit(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size())))); - } - return alg.log(alg.elementWiseDivision(z, alg.subtraction(alg.onevec(z.size()), z))); -} - -std::vector> MLPPActivation::logit(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size())))); - } - return alg.log(alg.elementWiseDivision(z, alg.subtraction(alg.onemat(z.size(), z[0].size()), z))); -} - -real_t MLPPActivation::unitStep(real_t z, bool deriv) { - if (deriv) { - return 0; - } - return z < 0 ? 
0 : 1; -} - -std::vector MLPPActivation::unitStep(std::vector z, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = unitStep(z[i], true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = unitStep(z[i]); - } - return a; -} - -std::vector> MLPPActivation::unitStep(std::vector> z, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = unitStep(z[i], true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = unitStep(z[i]); - } - return a; -} - -real_t MLPPActivation::swish(real_t z, bool deriv) { - if (deriv) { - return swish(z) + sigmoid(z) * (1 - swish(z)); - } - return z * sigmoid(z); -} - -std::vector MLPPActivation::swish(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); - } - return alg.hadamard_product(z, sigmoid(z)); -} - -std::vector> MLPPActivation::swish(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); - } - return alg.hadamard_product(z, sigmoid(z)); -} - -real_t MLPPActivation::mish(real_t z, bool deriv) { - if (deriv) { - return sech(softplus(z)) * sech(softplus(z)) * z * sigmoid(z) + mish(z) / z; - } - return z * tanh(softplus(z)); -} - -std::vector MLPPActivation::mish(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); - } - return alg.hadamard_product(z, tanh(softplus(z))); -} - -std::vector> MLPPActivation::mish(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); - } - return alg.hadamard_product(z, tanh(softplus(z))); -} - -real_t MLPPActivation::sinc(real_t z, bool deriv) { - if (deriv) { - return (z * std::cos(z) - std::sin(z)) / (z * z); - } - return std::sin(z) / z; -} - -std::vector MLPPActivation::sinc(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); - } - return alg.elementWiseDivision(alg.sin(z), z); -} - -std::vector> MLPPActivation::sinc(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); - } - return alg.elementWiseDivision(alg.sin(z), z); -} - -real_t MLPPActivation::RELU(real_t z, bool deriv) { - if (deriv) { - if (z <= 0) { - return 0; - } else { - return 1; - } - } - return fmax(0, z); -} - -std::vector MLPPActivation::RELU(std::vector z, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = RELU(z[i], true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = RELU(z[i]); - } - return a; -} - -std::vector> MLPPActivation::RELU(std::vector> z, bool 
deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = RELU(z[i], true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = RELU(z[i]); - } - return a; -} - -real_t MLPPActivation::leakyReLU(real_t z, real_t c, bool deriv) { - if (deriv) { - if (z <= 0) { - return c; - } else { - return 1; - } - } - return fmax(c * z, z); -} - -std::vector MLPPActivation::leakyReLU(std::vector z, real_t c, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = leakyReLU(z[i], c, true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = leakyReLU(z[i], c); - } - return a; -} - -std::vector> MLPPActivation::leakyReLU(std::vector> z, real_t c, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = leakyReLU(z[i], c, true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = leakyReLU(z[i], c); - } - return a; -} - -real_t MLPPActivation::ELU(real_t z, real_t c, bool deriv) { - if (deriv) { - if (z <= 0) { - return c * exp(z); - } else { - return 1; - } - } - if (z >= 0) { - return z; - } else { - return c * (exp(z) - 1); - } -} - -std::vector MLPPActivation::ELU(std::vector z, real_t c, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = ELU(z[i], c, true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = ELU(z[i], c); - } - return a; -} - -std::vector> MLPPActivation::ELU(std::vector> z, real_t c, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = ELU(z[i], c, true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = ELU(z[i], c); - } - return a; -} - -real_t MLPPActivation::SELU(real_t z, real_t lambda, real_t c, bool deriv) { - if (deriv) { - return ELU(z, c, true); - } - return lambda * ELU(z, c); -} - -std::vector MLPPActivation::SELU(std::vector z, real_t lambda, real_t c, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = SELU(z[i], lambda, c, true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = SELU(z[i], lambda, c); - } - return a; -} - -std::vector> MLPPActivation::SELU(std::vector> z, real_t lambda, real_t c, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = SELU(z[i], lambda, c, true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = SELU(z[i], lambda, c); - } - return a; -} - -real_t MLPPActivation::GELU(real_t z, bool deriv) { - if (deriv) { - return 0.5 * tanh(0.0356774 * std::pow(z, 3) + 0.797885 * z) + (0.0535161 * std::pow(z, 3) + 0.398942 * z) * std::pow(sech(0.0356774 * std::pow(z, 3) + 0.797885 * z), 2) + 0.5; - } - return 0.5 * z * (1 + tanh(sqrt(2 / M_PI) * (z + 0.044715 * std::pow(z, 3)))); -} - -std::vector 
MLPPActivation::GELU(std::vector z, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = GELU(z[i], true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = GELU(z[i]); - } - return a; -} - -std::vector> MLPPActivation::GELU(std::vector> z, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = GELU(z[i], true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = GELU(z[i]); - } - return a; -} - -real_t MLPPActivation::sign(real_t z, bool deriv) { - if (deriv) { - return 0; - } - if (z < 0) { - return -1; - } else if (z == 0) { - return 0; - } else { - return 1; - } -} - -std::vector MLPPActivation::sign(std::vector z, bool deriv) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = sign(z[i], true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = sign(z[i]); - } - return a; -} - -std::vector> MLPPActivation::sign(std::vector> z, bool deriv) { - if (deriv) { - std::vector> lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = sign(z[i], true); - } - return lderiv; - } - std::vector> a; - a.resize(z.size()); - - for (uint32_t i = 0; i < a.size(); i++) { - a[i] = sign(z[i]); - } - return a; -} - -real_t MLPPActivation::sinh(real_t z, bool deriv) { - if (deriv) { - return cosh(z); - } - return 0.5 * (exp(z) - exp(-z)); -} - -std::vector MLPPActivation::sinh(std::vector z, bool deriv) { - if (deriv) { - return cosh(z); - } - MLPPLinAlg alg; - return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -std::vector> MLPPActivation::sinh(std::vector> z, bool deriv) { - if (deriv) { - return cosh(z); - } - MLPPLinAlg alg; - return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -real_t MLPPActivation::cosh(real_t z, bool deriv) { - if (deriv) { - return sinh(z); - } - return 0.5 * (exp(z) + exp(-z)); -} - -std::vector MLPPActivation::cosh(std::vector z, bool deriv) { - if (deriv) { - return sinh(z); - } - MLPPLinAlg alg; - return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -std::vector> MLPPActivation::cosh(std::vector> z, bool deriv) { - if (deriv) { - return sinh(z); - } - MLPPLinAlg alg; - return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -real_t MLPPActivation::tanh(real_t z, bool deriv) { - if (deriv) { - return 1 - tanh(z) * tanh(z); - } - return (exp(z) - exp(-z)) / (exp(z) + exp(-z)); -} - -std::vector MLPPActivation::tanh(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); - } - return alg.elementWiseDivision(alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))), alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -std::vector> MLPPActivation::tanh(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); - } - - return alg.elementWiseDivision(alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z))), 
alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); -} - -real_t MLPPActivation::csch(real_t z, bool deriv) { - if (deriv) { - return -csch(z) * coth(z); - } - return 1 / sinh(z); -} - -std::vector MLPPActivation::csch(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); - } - return alg.elementWiseDivision(alg.onevec(z.size()), sinh(z)); -} - -std::vector> MLPPActivation::csch(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); - } - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), sinh(z)); -} - -real_t MLPPActivation::sech(real_t z, bool deriv) { - if (deriv) { - return -sech(z) * tanh(z); - } - return 1 / cosh(z); -} - -std::vector MLPPActivation::sech(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); - } - return alg.elementWiseDivision(alg.onevec(z.size()), cosh(z)); - - // return activation(z, deriv, static_cast(&sech)); -} - -std::vector> MLPPActivation::sech(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); - } - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), cosh(z)); - - // return activation(z, deriv, static_cast(&sech)); -} - -real_t MLPPActivation::coth(real_t z, bool deriv) { - if (deriv) { - return -csch(z) * csch(z); - } - return 1 / tanh(z); -} - -std::vector MLPPActivation::coth(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); - } - return alg.elementWiseDivision(alg.onevec(z.size()), tanh(z)); -} - -std::vector> MLPPActivation::coth(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); - } - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), tanh(z)); -} - -real_t MLPPActivation::arsinh(real_t z, bool deriv) { - if (deriv) { - return 1 / sqrt(z * z + 1); - } - return std::log(z + sqrt(z * z + 1)); -} - -std::vector MLPPActivation::arsinh(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size())))); - } - return alg.log(alg.addition(z, alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size()))))); -} - -std::vector> MLPPActivation::arsinh(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); - } - return alg.log(alg.addition(z, alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))))); -} - -real_t MLPPActivation::arcosh(real_t z, bool deriv) { - if (deriv) { - return 1 / sqrt(z * z - 1); - } - return std::log(z + sqrt(z * z - 1)); -} - -std::vector MLPPActivation::arcosh(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size())))); - } - return alg.log(alg.addition(z, alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size()))))); -} - -std::vector> MLPPActivation::arcosh(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - 
return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); - } - return alg.log(alg.addition(z, alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size()))))); -} - -real_t MLPPActivation::artanh(real_t z, bool deriv) { - if (deriv) { - return 1 / (1 - z * z); - } - return 0.5 * std::log((1 + z) / (1 - z)); -} - -std::vector MLPPActivation::artanh(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); - } - return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(alg.onevec(z.size()), z)))); -} - -std::vector> MLPPActivation::artanh(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); - } - return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onemat(z.size(), z[0].size()), z), alg.subtraction(alg.onemat(z.size(), z[0].size()), z)))); -} - -real_t MLPPActivation::arcsch(real_t z, bool deriv) { - if (deriv) { - return -1 / ((z * z) * sqrt(1 + (1 / (z * z)))); - } - return std::log(sqrt(1 + (1 / (z * z))) + (1 / z)); -} - -std::vector MLPPActivation::arcsch(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))))); - } - return alg.log(alg.addition(alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))), alg.elementWiseDivision(alg.onevec(z.size()), z))); -} - -std::vector> MLPPActivation::arcsch(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))))); - } - return alg.log(alg.addition(alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z))); -} - -real_t MLPPActivation::arsech(real_t z, bool deriv) { - if (deriv) { - return -1 / (z * sqrt(1 - z * z)); - } - return std::log((1 / z) + ((1 / z) + 1) * ((1 / z) - 1)); -} - -std::vector MLPPActivation::arsech(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))))); - } - return alg.log(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.hadamard_product(alg.addition(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size())), alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.onevec(z.size()))))); -} - -std::vector> MLPPActivation::arsech(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, 
alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))); - } - return alg.log(alg.addition(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.hadamard_product(alg.addition(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.onemat(z.size(), z[0].size())), alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.onemat(z.size(), z[0].size()))))); -} - -real_t MLPPActivation::arcoth(real_t z, bool deriv) { - if (deriv) { - return 1 / (1 - z * z); - } - return 0.5 * std::log((1 + z) / (z - 1)); -} - -std::vector MLPPActivation::arcoth(std::vector z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); - } - return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onevec(z.size()), z), alg.subtraction(z, alg.onevec(z.size()))))); -} - -std::vector> MLPPActivation::arcoth(std::vector> z, bool deriv) { - MLPPLinAlg alg; - if (deriv) { - return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); - } - return alg.scalarMultiply(0.5, alg.log(alg.elementWiseDivision(alg.addition(alg.onemat(z.size(), z[0].size()), z), alg.subtraction(z, alg.onemat(z.size(), z[0].size()))))); -} - -// TO DO: Implement this template activation -std::vector MLPPActivation::activation(std::vector z, bool deriv, real_t (*function)(real_t, bool)) { - if (deriv) { - std::vector lderiv; - lderiv.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - lderiv[i] = function(z[i], true); - } - return lderiv; - } - std::vector a; - a.resize(z.size()); - for (uint32_t i = 0; i < z.size(); i++) { - a[i] = function(z[i], deriv); - } - return a; -} diff --git a/mlpp/activation/activation.h b/mlpp/activation/activation.h index c2d6df2..81ee2d6 100644 --- a/mlpp/activation/activation.h +++ b/mlpp/activation/activation.h @@ -407,135 +407,6 @@ public: Ref arcoth_derivv(const Ref &z); Ref arcoth_derivm(const Ref &z); - // ========= OLD =========== - - real_t linear(real_t z, bool deriv = false); - std::vector linear(std::vector z, bool deriv = false); - std::vector> linear(std::vector> z, bool deriv = false); - - real_t sigmoid(real_t z, bool deriv = false); - std::vector sigmoid(std::vector z, bool deriv = false); - std::vector> sigmoid(std::vector> z, bool deriv = false); - - std::vector softmax(std::vector z, bool deriv = false); - std::vector> softmax(std::vector> z, bool deriv = false); - - std::vector adjSoftmax(std::vector z); - std::vector> adjSoftmax(std::vector> z); - - std::vector> softmaxDeriv(std::vector z); - std::vector>> softmaxDeriv(std::vector> z); - - real_t softplus(real_t z, bool deriv = false); - std::vector softplus(std::vector z, bool deriv = false); - std::vector> softplus(std::vector> z, bool deriv = false); - - real_t softsign(real_t z, bool deriv = false); - std::vector softsign(std::vector z, bool deriv = false); - std::vector> softsign(std::vector> z, bool deriv = false); - - real_t gaussianCDF(real_t z, bool deriv = false); - std::vector gaussianCDF(std::vector z, bool deriv = false); - std::vector> gaussianCDF(std::vector> z, bool deriv = false); - - real_t cloglog(real_t z, bool deriv = false); - std::vector cloglog(std::vector z, bool deriv = false); - std::vector> cloglog(std::vector> z, bool deriv = false); - - real_t logit(real_t z, bool deriv = false); - std::vector 
logit(std::vector z, bool deriv = false); - std::vector> logit(std::vector> z, bool deriv = false); - - real_t unitStep(real_t z, bool deriv = false); - std::vector unitStep(std::vector z, bool deriv = false); - std::vector> unitStep(std::vector> z, bool deriv = false); - - real_t swish(real_t z, bool deriv = false); - std::vector swish(std::vector z, bool deriv = false); - std::vector> swish(std::vector> z, bool deriv = false); - - real_t mish(real_t z, bool deriv = false); - std::vector mish(std::vector z, bool deriv = false); - std::vector> mish(std::vector> z, bool deriv = false); - - real_t sinc(real_t z, bool deriv = false); - std::vector sinc(std::vector z, bool deriv = false); - std::vector> sinc(std::vector> z, bool deriv = false); - - real_t RELU(real_t z, bool deriv = false); - std::vector RELU(std::vector z, bool deriv = false); - std::vector> RELU(std::vector> z, bool deriv = false); - - real_t leakyReLU(real_t z, real_t c, bool deriv = false); - std::vector leakyReLU(std::vector z, real_t c, bool deriv = false); - std::vector> leakyReLU(std::vector> z, real_t c, bool deriv = false); - - real_t ELU(real_t z, real_t c, bool deriv = false); - std::vector ELU(std::vector z, real_t c, bool deriv = false); - std::vector> ELU(std::vector> z, real_t c, bool deriv = false); - - real_t SELU(real_t z, real_t lambda, real_t c, bool deriv = false); - std::vector SELU(std::vector z, real_t lambda, real_t c, bool deriv = false); - std::vector> SELU(std::vector>, real_t lambda, real_t c, bool deriv = false); - - real_t GELU(real_t z, bool deriv = false); - std::vector GELU(std::vector z, bool deriv = false); - std::vector> GELU(std::vector> z, bool deriv = false); - - real_t sign(real_t z, bool deriv = false); - std::vector sign(std::vector z, bool deriv = false); - std::vector> sign(std::vector> z, bool deriv = false); - - real_t sinh(real_t z, bool deriv = false); - std::vector sinh(std::vector z, bool deriv = false); - std::vector> sinh(std::vector> z, bool deriv = false); - - real_t cosh(real_t z, bool deriv = false); - std::vector cosh(std::vector z, bool deriv = false); - std::vector> cosh(std::vector> z, bool deriv = false); - - real_t tanh(real_t z, bool deriv = false); - std::vector tanh(std::vector z, bool deriv = false); - std::vector> tanh(std::vector> z, bool deriv = false); - - real_t csch(real_t z, bool deriv = false); - std::vector csch(std::vector z, bool deriv = false); - std::vector> csch(std::vector> z, bool deriv = false); - - real_t sech(real_t z, bool deriv = false); - std::vector sech(std::vector z, bool deriv = false); - std::vector> sech(std::vector> z, bool deriv = false); - - real_t coth(real_t z, bool deriv = false); - std::vector coth(std::vector z, bool deriv = false); - std::vector> coth(std::vector> z, bool deriv = false); - - real_t arsinh(real_t z, bool deriv = false); - std::vector arsinh(std::vector z, bool deriv = false); - std::vector> arsinh(std::vector> z, bool deriv = false); - - real_t arcosh(real_t z, bool deriv = false); - std::vector arcosh(std::vector z, bool deriv = false); - std::vector> arcosh(std::vector> z, bool deriv = false); - - real_t artanh(real_t z, bool deriv = false); - std::vector artanh(std::vector z, bool deriv = false); - std::vector> artanh(std::vector> z, bool deriv = false); - - real_t arcsch(real_t z, bool deriv = false); - std::vector arcsch(std::vector z, bool deriv = false); - std::vector> arcsch(std::vector> z, bool deriv = false); - - real_t arsech(real_t z, bool deriv = false); - std::vector arsech(std::vector z, bool 
deriv = false); - std::vector> arsech(std::vector> z, bool deriv = false); - - real_t arcoth(real_t z, bool deriv = false); - std::vector arcoth(std::vector z, bool deriv = false); - std::vector> arcoth(std::vector> z, bool deriv = false); - - std::vector activation(std::vector z, bool deriv, real_t (*function)(real_t, bool)); - protected: static void _bind_methods(); }; diff --git a/mlpp/activation/activation_old.cpp b/mlpp/activation/activation_old.cpp index 9f0c2d8..47c5da4 100644 --- a/mlpp/activation/activation_old.cpp +++ b/mlpp/activation/activation_old.cpp @@ -5,7 +5,7 @@ // #include "activation_old.h" -#include "../lin_alg/lin_alg.h" +#include "../lin_alg/lin_alg_old.h" #include #include @@ -20,7 +20,7 @@ real_t MLPPActivationOld::linear(real_t z, bool deriv) { std::vector MLPPActivationOld::linear(std::vector z, bool deriv) { if (deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.onevec(z.size()); } return z; @@ -28,7 +28,7 @@ std::vector MLPPActivationOld::linear(std::vector z, bool deriv) std::vector> MLPPActivationOld::linear(std::vector> z, bool deriv) { if (deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.onemat(z.size(), z[0].size()); } return z; @@ -42,7 +42,7 @@ real_t MLPPActivationOld::sigmoid(real_t z, bool deriv) { } std::vector MLPPActivationOld::sigmoid(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); } @@ -50,7 +50,7 @@ std::vector MLPPActivationOld::sigmoid(std::vector z, bool deriv } std::vector> MLPPActivationOld::sigmoid(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); } @@ -58,7 +58,7 @@ std::vector> MLPPActivationOld::sigmoid(std::vector MLPPActivationOld::softmax(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; std::vector a; a.resize(z.size()); std::vector expZ = alg.exp(z); @@ -84,7 +84,7 @@ std::vector> MLPPActivationOld::softmax(std::vector MLPPActivationOld::adjSoftmax(std::vector z) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; std::vector a; real_t C = -*std::max_element(z.begin(), z.end()); z = alg.scalarAdd(C, z); @@ -122,7 +122,7 @@ std::vector> MLPPActivationOld::softmaxDeriv(std::vector>> MLPPActivationOld::softmaxDeriv(std::vector> z) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; std::vector>> deriv; std::vector> a = softmax(z); @@ -153,7 +153,7 @@ std::vector MLPPActivationOld::softplus(std::vector z, bool deri if (deriv) { return sigmoid(z); } - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z))); } @@ -161,7 +161,7 @@ std::vector> MLPPActivationOld::softplus(std::vector MLPPActivationOld::softsign(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2)); } @@ -181,7 +181,7 @@ std::vector MLPPActivationOld::softsign(std::vector z, bool deri } std::vector> MLPPActivationOld::softsign(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2)); } @@ -196,7 +196,7 @@ real_t MLPPActivationOld::gaussianCDF(real_t z, bool deriv) { } std::vector MLPPActivationOld::gaussianCDF(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; 
if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); } @@ -204,7 +204,7 @@ std::vector MLPPActivationOld::gaussianCDF(std::vector z, bool d } std::vector> MLPPActivationOld::gaussianCDF(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); } @@ -219,7 +219,7 @@ real_t MLPPActivationOld::cloglog(real_t z, bool deriv) { } std::vector MLPPActivationOld::cloglog(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); } @@ -227,7 +227,7 @@ std::vector MLPPActivationOld::cloglog(std::vector z, bool deriv } std::vector> MLPPActivationOld::cloglog(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); } @@ -242,7 +242,7 @@ real_t MLPPActivationOld::logit(real_t z, bool deriv) { } std::vector MLPPActivationOld::logit(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size())))); } @@ -250,7 +250,7 @@ std::vector MLPPActivationOld::logit(std::vector z, bool deriv) } std::vector> MLPPActivationOld::logit(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size())))); } @@ -308,7 +308,7 @@ real_t MLPPActivationOld::swish(real_t z, bool deriv) { } std::vector MLPPActivationOld::swish(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); } @@ -316,7 +316,7 @@ std::vector MLPPActivationOld::swish(std::vector z, bool deriv) } std::vector> MLPPActivationOld::swish(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); } @@ -331,7 +331,7 @@ real_t MLPPActivationOld::mish(real_t z, bool deriv) { } std::vector MLPPActivationOld::mish(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); } @@ -339,7 +339,7 @@ std::vector MLPPActivationOld::mish(std::vector z, bool deriv) { } std::vector> MLPPActivationOld::mish(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); } @@ -354,7 +354,7 @@ real_t MLPPActivationOld::sinc(real_t z, bool deriv) { } std::vector MLPPActivationOld::sinc(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); } @@ -362,7 +362,7 @@ std::vector MLPPActivationOld::sinc(std::vector z, bool deriv) { } std::vector> 
MLPPActivationOld::sinc(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); } @@ -660,7 +660,7 @@ std::vector MLPPActivationOld::sinh(std::vector z, bool deriv) { if (deriv) { return cosh(z); } - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } @@ -668,7 +668,7 @@ std::vector> MLPPActivationOld::sinh(std::vector MLPPActivationOld::cosh(std::vector z, bool deriv) { if (deriv) { return sinh(z); } - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } @@ -691,7 +691,7 @@ std::vector> MLPPActivationOld::cosh(std::vector MLPPActivationOld::tanh(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); } @@ -711,7 +711,7 @@ std::vector MLPPActivationOld::tanh(std::vector z, bool deriv) { } std::vector> MLPPActivationOld::tanh(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); } @@ -727,7 +727,7 @@ real_t MLPPActivationOld::csch(real_t z, bool deriv) { } std::vector MLPPActivationOld::csch(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); } @@ -735,7 +735,7 @@ std::vector MLPPActivationOld::csch(std::vector z, bool deriv) { } std::vector> MLPPActivationOld::csch(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); } @@ -750,7 +750,7 @@ real_t MLPPActivationOld::sech(real_t z, bool deriv) { } std::vector MLPPActivationOld::sech(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); } @@ -760,7 +760,7 @@ std::vector MLPPActivationOld::sech(std::vector z, bool deriv) { } std::vector> MLPPActivationOld::sech(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); } @@ -777,7 +777,7 @@ real_t MLPPActivationOld::coth(real_t z, bool deriv) { } std::vector MLPPActivationOld::coth(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); } @@ -785,7 +785,7 @@ std::vector MLPPActivationOld::coth(std::vector z, bool deriv) { } std::vector> MLPPActivationOld::coth(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); } @@ -800,7 +800,7 @@ real_t MLPPActivationOld::arsinh(real_t z, bool deriv) { } std::vector MLPPActivationOld::arsinh(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size())))); } @@ -808,7 +808,7 @@ std::vector MLPPActivationOld::arsinh(std::vector z, bool deriv) } std::vector> MLPPActivationOld::arsinh(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return 
alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); } @@ -823,7 +823,7 @@ real_t MLPPActivationOld::arcosh(real_t z, bool deriv) { } std::vector MLPPActivationOld::arcosh(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size())))); } @@ -831,7 +831,7 @@ std::vector MLPPActivationOld::arcosh(std::vector z, bool deriv) } std::vector> MLPPActivationOld::arcosh(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); } @@ -846,7 +846,7 @@ real_t MLPPActivationOld::artanh(real_t z, bool deriv) { } std::vector MLPPActivationOld::artanh(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); } @@ -854,7 +854,7 @@ std::vector MLPPActivationOld::artanh(std::vector z, bool deriv) } std::vector> MLPPActivationOld::artanh(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); } @@ -869,7 +869,7 @@ real_t MLPPActivationOld::arcsch(real_t z, bool deriv) { } std::vector MLPPActivationOld::arcsch(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))))); } @@ -877,7 +877,7 @@ std::vector MLPPActivationOld::arcsch(std::vector z, bool deriv) } std::vector> MLPPActivationOld::arcsch(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))))); } @@ -892,7 +892,7 @@ real_t MLPPActivationOld::arsech(real_t z, bool deriv) { } std::vector MLPPActivationOld::arsech(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))))); } @@ -900,7 +900,7 @@ std::vector MLPPActivationOld::arsech(std::vector z, bool deriv) } std::vector> MLPPActivationOld::arsech(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))); } @@ -915,7 +915,7 @@ real_t MLPPActivationOld::arcoth(real_t z, bool deriv) { } std::vector MLPPActivationOld::arcoth(std::vector z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); } @@ -923,7 +923,7 @@ std::vector 
MLPPActivationOld::arcoth(std::vector z, bool deriv) } std::vector> MLPPActivationOld::arcoth(std::vector> z, bool deriv) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); } diff --git a/mlpp/ann/ann_old.cpp b/mlpp/ann/ann_old.cpp index 3613299..ed3e945 100644 --- a/mlpp/ann/ann_old.cpp +++ b/mlpp/ann/ann_old.cpp @@ -5,9 +5,9 @@ // #include "ann_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" -#include "../lin_alg/lin_alg.h" +#include "../lin_alg/lin_alg_old.h" #include "../regularization/reg.h" #include "../utilities/utilities.h" @@ -62,7 +62,7 @@ real_t MLPPANNOld::modelTest(std::vector x) { void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; forwardPass(); @@ -98,7 +98,7 @@ void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -139,7 +139,7 @@ void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -184,7 +184,7 @@ void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -246,7 +246,7 @@ void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_si void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -307,7 +307,7 @@ void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -368,7 +368,7 @@ void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_si void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -441,7 +441,7 @@ void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -512,7 +512,7 @@ void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -587,7 +587,7 @@ void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, 
void MLPPANNOld::AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { class MLPPCost cost; - MLPPLinAlg alg; + MLPPLinAlgOld alg; real_t cost_prev = 0; int epoch = 1; @@ -757,7 +757,7 @@ void MLPPANNOld::forwardPass() { } void MLPPANNOld::updateParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, real_t learning_rate) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; @@ -776,8 +776,8 @@ void MLPPANNOld::updateParameters(std::vector>> std::tuple>>, std::vector> MLPPANNOld::computeGradients(std::vector y_hat, std::vector outputSet) { // std::cout << "BEGIN" << std::endl; class MLPPCost cost; - MLPPActivation avn; - MLPPLinAlg alg; + MLPPActivationOld avn; + MLPPLinAlgOld alg; MLPPReg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. diff --git a/mlpp/auto_encoder/auto_encoder_old.cpp b/mlpp/auto_encoder/auto_encoder_old.cpp index 2db06e8..b0b1e14 100644 --- a/mlpp/auto_encoder/auto_encoder_old.cpp +++ b/mlpp/auto_encoder/auto_encoder_old.cpp @@ -6,7 +6,7 @@ #include "auto_encoder_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../utilities/utilities.h" @@ -23,7 +23,7 @@ std::vector MLPPAutoEncoderOld::modelTest(std::vector x) { } void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; real_t cost_prev = 0; int epoch = 1; @@ -76,7 +76,7 @@ void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bo } void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; real_t cost_prev = 0; int epoch = 1; @@ -130,7 +130,7 @@ void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) { } void MLPPAutoEncoderOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; real_t cost_prev = 0; int epoch = 1; @@ -210,7 +210,7 @@ MLPPAutoEncoderOld::MLPPAutoEncoderOld(std::vector> pinputSe n = inputSet.size(); k = inputSet[0].size(); - MLPPActivation avn; + MLPPActivationOld avn; y_hat.resize(inputSet.size()); weights1 = MLPPUtilities::weightInitialization(k, n_hidden); @@ -226,7 +226,7 @@ real_t MLPPAutoEncoderOld::Cost(std::vector> y_hat, std::vec std::vector> MLPPAutoEncoderOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return alg.mat_vec_add(alg.matmult(a2, weights2), bias2); @@ -234,7 +234,7 @@ std::vector> MLPPAutoEncoderOld::Evaluate(std::vector>, std::vector>> MLPPAutoEncoderOld::propagate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -242,7 +242,7 @@ std::tuple>, std::vector>> M std::vector MLPPAutoEncoderOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return 
alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2); @@ -250,7 +250,7 @@ std::vector MLPPAutoEncoderOld::Evaluate(std::vector x) { std::tuple, std::vector> MLPPAutoEncoderOld::propagate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -258,7 +258,7 @@ std::tuple, std::vector> MLPPAutoEncoderOld::propaga void MLPPAutoEncoderOld::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2); diff --git a/mlpp/c_log_log_reg/c_log_log_reg_old.cpp b/mlpp/c_log_log_reg/c_log_log_reg_old.cpp index b96b0cb..9249058 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg_old.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg_old.cpp @@ -6,7 +6,7 @@ #include "c_log_log_reg_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -31,7 +31,7 @@ real_t MLPPCLogLogRegOld::modelTest(std::vector x) { } void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -65,7 +65,7 @@ void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, boo } void MLPPCLogLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -138,7 +138,7 @@ void MLPPCLogLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { } void MLPPCLogLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -195,7 +195,7 @@ real_t MLPPCLogLogRegOld::Cost(std::vector y_hat, std::vector y) std::vector MLPPCLogLogRegOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -206,7 +206,7 @@ std::vector MLPPCLogLogRegOld::propagate(std::vector real_t MLPPCLogLogRegOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.cloglog(alg.dot(weights, x) + bias); } @@ -217,7 +217,7 @@ real_t MLPPCLogLogRegOld::propagate(std::vector x) { // cloglog ( wTx + b ) void MLPPCLogLogRegOld::forwardPass() { - MLPPActivation avn; + MLPPActivationOld avn; z = propagate(inputSet); y_hat = avn.cloglog(z); diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index ad20ca2..0176897 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -203,7 +203,7 @@ real_t MLPPDualSVC::cost(const Ref &alpha, const Ref &X, real_t MLPPDualSVC::evaluatev(const Ref &x) { MLPPActivation avn; - return avn.sign(propagatev(x)); + return avn.sign_normr(propagatev(x)); } real_t MLPPDualSVC::propagatev(const Ref &x) { diff --git a/mlpp/dual_svc/dual_svc_old.cpp b/mlpp/dual_svc/dual_svc_old.cpp index 8de36b5..c51f4d8 100644 --- a/mlpp/dual_svc/dual_svc_old.cpp +++ b/mlpp/dual_svc/dual_svc_old.cpp @@ -5,7 +5,7 @@ // #include "dual_svc_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include 
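c_log_log_reg_old.cpp above keeps the same model, cloglog(wTx + b), and only changes which activation class supplies cloglog. A minimal sketch of that evaluation, assuming cloglog is the standard complementary log-log inverse link 1 - exp(-exp(z)); the function names are illustrative, not the library's.

// Illustrative sketch, not part of the patch.
#include <cmath>
#include <cstddef>
#include <vector>

// Assumed to match the library's cloglog activation: 1 - exp(-exp(z)).
inline double cloglog(double z) {
	return 1.0 - std::exp(-std::exp(z));
}

// Forward pass of the cloglog regressor: cloglog(w . x + b).
inline double cloglog_evaluate(const std::vector<double> &w,
		const std::vector<double> &x, double b) {
	double z = b;
	for (std::size_t i = 0; i < w.size(); ++i) {
		z += w[i] * x[i];
	}
	return cloglog(z);
}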
"../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -38,7 +38,7 @@ real_t MLPPDualSVCOld::modelTest(std::vector x) { void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -86,7 +86,7 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U // void MLPPDualSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI){ // class MLPPCost cost; -// MLPPActivation avn; +// MLPPActivationOld avn; // MLPPLinAlg alg; // MLPPReg regularization; @@ -119,7 +119,7 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U // void MLPPDualSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){ // class MLPPCost cost; -// MLPPActivation avn; +// MLPPActivationOld avn; // MLPPLinAlg alg; // MLPPReg regularization; // real_t cost_prev = 0; @@ -173,7 +173,7 @@ real_t MLPPDualSVCOld::Cost(std::vector alpha, std::vector MLPPDualSVCOld::Evaluate(std::vector> X) { - MLPPActivation avn; + MLPPActivationOld avn; return avn.sign(propagate(X)); } @@ -194,7 +194,7 @@ std::vector MLPPDualSVCOld::propagate(std::vector> X } real_t MLPPDualSVCOld::Evaluate(std::vector x) { - MLPPActivation avn; + MLPPActivationOld avn; return avn.sign(propagate(x)); } @@ -211,7 +211,7 @@ real_t MLPPDualSVCOld::propagate(std::vector x) { } void MLPPDualSVCOld::forwardPass() { - MLPPActivation avn; + MLPPActivationOld avn; z = propagate(inputSet); y_hat = avn.sign(z); diff --git a/mlpp/gan/gan_old.cpp b/mlpp/gan/gan_old.cpp index 84f9a8d..b935020 100644 --- a/mlpp/gan/gan_old.cpp +++ b/mlpp/gan/gan_old.cpp @@ -5,7 +5,7 @@ // #include "gan_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -211,7 +211,7 @@ void MLPPGANOld::updateGeneratorParameters(std::vector>>, std::vector> MLPPGANOld::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; @@ -247,7 +247,7 @@ std::tuple>>, std::vector> M std::vector>> MLPPGANOld::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; diff --git a/mlpp/hidden_layer/hidden_layer_old.cpp b/mlpp/hidden_layer/hidden_layer_old.cpp index b623cbf..da10483 100644 --- a/mlpp/hidden_layer/hidden_layer_old.cpp +++ b/mlpp/hidden_layer/hidden_layer_old.cpp @@ -23,88 +23,88 @@ MLPPOldHiddenLayer::MLPPOldHiddenLayer(int p_n_hidden, std::string p_activation, weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit); bias = MLPPUtilities::biasInitialization(n_hidden); - activation_map["Linear"] = &MLPPActivation::linear; - activationTest_map["Linear"] = &MLPPActivation::linear; + activation_map["Linear"] = &MLPPActivationOld::linear; + activationTest_map["Linear"] = &MLPPActivationOld::linear; - activation_map["Sigmoid"] = &MLPPActivation::sigmoid; - activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid; + activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid; + activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid; - activation_map["Swish"] = &MLPPActivation::swish; - activationTest_map["Swish"] = &MLPPActivation::swish; + 
activation_map["Swish"] = &MLPPActivationOld::swish; + activationTest_map["Swish"] = &MLPPActivationOld::swish; - activation_map["Mish"] = &MLPPActivation::mish; - activationTest_map["Mish"] = &MLPPActivation::mish; + activation_map["Mish"] = &MLPPActivationOld::mish; + activationTest_map["Mish"] = &MLPPActivationOld::mish; - activation_map["SinC"] = &MLPPActivation::sinc; - activationTest_map["SinC"] = &MLPPActivation::sinc; + activation_map["SinC"] = &MLPPActivationOld::sinc; + activationTest_map["SinC"] = &MLPPActivationOld::sinc; - activation_map["Softplus"] = &MLPPActivation::softplus; - activationTest_map["Softplus"] = &MLPPActivation::softplus; + activation_map["Softplus"] = &MLPPActivationOld::softplus; + activationTest_map["Softplus"] = &MLPPActivationOld::softplus; - activation_map["Softsign"] = &MLPPActivation::softsign; - activationTest_map["Softsign"] = &MLPPActivation::softsign; + activation_map["Softsign"] = &MLPPActivationOld::softsign; + activationTest_map["Softsign"] = &MLPPActivationOld::softsign; - activation_map["CLogLog"] = &MLPPActivation::cloglog; - activationTest_map["CLogLog"] = &MLPPActivation::cloglog; + activation_map["CLogLog"] = &MLPPActivationOld::cloglog; + activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog; - activation_map["Logit"] = &MLPPActivation::logit; - activationTest_map["Logit"] = &MLPPActivation::logit; + activation_map["Logit"] = &MLPPActivationOld::logit; + activationTest_map["Logit"] = &MLPPActivationOld::logit; - activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; - activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; + activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF; + activationTest_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF; - activation_map["RELU"] = &MLPPActivation::RELU; - activationTest_map["RELU"] = &MLPPActivation::RELU; + activation_map["RELU"] = &MLPPActivationOld::RELU; + activationTest_map["RELU"] = &MLPPActivationOld::RELU; - activation_map["GELU"] = &MLPPActivation::GELU; - activationTest_map["GELU"] = &MLPPActivation::GELU; + activation_map["GELU"] = &MLPPActivationOld::GELU; + activationTest_map["GELU"] = &MLPPActivationOld::GELU; - activation_map["Sign"] = &MLPPActivation::sign; - activationTest_map["Sign"] = &MLPPActivation::sign; + activation_map["Sign"] = &MLPPActivationOld::sign; + activationTest_map["Sign"] = &MLPPActivationOld::sign; - activation_map["UnitStep"] = &MLPPActivation::unitStep; - activationTest_map["UnitStep"] = &MLPPActivation::unitStep; + activation_map["UnitStep"] = &MLPPActivationOld::unitStep; + activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep; - activation_map["Sinh"] = &MLPPActivation::sinh; - activationTest_map["Sinh"] = &MLPPActivation::sinh; + activation_map["Sinh"] = &MLPPActivationOld::sinh; + activationTest_map["Sinh"] = &MLPPActivationOld::sinh; - activation_map["Cosh"] = &MLPPActivation::cosh; - activationTest_map["Cosh"] = &MLPPActivation::cosh; + activation_map["Cosh"] = &MLPPActivationOld::cosh; + activationTest_map["Cosh"] = &MLPPActivationOld::cosh; - activation_map["Tanh"] = &MLPPActivation::tanh; - activationTest_map["Tanh"] = &MLPPActivation::tanh; + activation_map["Tanh"] = &MLPPActivationOld::tanh; + activationTest_map["Tanh"] = &MLPPActivationOld::tanh; - activation_map["Csch"] = &MLPPActivation::csch; - activationTest_map["Csch"] = &MLPPActivation::csch; + activation_map["Csch"] = &MLPPActivationOld::csch; + activationTest_map["Csch"] = &MLPPActivationOld::csch; - activation_map["Sech"] = 
&MLPPActivation::sech; - activationTest_map["Sech"] = &MLPPActivation::sech; + activation_map["Sech"] = &MLPPActivationOld::sech; + activationTest_map["Sech"] = &MLPPActivationOld::sech; - activation_map["Coth"] = &MLPPActivation::coth; - activationTest_map["Coth"] = &MLPPActivation::coth; + activation_map["Coth"] = &MLPPActivationOld::coth; + activationTest_map["Coth"] = &MLPPActivationOld::coth; - activation_map["Arsinh"] = &MLPPActivation::arsinh; - activationTest_map["Arsinh"] = &MLPPActivation::arsinh; + activation_map["Arsinh"] = &MLPPActivationOld::arsinh; + activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh; - activation_map["Arcosh"] = &MLPPActivation::arcosh; - activationTest_map["Arcosh"] = &MLPPActivation::arcosh; + activation_map["Arcosh"] = &MLPPActivationOld::arcosh; + activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh; - activation_map["Artanh"] = &MLPPActivation::artanh; - activationTest_map["Artanh"] = &MLPPActivation::artanh; + activation_map["Artanh"] = &MLPPActivationOld::artanh; + activationTest_map["Artanh"] = &MLPPActivationOld::artanh; - activation_map["Arcsch"] = &MLPPActivation::arcsch; - activationTest_map["Arcsch"] = &MLPPActivation::arcsch; + activation_map["Arcsch"] = &MLPPActivationOld::arcsch; + activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch; - activation_map["Arsech"] = &MLPPActivation::arsech; - activationTest_map["Arsech"] = &MLPPActivation::arsech; + activation_map["Arsech"] = &MLPPActivationOld::arsech; + activationTest_map["Arsech"] = &MLPPActivationOld::arsech; - activation_map["Arcoth"] = &MLPPActivation::arcoth; - activationTest_map["Arcoth"] = &MLPPActivation::arcoth; + activation_map["Arcoth"] = &MLPPActivationOld::arcoth; + activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth; } void MLPPOldHiddenLayer::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, false); @@ -112,7 +112,7 @@ void MLPPOldHiddenLayer::forwardPass() { void MLPPOldHiddenLayer::Test(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, false); } diff --git a/mlpp/hidden_layer/hidden_layer_old.h b/mlpp/hidden_layer/hidden_layer_old.h index 216175f..23ca450 100644 --- a/mlpp/hidden_layer/hidden_layer_old.h +++ b/mlpp/hidden_layer/hidden_layer_old.h @@ -13,7 +13,7 @@ #include "core/object/reference.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../regularization/reg.h" #include "../utilities/utilities.h" @@ -39,8 +39,8 @@ public: std::vector> z; std::vector> a; - std::map> (MLPPActivation::*)(std::vector>, bool)> activation_map; - std::map (MLPPActivation::*)(std::vector, bool)> activationTest_map; + std::map> (MLPPActivationOld::*)(std::vector>, bool)> activation_map; + std::map (MLPPActivationOld::*)(std::vector, bool)> activationTest_map; std::vector z_test; std::vector a_test; diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp index b4cc199..1f6e01b 100644 --- a/mlpp/lin_alg/lin_alg.cpp +++ b/mlpp/lin_alg/lin_alg.cpp @@ -2118,59 +2118,6 @@ Ref MLPPLinAlg::subtract_matrix_rows(const Ref &a, const return c; } -std::vector MLPPLinAlg::log(std::vector a) { - std::vector b; - b.resize(a.size()); - for (uint32_t i = 0; i < a.size(); i++) { - b[i] = Math::log(a[i]); - } - return b; -} - -std::vector 
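The hidden_layer_old changes above re-point every activation_map entry from &MLPPActivation::... to &MLPPActivationOld::..., and the header updates the pointer-to-member types to match (the multi-output and output layer diffs further down make the same change). For readers unfamiliar with the pattern, a small, self-contained sketch of dispatching through a map of pointer-to-member functions keyed by name; ToyActivation is a stand-in, not an MLPP class.

// Illustrative sketch, not part of the patch.
#include <cmath>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for MLPPActivationOld: each activation takes (z, deriv).
struct ToyActivation {
	std::vector<double> sigmoid(std::vector<double> z, bool deriv) {
		for (double &v : z) {
			double s = 1.0 / (1.0 + std::exp(-v));
			v = deriv ? s * (1.0 - s) : s;
		}
		return z;
	}
	std::vector<double> tanh_act(std::vector<double> z, bool deriv) {
		for (double &v : z) {
			double t = std::tanh(v);
			v = deriv ? 1.0 - t * t : t;
		}
		return z;
	}
};

int main() {
	// Same dispatch shape as activation_map / activationTest_map above:
	// a map from activation name to pointer-to-member function.
	std::map<std::string, std::vector<double> (ToyActivation::*)(std::vector<double>, bool)> activation_map;
	activation_map["Sigmoid"] = &ToyActivation::sigmoid;
	activation_map["Tanh"] = &ToyActivation::tanh_act;

	ToyActivation avn;
	std::vector<double> z = { -1.0, 0.0, 1.0 };
	std::vector<double> a = (avn.*activation_map["Sigmoid"])(z, false);

	for (double v : a) {
		std::cout << v << " ";
	}
	std::cout << std::endl;
	return 0;
}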
MLPPLinAlg::log10(std::vector a) { - std::vector b; - b.resize(a.size()); - for (uint32_t i = 0; i < a.size(); i++) { - b[i] = Math::log10(a[i]); - } - return b; -} - -std::vector MLPPLinAlg::exp(std::vector a) { - std::vector b; - b.resize(a.size()); - for (uint32_t i = 0; i < a.size(); i++) { - b[i] = Math::exp(a[i]); - } - return b; -} - -std::vector MLPPLinAlg::erf(std::vector a) { - std::vector b; - b.resize(a.size()); - for (uint32_t i = 0; i < a.size(); i++) { - b[i] = Math::erf(a[i]); - } - return b; -} - -std::vector MLPPLinAlg::exponentiate(std::vector a, real_t p) { - std::vector b; - b.resize(a.size()); - for (uint32_t i = 0; i < b.size(); i++) { - b[i] = Math::pow(a[i], p); - } - return b; -} - -std::vector MLPPLinAlg::sqrt(std::vector a) { - return exponentiate(a, 0.5); -} - -std::vector MLPPLinAlg::cbrt(std::vector a) { - return exponentiate(a, real_t(1) / real_t(3)); -} - Ref MLPPLinAlg::lognv(const Ref &a) { ERR_FAIL_COND_V(!a.is_valid(), Ref()); diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h index 221f667..9c4594a 100644 --- a/mlpp/lin_alg/lin_alg.h +++ b/mlpp/lin_alg/lin_alg.h @@ -227,14 +227,6 @@ public: std::vector subtractMatrixRows(std::vector a, std::vector> B); Ref subtract_matrix_rows(const Ref &a, const Ref &B); - std::vector log(std::vector a); - std::vector log10(std::vector a); - std::vector exp(std::vector a); - std::vector erf(std::vector a); - std::vector exponentiate(std::vector a, real_t p); - std::vector sqrt(std::vector a); - std::vector cbrt(std::vector a); - Ref lognv(const Ref &a); Ref log10nv(const Ref &a); Ref expnv(const Ref &a); diff --git a/mlpp/lin_alg/lin_alg_old.cpp b/mlpp/lin_alg/lin_alg_old.cpp index d62662d..e945992 100644 --- a/mlpp/lin_alg/lin_alg_old.cpp +++ b/mlpp/lin_alg/lin_alg_old.cpp @@ -253,6 +253,19 @@ std::vector> MLPPLinAlgOld::cbrt(std::vector> MLPPLinAlgOld::matrixPower(std::vector> A, int n) { + std::vector> B = identity(A.size()); + if (n == 0) { + return identity(A.size()); + } else if (n < 0) { + A = inverse(A); + } + for (int i = 0; i < std::abs(n); i++) { + B = matmult(B, A); + } + return B; +} + std::vector> MLPPLinAlgOld::abs(std::vector> A) { std::vector> B; B.resize(A.size()); @@ -1070,6 +1083,14 @@ std::vector MLPPLinAlgOld::cbrt(std::vector a) { return exponentiate(a, real_t(1) / real_t(3)); } +real_t MLPPLinAlgOld::dot(std::vector a, std::vector b) { + real_t c = 0; + for (uint32_t i = 0; i < a.size(); i++) { + c += a[i] * b[i]; + } + return c; +} + std::vector MLPPLinAlgOld::cross(std::vector a, std::vector b) { // Cross products exist in R^7 also. Though, I will limit it to R^3 as Wolfram does this. 
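lin_alg_old.cpp above gains two helpers the old code paths still need: matrixPower (B = A multiplied |n| times, with A inverted first when n is negative and the identity returned for n == 0) and a plain dot product. A short usage sketch, assuming it is built inside this module the same way test/mlpp_tests.cpp is and that identity, inverse, matmult and printMatrix keep their existing signatures.

// Illustrative usage sketch, not part of the patch.
#include <vector>
#include "../mlpp/lin_alg/lin_alg_old.h"

void matrix_power_demo() {
	MLPPLinAlgOld alg;
	std::vector<std::vector<real_t>> A = { { 2, 0 }, { 0, 3 } };

	alg.printMatrix(alg.matrixPower(A, 3)); // {{8, 0}, {0, 27}}
	alg.printMatrix(alg.matrixPower(A, 0)); // identity
	alg.printMatrix(alg.matrixPower(A, -1)); // inverse of A

	real_t d = alg.dot({ 1, 2, 3 }, { 4, 5, 6 }); // 1*4 + 2*5 + 3*6 = 32
	(void)d;
}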
std::vector> mat = { onevec(3), a, b }; diff --git a/mlpp/lin_reg/lin_reg_old.cpp b/mlpp/lin_reg/lin_reg_old.cpp index 9557004..345bcff 100644 --- a/mlpp/lin_reg/lin_reg_old.cpp +++ b/mlpp/lin_reg/lin_reg_old.cpp @@ -7,7 +7,7 @@ #include "lin_reg_old.h" #include "../cost/cost.h" -#include "../lin_alg/lin_alg.h" +#include "../lin_alg/lin_alg_old.h" #include "../regularization/reg.h" #include "../stat/stat.h" #include "../utilities/utilities.h" @@ -40,7 +40,7 @@ real_t MLPPLinRegOld::modelTest(std::vector x) { } void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -72,7 +72,7 @@ void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) } void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -103,7 +103,7 @@ void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI } void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -142,7 +142,7 @@ void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { } void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -182,7 +182,7 @@ void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_siz } void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -229,7 +229,7 @@ void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch } void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -278,7 +278,7 @@ void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size } void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -326,7 +326,7 @@ void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_ void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) { // Adagrad upgrade. Momentum is applied. 
- MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -373,7 +373,7 @@ void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch } void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -426,7 +426,7 @@ void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_siz } void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -477,7 +477,7 @@ void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_s } void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPReg regularization; real_t cost_prev = 0; int epoch = 1; @@ -531,7 +531,7 @@ void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_si } void MLPPLinRegOld::normalEquation() { - MLPPLinAlg alg; + MLPPLinAlgOld alg; MLPPStat stat; std::vector x_means; std::vector> inputSetT = alg.transpose(inputSet); @@ -583,12 +583,12 @@ real_t MLPPLinRegOld::Cost(std::vector y_hat, std::vector y) { } std::vector MLPPLinRegOld::Evaluate(std::vector> X) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } real_t MLPPLinRegOld::Evaluate(std::vector x) { - MLPPLinAlg alg; + MLPPLinAlgOld alg; return alg.dot(weights, x) + bias; } diff --git a/mlpp/log_reg/log_reg_old.cpp b/mlpp/log_reg/log_reg_old.cpp index 6353269..fdbdd99 100644 --- a/mlpp/log_reg/log_reg_old.cpp +++ b/mlpp/log_reg/log_reg_old.cpp @@ -6,7 +6,7 @@ #include "log_reg_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -197,13 +197,13 @@ real_t MLPPLogRegOld::Cost(std::vector y_hat, std::vector y) { std::vector MLPPLogRegOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } real_t MLPPLogRegOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.sigmoid(alg.dot(weights, x) + bias); } diff --git a/mlpp/mann/mann_old.cpp b/mlpp/mann/mann_old.cpp index bb0abd1..fd79b15 100644 --- a/mlpp/mann/mann_old.cpp +++ b/mlpp/mann/mann_old.cpp @@ -6,7 +6,7 @@ #include "mann_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -54,7 +54,7 @@ std::vector MLPPMANNOld::modelTest(std::vector x) { void MLPPMANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index a494253..e3a08ab 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -380,7 +380,7 @@ real_t MLPPMLP::evaluatev(const Ref &x) { Ref pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposem(_weights1), x), _bias1); Ref pa2 = avn.sigmoid_normv(pz2); - return avn.sigmoid(alg.dotv(_weights2, pa2) + _bias2); + return 
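normalEquation() above now builds its matrices with MLPPLinAlgOld, but still solves ordinary least squares in closed form. As a reminder of what it computes, a simplified standalone sketch of the normal equation w = (X^T X)^-1 X^T y against the same helpers used elsewhere in this patch (transpose, matmult, inverse, mat_vec_mult); the free function below is illustrative and skips the mean handling the real method does.

// Illustrative sketch, not part of the patch: w = (X^T X)^-1 X^T y.
#include <vector>
#include "../mlpp/lin_alg/lin_alg_old.h"

std::vector<real_t> normal_equation(std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
	MLPPLinAlgOld alg;
	std::vector<std::vector<real_t>> Xt = alg.transpose(X);
	std::vector<std::vector<real_t>> XtX_inv = alg.inverse(alg.matmult(Xt, X));
	return alg.mat_vec_mult(XtX_inv, alg.mat_vec_mult(Xt, y)); // weights
}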
avn.sigmoid_normr(alg.dotv(_weights2, pa2) + _bias2); } void MLPPMLP::propagatev(const Ref &x, Ref z2_out, Ref a2_out) { diff --git a/mlpp/mlp/mlp_old.cpp b/mlpp/mlp/mlp_old.cpp index 325f0e5..fdd357b 100644 --- a/mlpp/mlp/mlp_old.cpp +++ b/mlpp/mlp/mlp_old.cpp @@ -8,7 +8,7 @@ #include "core/log/logger.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -27,7 +27,7 @@ MLPPMLPOld::MLPPMLPOld(std::vector> p_inputSet, std::vector< lambda = p_lambda; alpha = p_alpha; - MLPPActivation avn; + MLPPActivationOld avn; y_hat.resize(n); weights1 = MLPPUtilities::weightInitialization(k, n_hidden); @@ -45,7 +45,7 @@ real_t MLPPMLPOld::modelTest(std::vector x) { } void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -104,7 +104,7 @@ void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { } void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -160,7 +160,7 @@ void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) { } void MLPPMLPOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -249,7 +249,7 @@ real_t MLPPMLPOld::Cost(std::vector y_hat, std::vector y) { std::vector MLPPMLPOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2))); @@ -257,7 +257,7 @@ std::vector MLPPMLPOld::Evaluate(std::vector> X) { std::tuple>, std::vector>> MLPPMLPOld::propagate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -265,7 +265,7 @@ std::tuple>, std::vector>> M real_t MLPPMLPOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return avn.sigmoid(alg.dot(weights2, a2) + bias2); @@ -273,7 +273,7 @@ real_t MLPPMLPOld::Evaluate(std::vector x) { std::tuple, std::vector> MLPPMLPOld::propagate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -281,7 +281,7 @@ std::tuple, std::vector> MLPPMLPOld::propagate(std:: void MLPPMLPOld::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2))); diff --git a/mlpp/multi_output_layer/multi_output_layer_old.cpp b/mlpp/multi_output_layer/multi_output_layer_old.cpp index 127af61..ec74ffd 100644 --- a/mlpp/multi_output_layer/multi_output_layer_old.cpp +++ b/mlpp/multi_output_layer/multi_output_layer_old.cpp @@ -25,86 +25,86 @@ 
MLPPOldMultiOutputLayer::MLPPOldMultiOutputLayer(int p_n_output, int p_n_hidden, weights = MLPPUtilities::weightInitialization(n_hidden, n_output, weightInit); bias = MLPPUtilities::biasInitialization(n_output); - activation_map["Linear"] = &MLPPActivation::linear; - activationTest_map["Linear"] = &MLPPActivation::linear; + activation_map["Linear"] = &MLPPActivationOld::linear; + activationTest_map["Linear"] = &MLPPActivationOld::linear; - activation_map["Sigmoid"] = &MLPPActivation::sigmoid; - activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid; + activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid; + activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid; - activation_map["Softmax"] = &MLPPActivation::softmax; - activationTest_map["Softmax"] = &MLPPActivation::softmax; + activation_map["Softmax"] = &MLPPActivationOld::softmax; + activationTest_map["Softmax"] = &MLPPActivationOld::softmax; - activation_map["Swish"] = &MLPPActivation::swish; - activationTest_map["Swish"] = &MLPPActivation::swish; + activation_map["Swish"] = &MLPPActivationOld::swish; + activationTest_map["Swish"] = &MLPPActivationOld::swish; - activation_map["Mish"] = &MLPPActivation::mish; - activationTest_map["Mish"] = &MLPPActivation::mish; + activation_map["Mish"] = &MLPPActivationOld::mish; + activationTest_map["Mish"] = &MLPPActivationOld::mish; - activation_map["SinC"] = &MLPPActivation::sinc; - activationTest_map["SinC"] = &MLPPActivation::sinc; + activation_map["SinC"] = &MLPPActivationOld::sinc; + activationTest_map["SinC"] = &MLPPActivationOld::sinc; - activation_map["Softplus"] = &MLPPActivation::softplus; - activationTest_map["Softplus"] = &MLPPActivation::softplus; + activation_map["Softplus"] = &MLPPActivationOld::softplus; + activationTest_map["Softplus"] = &MLPPActivationOld::softplus; - activation_map["Softsign"] = &MLPPActivation::softsign; - activationTest_map["Softsign"] = &MLPPActivation::softsign; + activation_map["Softsign"] = &MLPPActivationOld::softsign; + activationTest_map["Softsign"] = &MLPPActivationOld::softsign; - activation_map["CLogLog"] = &MLPPActivation::cloglog; - activationTest_map["CLogLog"] = &MLPPActivation::cloglog; + activation_map["CLogLog"] = &MLPPActivationOld::cloglog; + activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog; - activation_map["Logit"] = &MLPPActivation::logit; - activationTest_map["Logit"] = &MLPPActivation::logit; + activation_map["Logit"] = &MLPPActivationOld::logit; + activationTest_map["Logit"] = &MLPPActivationOld::logit; - activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; - activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; + activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF; + activationTest_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF; - activation_map["RELU"] = &MLPPActivation::RELU; - activationTest_map["RELU"] = &MLPPActivation::RELU; + activation_map["RELU"] = &MLPPActivationOld::RELU; + activationTest_map["RELU"] = &MLPPActivationOld::RELU; - activation_map["GELU"] = &MLPPActivation::GELU; - activationTest_map["GELU"] = &MLPPActivation::GELU; + activation_map["GELU"] = &MLPPActivationOld::GELU; + activationTest_map["GELU"] = &MLPPActivationOld::GELU; - activation_map["Sign"] = &MLPPActivation::sign; - activationTest_map["Sign"] = &MLPPActivation::sign; + activation_map["Sign"] = &MLPPActivationOld::sign; + activationTest_map["Sign"] = &MLPPActivationOld::sign; - activation_map["UnitStep"] = &MLPPActivation::unitStep; - activationTest_map["UnitStep"] = &MLPPActivation::unitStep; + 
activation_map["UnitStep"] = &MLPPActivationOld::unitStep; + activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep; - activation_map["Sinh"] = &MLPPActivation::sinh; - activationTest_map["Sinh"] = &MLPPActivation::sinh; + activation_map["Sinh"] = &MLPPActivationOld::sinh; + activationTest_map["Sinh"] = &MLPPActivationOld::sinh; - activation_map["Cosh"] = &MLPPActivation::cosh; - activationTest_map["Cosh"] = &MLPPActivation::cosh; + activation_map["Cosh"] = &MLPPActivationOld::cosh; + activationTest_map["Cosh"] = &MLPPActivationOld::cosh; - activation_map["Tanh"] = &MLPPActivation::tanh; - activationTest_map["Tanh"] = &MLPPActivation::tanh; + activation_map["Tanh"] = &MLPPActivationOld::tanh; + activationTest_map["Tanh"] = &MLPPActivationOld::tanh; - activation_map["Csch"] = &MLPPActivation::csch; - activationTest_map["Csch"] = &MLPPActivation::csch; + activation_map["Csch"] = &MLPPActivationOld::csch; + activationTest_map["Csch"] = &MLPPActivationOld::csch; - activation_map["Sech"] = &MLPPActivation::sech; - activationTest_map["Sech"] = &MLPPActivation::sech; + activation_map["Sech"] = &MLPPActivationOld::sech; + activationTest_map["Sech"] = &MLPPActivationOld::sech; - activation_map["Coth"] = &MLPPActivation::coth; - activationTest_map["Coth"] = &MLPPActivation::coth; + activation_map["Coth"] = &MLPPActivationOld::coth; + activationTest_map["Coth"] = &MLPPActivationOld::coth; - activation_map["Arsinh"] = &MLPPActivation::arsinh; - activationTest_map["Arsinh"] = &MLPPActivation::arsinh; + activation_map["Arsinh"] = &MLPPActivationOld::arsinh; + activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh; - activation_map["Arcosh"] = &MLPPActivation::arcosh; - activationTest_map["Arcosh"] = &MLPPActivation::arcosh; + activation_map["Arcosh"] = &MLPPActivationOld::arcosh; + activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh; - activation_map["Artanh"] = &MLPPActivation::artanh; - activationTest_map["Artanh"] = &MLPPActivation::artanh; + activation_map["Artanh"] = &MLPPActivationOld::artanh; + activationTest_map["Artanh"] = &MLPPActivationOld::artanh; - activation_map["Arcsch"] = &MLPPActivation::arcsch; - activationTest_map["Arcsch"] = &MLPPActivation::arcsch; + activation_map["Arcsch"] = &MLPPActivationOld::arcsch; + activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch; - activation_map["Arsech"] = &MLPPActivation::arsech; - activationTest_map["Arsech"] = &MLPPActivation::arsech; + activation_map["Arsech"] = &MLPPActivationOld::arsech; + activationTest_map["Arsech"] = &MLPPActivationOld::arsech; - activation_map["Arcoth"] = &MLPPActivation::arcoth; - activationTest_map["Arcoth"] = &MLPPActivation::arcoth; + activation_map["Arcoth"] = &MLPPActivationOld::arcoth; + activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth; costDeriv_map["MSE"] = &MLPPCost::MSEDeriv; cost_map["MSE"] = &MLPPCost::MSE; @@ -126,14 +126,14 @@ MLPPOldMultiOutputLayer::MLPPOldMultiOutputLayer(int p_n_output, int p_n_hidden, void MLPPOldMultiOutputLayer::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, false); } void MLPPOldMultiOutputLayer::Test(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, false); } diff --git a/mlpp/multi_output_layer/multi_output_layer_old.h b/mlpp/multi_output_layer/multi_output_layer_old.h 
index ad30286..a3da569 100644 --- a/mlpp/multi_output_layer/multi_output_layer_old.h +++ b/mlpp/multi_output_layer/multi_output_layer_old.h @@ -13,7 +13,7 @@ #include "core/object/reference.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../regularization/reg.h" #include "../utilities/utilities.h" @@ -42,8 +42,8 @@ public: std::vector> z; std::vector> a; - std::map> (MLPPActivation::*)(std::vector>, bool)> activation_map; - std::map (MLPPActivation::*)(std::vector, bool)> activationTest_map; + std::map> (MLPPActivationOld::*)(std::vector>, bool)> activation_map; + std::map (MLPPActivationOld::*)(std::vector, bool)> activationTest_map; std::map>, std::vector>)> cost_map; std::map> (MLPPCost::*)(std::vector>, std::vector>)> costDeriv_map; diff --git a/mlpp/output_layer/output_layer_old.cpp b/mlpp/output_layer/output_layer_old.cpp index 1c62964..b64af60 100644 --- a/mlpp/output_layer/output_layer_old.cpp +++ b/mlpp/output_layer/output_layer_old.cpp @@ -24,83 +24,83 @@ MLPPOldOutputLayer::MLPPOldOutputLayer(int p_n_hidden, std::string p_activation, weights = MLPPUtilities::weightInitialization(n_hidden, weightInit); bias = MLPPUtilities::biasInitialization(); - activation_map["Linear"] = &MLPPActivation::linear; - activationTest_map["Linear"] = &MLPPActivation::linear; + activation_map["Linear"] = &MLPPActivationOld::linear; + activationTest_map["Linear"] = &MLPPActivationOld::linear; - activation_map["Sigmoid"] = &MLPPActivation::sigmoid; - activationTest_map["Sigmoid"] = &MLPPActivation::sigmoid; + activation_map["Sigmoid"] = &MLPPActivationOld::sigmoid; + activationTest_map["Sigmoid"] = &MLPPActivationOld::sigmoid; - activation_map["Swish"] = &MLPPActivation::swish; - activationTest_map["Swish"] = &MLPPActivation::swish; + activation_map["Swish"] = &MLPPActivationOld::swish; + activationTest_map["Swish"] = &MLPPActivationOld::swish; - activation_map["Mish"] = &MLPPActivation::mish; - activationTest_map["Mish"] = &MLPPActivation::mish; + activation_map["Mish"] = &MLPPActivationOld::mish; + activationTest_map["Mish"] = &MLPPActivationOld::mish; - activation_map["SinC"] = &MLPPActivation::sinc; - activationTest_map["SinC"] = &MLPPActivation::sinc; + activation_map["SinC"] = &MLPPActivationOld::sinc; + activationTest_map["SinC"] = &MLPPActivationOld::sinc; - activation_map["Softplus"] = &MLPPActivation::softplus; - activationTest_map["Softplus"] = &MLPPActivation::softplus; + activation_map["Softplus"] = &MLPPActivationOld::softplus; + activationTest_map["Softplus"] = &MLPPActivationOld::softplus; - activation_map["Softsign"] = &MLPPActivation::softsign; - activationTest_map["Softsign"] = &MLPPActivation::softsign; + activation_map["Softsign"] = &MLPPActivationOld::softsign; + activationTest_map["Softsign"] = &MLPPActivationOld::softsign; - activation_map["CLogLog"] = &MLPPActivation::cloglog; - activationTest_map["CLogLog"] = &MLPPActivation::cloglog; + activation_map["CLogLog"] = &MLPPActivationOld::cloglog; + activationTest_map["CLogLog"] = &MLPPActivationOld::cloglog; - activation_map["Logit"] = &MLPPActivation::logit; - activationTest_map["Logit"] = &MLPPActivation::logit; + activation_map["Logit"] = &MLPPActivationOld::logit; + activationTest_map["Logit"] = &MLPPActivationOld::logit; - activation_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; - activationTest_map["GaussianCDF"] = &MLPPActivation::gaussianCDF; + activation_map["GaussianCDF"] = &MLPPActivationOld::gaussianCDF; + activationTest_map["GaussianCDF"] = 
&MLPPActivationOld::gaussianCDF; - activation_map["RELU"] = &MLPPActivation::RELU; - activationTest_map["RELU"] = &MLPPActivation::RELU; + activation_map["RELU"] = &MLPPActivationOld::RELU; + activationTest_map["RELU"] = &MLPPActivationOld::RELU; - activation_map["GELU"] = &MLPPActivation::GELU; - activationTest_map["GELU"] = &MLPPActivation::GELU; + activation_map["GELU"] = &MLPPActivationOld::GELU; + activationTest_map["GELU"] = &MLPPActivationOld::GELU; - activation_map["Sign"] = &MLPPActivation::sign; - activationTest_map["Sign"] = &MLPPActivation::sign; + activation_map["Sign"] = &MLPPActivationOld::sign; + activationTest_map["Sign"] = &MLPPActivationOld::sign; - activation_map["UnitStep"] = &MLPPActivation::unitStep; - activationTest_map["UnitStep"] = &MLPPActivation::unitStep; + activation_map["UnitStep"] = &MLPPActivationOld::unitStep; + activationTest_map["UnitStep"] = &MLPPActivationOld::unitStep; - activation_map["Sinh"] = &MLPPActivation::sinh; - activationTest_map["Sinh"] = &MLPPActivation::sinh; + activation_map["Sinh"] = &MLPPActivationOld::sinh; + activationTest_map["Sinh"] = &MLPPActivationOld::sinh; - activation_map["Cosh"] = &MLPPActivation::cosh; - activationTest_map["Cosh"] = &MLPPActivation::cosh; + activation_map["Cosh"] = &MLPPActivationOld::cosh; + activationTest_map["Cosh"] = &MLPPActivationOld::cosh; - activation_map["Tanh"] = &MLPPActivation::tanh; - activationTest_map["Tanh"] = &MLPPActivation::tanh; + activation_map["Tanh"] = &MLPPActivationOld::tanh; + activationTest_map["Tanh"] = &MLPPActivationOld::tanh; - activation_map["Csch"] = &MLPPActivation::csch; - activationTest_map["Csch"] = &MLPPActivation::csch; + activation_map["Csch"] = &MLPPActivationOld::csch; + activationTest_map["Csch"] = &MLPPActivationOld::csch; - activation_map["Sech"] = &MLPPActivation::sech; - activationTest_map["Sech"] = &MLPPActivation::sech; + activation_map["Sech"] = &MLPPActivationOld::sech; + activationTest_map["Sech"] = &MLPPActivationOld::sech; - activation_map["Coth"] = &MLPPActivation::coth; - activationTest_map["Coth"] = &MLPPActivation::coth; + activation_map["Coth"] = &MLPPActivationOld::coth; + activationTest_map["Coth"] = &MLPPActivationOld::coth; - activation_map["Arsinh"] = &MLPPActivation::arsinh; - activationTest_map["Arsinh"] = &MLPPActivation::arsinh; + activation_map["Arsinh"] = &MLPPActivationOld::arsinh; + activationTest_map["Arsinh"] = &MLPPActivationOld::arsinh; - activation_map["Arcosh"] = &MLPPActivation::arcosh; - activationTest_map["Arcosh"] = &MLPPActivation::arcosh; + activation_map["Arcosh"] = &MLPPActivationOld::arcosh; + activationTest_map["Arcosh"] = &MLPPActivationOld::arcosh; - activation_map["Artanh"] = &MLPPActivation::artanh; - activationTest_map["Artanh"] = &MLPPActivation::artanh; + activation_map["Artanh"] = &MLPPActivationOld::artanh; + activationTest_map["Artanh"] = &MLPPActivationOld::artanh; - activation_map["Arcsch"] = &MLPPActivation::arcsch; - activationTest_map["Arcsch"] = &MLPPActivation::arcsch; + activation_map["Arcsch"] = &MLPPActivationOld::arcsch; + activationTest_map["Arcsch"] = &MLPPActivationOld::arcsch; - activation_map["Arsech"] = &MLPPActivation::arsech; - activationTest_map["Arsech"] = &MLPPActivation::arsech; + activation_map["Arsech"] = &MLPPActivationOld::arsech; + activationTest_map["Arsech"] = &MLPPActivationOld::arsech; - activation_map["Arcoth"] = &MLPPActivation::arcoth; - activationTest_map["Arcoth"] = &MLPPActivation::arcoth; + activation_map["Arcoth"] = &MLPPActivationOld::arcoth; + activationTest_map["Arcoth"] 
= &MLPPActivationOld::arcoth; costDeriv_map["MSE"] = &MLPPCost::MSEDeriv; cost_map["MSE"] = &MLPPCost::MSE; @@ -122,14 +122,14 @@ MLPPOldOutputLayer::MLPPOldOutputLayer(int p_n_hidden, std::string p_activation, void MLPPOldOutputLayer::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); a = (avn.*activation_map[activation])(z, false); } void MLPPOldOutputLayer::Test(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z_test = alg.dot(weights, x) + bias; a_test = (avn.*activationTest_map[activation])(z_test, false); } diff --git a/mlpp/output_layer/output_layer_old.h b/mlpp/output_layer/output_layer_old.h index bc43144..e15a74c 100644 --- a/mlpp/output_layer/output_layer_old.h +++ b/mlpp/output_layer/output_layer_old.h @@ -13,7 +13,7 @@ #include "core/object/reference.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../regularization/reg.h" #include "../utilities/utilities.h" @@ -41,8 +41,8 @@ public: std::vector z; std::vector a; - std::map (MLPPActivation::*)(std::vector, bool)> activation_map; - std::map activationTest_map; + std::map (MLPPActivationOld::*)(std::vector, bool)> activation_map; + std::map activationTest_map; std::map, std::vector)> cost_map; std::map (MLPPCost::*)(std::vector, std::vector)> costDeriv_map; diff --git a/mlpp/probit_reg/probit_reg_old.cpp b/mlpp/probit_reg/probit_reg_old.cpp index 16776d8..2f30eae 100644 --- a/mlpp/probit_reg/probit_reg_old.cpp +++ b/mlpp/probit_reg/probit_reg_old.cpp @@ -5,7 +5,7 @@ // #include "probit_reg_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -30,7 +30,7 @@ real_t MLPPProbitRegOld::modelTest(std::vector x) { } void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -63,7 +63,7 @@ void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool } void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -97,7 +97,7 @@ void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) { void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { // NOTE: ∂y_hat/∂z is sparse - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -138,7 +138,7 @@ void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { } void MLPPProbitRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -216,7 +216,7 @@ real_t MLPPProbitRegOld::Cost(std::vector y_hat, std::vector y) std::vector MLPPProbitRegOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -227,7 +227,7 @@ std::vector MLPPProbitRegOld::propagate(std::vector> real_t MLPPProbitRegOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.gaussianCDF(alg.dot(weights, x) + bias); } @@ -238,7 +238,7 @@ 
real_t MLPPProbitRegOld::propagate(std::vector x) { // gaussianCDF ( wTx + b ) void MLPPProbitRegOld::forwardPass() { - MLPPActivation avn; + MLPPActivationOld avn; z = propagate(inputSet); y_hat = avn.gaussianCDF(z); diff --git a/mlpp/regularization/reg.cpp b/mlpp/regularization/reg.cpp index 7b9e45f..922d6b0 100644 --- a/mlpp/regularization/reg.cpp +++ b/mlpp/regularization/reg.cpp @@ -169,9 +169,9 @@ real_t MLPPReg::reg_deriv_termvr(const Ref &weights, real_t lambda, if (reg == REGULARIZATION_TYPE_RIDGE) { return lambda * wj; } else if (reg == REGULARIZATION_TYPE_LASSO) { - return lambda * act.sign(wj); + return lambda * act.sign_normr(wj); } else if (reg == REGULARIZATION_TYPE_ELASTIC_NET) { - return alpha * lambda * act.sign(wj) + (1 - alpha) * lambda * wj; + return alpha * lambda * act.sign_normr(wj) + (1 - alpha) * lambda * wj; } else if (reg == REGULARIZATION_TYPE_WEIGHT_CLIPPING) { // Preparation for Wasserstein GANs. // We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold. // alpha > lambda. @@ -194,9 +194,9 @@ real_t MLPPReg::reg_deriv_termmr(const Ref &weights, real_t lambda, if (reg == REGULARIZATION_TYPE_RIDGE) { return lambda * wj; } else if (reg == REGULARIZATION_TYPE_LASSO) { - return lambda * act.sign(wj); + return lambda * act.sign_normr(wj); } else if (reg == REGULARIZATION_TYPE_ELASTIC_NET) { - return alpha * lambda * act.sign(wj) + (1 - alpha) * lambda * wj; + return alpha * lambda * act.sign_normr(wj) + (1 - alpha) * lambda * wj; } else if (reg == REGULARIZATION_TYPE_WEIGHT_CLIPPING) { // Preparation for Wasserstein GANs. // We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold. // alpha > lambda. @@ -322,9 +322,9 @@ real_t MLPPReg::regDerivTerm(std::vector weights, real_t lambda, real_t if (reg == "Ridge") { return lambda * weights[j]; } else if (reg == "Lasso") { - return lambda * act.sign(weights[j]); + return lambda * act.sign_normr(weights[j]); } else if (reg == "ElasticNet") { - return alpha * lambda * act.sign(weights[j]) + (1 - alpha) * lambda * weights[j]; + return alpha * lambda * act.sign_normr(weights[j]) + (1 - alpha) * lambda * weights[j]; } else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs. // We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold. // alpha > lambda. @@ -345,9 +345,9 @@ real_t MLPPReg::regDerivTerm(std::vector> weights, real_t la if (reg == "Ridge") { return lambda * weights[i][j]; } else if (reg == "Lasso") { - return lambda * act.sign(weights[i][j]); + return lambda * act.sign_normr(weights[i][j]); } else if (reg == "ElasticNet") { - return alpha * lambda * act.sign(weights[i][j]) + (1 - alpha) * lambda * weights[i][j]; + return alpha * lambda * act.sign_normr(weights[i][j]) + (1 - alpha) * lambda * weights[i][j]; } else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs. // We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold. // alpha > lambda. 
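The reg.cpp hunks above only swap act.sign for act.sign_normr; the per-weight derivative terms themselves are unchanged. For reference, a standalone sketch of those terms: Ridge, Lasso, ElasticNet, and the WGAN-style weight clipping the comments describe (lambda as the lower and alpha as the upper bound). The clipping branch below is paraphrased from those comments rather than copied from the source, and sign() is the usual -1/0/+1 function.

// Illustrative sketch, not part of the patch.
#include <algorithm>
#include <string>

inline double sign(double w) {
	return (w > 0.0) - (w < 0.0);
}

// Per-weight regularization derivative term, mirroring regDerivTerm above.
double reg_deriv_term(double w, double lambda, double alpha, const std::string &reg) {
	if (reg == "Ridge") {
		return lambda * w;
	} else if (reg == "Lasso") {
		return lambda * sign(w);
	} else if (reg == "ElasticNet") {
		return alpha * lambda * sign(w) + (1.0 - alpha) * lambda * w;
	} else if (reg == "WeightClipping") {
		// Not a gradient: clamp the weight into [lambda, alpha] (lambda < alpha),
		// as used for Wasserstein GAN critics.
		return std::max(lambda, std::min(w, alpha));
	}
	return 0.0;
}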
diff --git a/mlpp/regularization/reg_old.cpp b/mlpp/regularization/reg_old.cpp index 3d85658..6ed659e 100644 --- a/mlpp/regularization/reg_old.cpp +++ b/mlpp/regularization/reg_old.cpp @@ -8,7 +8,7 @@ #include "core/math/math_defs.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../lin_alg/lin_alg.h" #include @@ -66,7 +66,7 @@ std::vector> MLPPRegOld::regDerivTerm(std::vector weights, real_t lambda, real_t alpha, std::string reg, int j) { - MLPPActivation act; + MLPPActivationOld act; if (reg == "Ridge") { return lambda * weights[j]; } else if (reg == "Lasso") { @@ -89,7 +89,7 @@ real_t MLPPRegOld::regDerivTerm(std::vector weights, real_t lambda, real } real_t MLPPRegOld::regDerivTerm(std::vector> weights, real_t lambda, real_t alpha, std::string reg, int i, int j) { - MLPPActivation act; + MLPPActivationOld act; if (reg == "Ridge") { return lambda * weights[i][j]; } else if (reg == "Lasso") { diff --git a/mlpp/softmax_net/softmax_net_old.cpp b/mlpp/softmax_net/softmax_net_old.cpp index 0b1cd2e..4192eec 100644 --- a/mlpp/softmax_net/softmax_net_old.cpp +++ b/mlpp/softmax_net/softmax_net_old.cpp @@ -6,7 +6,7 @@ #include "softmax_net_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../data/data.h" #include "../lin_alg/lin_alg.h" @@ -44,7 +44,7 @@ std::vector> MLPPSoftmaxNetOld::modelSetTest(std::vector> y_hat, std::vect std::vector> MLPPSoftmaxNetOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2)); @@ -278,7 +278,7 @@ std::vector> MLPPSoftmaxNetOld::Evaluate(std::vector>, std::vector>> MLPPSoftmaxNetOld::propagate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -286,7 +286,7 @@ std::tuple>, std::vector>> M std::vector MLPPSoftmaxNetOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2)); @@ -294,7 +294,7 @@ std::vector MLPPSoftmaxNetOld::Evaluate(std::vector x) { std::tuple, std::vector> MLPPSoftmaxNetOld::propagate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); return { z2, a2 }; @@ -302,7 +302,7 @@ std::tuple, std::vector> MLPPSoftmaxNetOld::propagat void MLPPSoftmaxNetOld::forwardPass() { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2)); diff --git a/mlpp/softmax_reg/softmax_reg_old.cpp b/mlpp/softmax_reg/softmax_reg_old.cpp index 8226af8..31b72fe 100644 --- a/mlpp/softmax_reg/softmax_reg_old.cpp +++ b/mlpp/softmax_reg/softmax_reg_old.cpp @@ -5,7 +5,7 @@ // #include "softmax_reg_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include 
"../regularization/reg.h" @@ -173,13 +173,13 @@ real_t MLPPSoftmaxRegOld::Cost(std::vector> y_hat, std::vect std::vector MLPPSoftmaxRegOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x))); } std::vector> MLPPSoftmaxRegOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias)); } @@ -187,7 +187,7 @@ std::vector> MLPPSoftmaxRegOld::Evaluate(std::vector @@ -189,7 +189,7 @@ real_t MLPPStatOld::heinzMean(const real_t A, const real_t B, const real_t x) { } real_t MLPPStatOld::neumanSandorMean(const real_t a, const real_t b) { - MLPPActivation avn; + MLPPActivationOld avn; return (a - b) / 2 * avn.arsinh((a - b) / (a + b)); } diff --git a/mlpp/svc/svc_old.cpp b/mlpp/svc/svc_old.cpp index f0de1b4..23cb1e7 100644 --- a/mlpp/svc/svc_old.cpp +++ b/mlpp/svc/svc_old.cpp @@ -5,7 +5,7 @@ // #include "svc_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -24,7 +24,6 @@ real_t MLPPSVCOld::modelTest(std::vector x) { void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPActivation avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -57,7 +56,6 @@ void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - MLPPActivation avn; MLPPLinAlg alg; MLPPReg regularization; @@ -101,7 +99,6 @@ void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { class MLPPCost cost; - MLPPActivation avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -172,31 +169,29 @@ real_t MLPPSVCOld::Cost(std::vector z, std::vector y, std::vecto std::vector MLPPSVCOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector MLPPSVCOld::propagate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } real_t MLPPSVCOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.sign(alg.dot(weights, x) + bias); } real_t MLPPSVCOld::propagate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; return alg.dot(weights, x) + bias; } // sign ( wTx + b ) void MLPPSVCOld::forwardPass() { - MLPPActivation avn; + MLPPActivationOld avn; z = propagate(inputSet); y_hat = avn.sign(z); diff --git a/mlpp/tanh_reg/tanh_reg_old.cpp b/mlpp/tanh_reg/tanh_reg_old.cpp index bdf9ab9..e77e5c6 100644 --- a/mlpp/tanh_reg/tanh_reg_old.cpp +++ b/mlpp/tanh_reg/tanh_reg_old.cpp @@ -6,7 +6,7 @@ #include "tanh_reg_old.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -31,7 +31,7 @@ real_t MLPPTanhRegOld::modelTest(std::vector x) { } void MLPPTanhRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; real_t cost_prev = 0; @@ -104,7 +104,7 @@ void MLPPTanhRegOld::SGD(real_t 
learning_rate, int max_epoch, bool UI) { } void MLPPTanhRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; @@ -167,7 +167,7 @@ real_t MLPPTanhRegOld::Cost(std::vector y_hat, std::vector y) { std::vector MLPPTanhRegOld::Evaluate(std::vector> X) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } @@ -178,7 +178,7 @@ std::vector MLPPTanhRegOld::propagate(std::vector> X real_t MLPPTanhRegOld::Evaluate(std::vector x) { MLPPLinAlg alg; - MLPPActivation avn; + MLPPActivationOld avn; return avn.tanh(alg.dot(weights, x) + bias); } @@ -189,7 +189,7 @@ real_t MLPPTanhRegOld::propagate(std::vector x) { // Tanh ( wTx + b ) void MLPPTanhRegOld::forwardPass() { - MLPPActivation avn; + MLPPActivationOld avn; z = propagate(inputSet); y_hat = avn.tanh(z); diff --git a/mlpp/wgan/wgan_old.cpp b/mlpp/wgan/wgan_old.cpp index 491a634..230e696 100644 --- a/mlpp/wgan/wgan_old.cpp +++ b/mlpp/wgan/wgan_old.cpp @@ -8,7 +8,7 @@ #include "core/log/logger.h" -#include "../activation/activation.h" +#include "../activation/activation_old.h" #include "../cost/cost.h" #include "../lin_alg/lin_alg.h" #include "../regularization/reg.h" @@ -228,7 +228,7 @@ void MLPPWGANOld::updateGeneratorParameters(std::vector>>, std::vector> MLPPWGANOld::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; @@ -263,7 +263,7 @@ std::tuple>>, std::vector> M std::vector>> MLPPWGANOld::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; - MLPPActivation avn; + MLPPActivationOld avn; MLPPLinAlg alg; MLPPReg regularization; diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp index 596e799..3206c18 100644 --- a/test/mlpp_tests.cpp +++ b/test/mlpp_tests.cpp @@ -47,6 +47,7 @@ #include "../mlpp/uni_lin_reg/uni_lin_reg.h" #include "../mlpp/wgan/wgan.h" +#include "../mlpp/activation/activation_old.h" #include "../mlpp/ann/ann_old.h" #include "../mlpp/auto_encoder/auto_encoder_old.h" #include "../mlpp/bernoulli_nb/bernoulli_nb_old.h" @@ -56,6 +57,7 @@ #include "../mlpp/gan/gan_old.h" #include "../mlpp/gaussian_nb/gaussian_nb_old.h" #include "../mlpp/hidden_layer/hidden_layer_old.h" +#include "../mlpp/lin_alg/lin_alg_old.h" #include "../mlpp/lin_reg/lin_reg_old.h" #include "../mlpp/log_reg/log_reg_old.h" #include "../mlpp/mann/mann_old.h" @@ -1067,8 +1069,8 @@ void MLPPTests::test_outlier_finder(bool ui) { PLOG_MSG(Variant(outlier_finder.model_test(input_set))); } void MLPPTests::test_new_math_functions() { - MLPPLinAlg alg; - MLPPActivation avn; + MLPPLinAlgOld alg; + MLPPActivationOld avn; MLPPData data; // Testing new Functions @@ -1107,11 +1109,9 @@ void MLPPTests::test_new_math_functions() { alg.printMatrix(alg.gramSchmidtProcess(P)); - MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works! - - alg.printMatrix(qrd_result.Q); - - alg.printMatrix(qrd_result.R); + //MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works! + //alg.printMatrix(qrd_result.Q); + //alg.printMatrix(qrd_result.R); } void MLPPTests::test_positive_definiteness_checker() { //MLPPStat stat;
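test_new_math_functions above now runs against MLPPLinAlgOld and MLPPActivationOld, keeps the Gram-Schmidt call, and comments out the QRD check. For reference, a minimal sketch of the classical Gram-Schmidt process that call exercises, written on plain vectors; this is the textbook algorithm, not the library's gramSchmidtProcess implementation.

// Illustrative sketch, not part of the patch.
#include <cmath>
#include <cstddef>
#include <vector>

// Classical Gram-Schmidt: orthonormalizes the rows of A, which are assumed
// to be linearly independent.
std::vector<std::vector<double>> gram_schmidt(std::vector<std::vector<double>> A) {
	for (std::size_t i = 0; i < A.size(); ++i) {
		// Remove the components along the rows already orthonormalized.
		for (std::size_t j = 0; j < i; ++j) {
			double proj = 0.0;
			for (std::size_t k = 0; k < A[i].size(); ++k) {
				proj += A[i][k] * A[j][k];
			}
			for (std::size_t k = 0; k < A[i].size(); ++k) {
				A[i][k] -= proj * A[j][k];
			}
		}
		// Normalize what is left.
		double norm = 0.0;
		for (double v : A[i]) {
			norm += v * v;
		}
		norm = std::sqrt(norm);
		for (double &v : A[i]) {
			v /= norm;
		}
	}
	return A;
}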