From 6fe1f32c3d2cacde5baa3385cb1883e11ddad5cc Mon Sep 17 00:00:00 2001 From: Relintai Date: Wed, 25 Jan 2023 00:29:02 +0100 Subject: [PATCH] Prefixed LinAlg with MLPP. --- mlpp/activation/activation.cpp | 100 ++++----- mlpp/ann/ann.cpp | 26 +-- mlpp/auto_encoder/auto_encoder.cpp | 16 +- mlpp/bernoulli_nb/bernoulli_nb.cpp | 2 +- mlpp/c_log_log_reg/c_log_log_reg.cpp | 18 +- mlpp/convolutions/convolutions.cpp | 14 +- mlpp/cost/cost.cpp | 44 ++-- mlpp/data/data.cpp | 28 +-- mlpp/dual_svc/dual_svc.cpp | 16 +- mlpp/exp_reg/exp_reg.cpp | 4 +- mlpp/gan/gan.cpp | 20 +- mlpp/gaussian_nb/gaussian_nb.cpp | 6 +- mlpp/hidden_layer/hidden_layer.cpp | 4 +- mlpp/kmeans/kmeans.cpp | 14 +- mlpp/knn/knn.cpp | 2 +- mlpp/lin_alg/lin_alg.cpp | 206 +++++++++--------- mlpp/lin_alg/lin_alg.h | 2 +- mlpp/lin_reg/lin_reg.cpp | 14 +- mlpp/log_reg/log_reg.cpp | 12 +- mlpp/mann/mann.cpp | 2 +- mlpp/mlp/mlp.cpp | 16 +- .../multi_output_layer/multi_output_layer.cpp | 4 +- mlpp/multinomial_nb/multinomial_nb.cpp | 2 +- .../numerical_analysis/numerical_analysis.cpp | 10 +- mlpp/output_layer/output_layer.cpp | 4 +- mlpp/pca/pca.cpp | 4 +- mlpp/probit_reg/probit_reg.cpp | 18 +- mlpp/regularization/reg.cpp | 4 +- mlpp/softmax_net/softmax_net.cpp | 18 +- mlpp/softmax_reg/softmax_reg.cpp | 12 +- mlpp/stat/stat.cpp | 2 +- mlpp/svc/svc.cpp | 16 +- mlpp/tanh_reg/tanh_reg.cpp | 16 +- mlpp/transforms/transforms.cpp | 2 +- mlpp/uni_lin_reg/uni_lin_reg.cpp | 2 +- mlpp/wgan/wgan.cpp | 20 +- 36 files changed, 350 insertions(+), 350 deletions(-) diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp index 9f78d70..e38d12f 100644 --- a/mlpp/activation/activation.cpp +++ b/mlpp/activation/activation.cpp @@ -19,7 +19,7 @@ double MLPPActivation::linear(double z, bool deriv) { std::vector MLPPActivation::linear(std::vector z, bool deriv) { if (deriv) { - LinAlg alg; + MLPPLinAlg alg; return alg.onevec(z.size()); } return z; @@ -27,7 +27,7 @@ std::vector MLPPActivation::linear(std::vector z, bool deriv) { std::vector> MLPPActivation::linear(std::vector> z, bool deriv) { if (deriv) { - LinAlg alg; + MLPPLinAlg alg; return alg.onemat(z.size(), z[0].size()); } return z; @@ -41,7 +41,7 @@ double MLPPActivation::sigmoid(double z, bool deriv) { } std::vector MLPPActivation::sigmoid(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); } @@ -49,7 +49,7 @@ std::vector MLPPActivation::sigmoid(std::vector z, bool deriv) { } std::vector> MLPPActivation::sigmoid(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), sigmoid(z))); } @@ -57,7 +57,7 @@ std::vector> MLPPActivation::sigmoid(std::vector MLPPActivation::softmax(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; std::vector a; a.resize(z.size()); std::vector expZ = alg.exp(z); @@ -73,7 +73,7 @@ std::vector MLPPActivation::softmax(std::vector z, bool deriv) { } std::vector> MLPPActivation::softmax(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; std::vector> a; a.resize(z.size()); @@ -84,7 +84,7 @@ std::vector> MLPPActivation::softmax(std::vector MLPPActivation::adjSoftmax(std::vector z) { - LinAlg alg; + MLPPLinAlg alg; std::vector a; double C = -*std::max_element(z.begin(), z.end()); z = alg.scalarAdd(C, z); @@ -93,7 +93,7 @@ std::vector MLPPActivation::adjSoftmax(std::vector z) { } std::vector> MLPPActivation::adjSoftmax(std::vector> z) { - LinAlg alg; + MLPPLinAlg 
alg; std::vector> a; a.resize(z.size()); @@ -104,7 +104,7 @@ std::vector> MLPPActivation::adjSoftmax(std::vector> MLPPActivation::softmaxDeriv(std::vector z) { - LinAlg alg; + MLPPLinAlg alg; std::vector> deriv; std::vector a = softmax(z); deriv.resize(a.size()); @@ -124,7 +124,7 @@ std::vector> MLPPActivation::softmaxDeriv(std::vector>> MLPPActivation::softmaxDeriv(std::vector> z) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> deriv; std::vector> a = softmax(z); @@ -155,7 +155,7 @@ std::vector MLPPActivation::softplus(std::vector z, bool deriv) if (deriv) { return sigmoid(z); } - LinAlg alg; + MLPPLinAlg alg; return alg.log(alg.addition(alg.onevec(z.size()), alg.exp(z))); } @@ -163,7 +163,7 @@ std::vector> MLPPActivation::softplus(std::vector MLPPActivation::softsign(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.exponentiate(alg.addition(alg.onevec(z.size()), alg.abs(z)), 2)); } @@ -183,7 +183,7 @@ std::vector MLPPActivation::softsign(std::vector z, bool deriv) } std::vector> MLPPActivation::softsign(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.exponentiate(alg.addition(alg.onemat(z.size(), z[0].size()), alg.abs(z)), 2)); } @@ -198,7 +198,7 @@ double MLPPActivation::gaussianCDF(double z, bool deriv) { } std::vector MLPPActivation::gaussianCDF(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); } @@ -206,7 +206,7 @@ std::vector MLPPActivation::gaussianCDF(std::vector z, bool deri } std::vector> MLPPActivation::gaussianCDF(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.scalarMultiply(1 / sqrt(2 * M_PI), alg.exp(alg.scalarMultiply(-1 / 2, alg.hadamard_product(z, z)))); } @@ -221,7 +221,7 @@ double MLPPActivation::cloglog(double z, bool deriv) { } std::vector MLPPActivation::cloglog(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); } @@ -229,7 +229,7 @@ std::vector MLPPActivation::cloglog(std::vector z, bool deriv) { } std::vector> MLPPActivation::cloglog(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.exp(alg.scalarMultiply(-1, alg.exp(z))); } @@ -244,7 +244,7 @@ double MLPPActivation::logit(double z, bool deriv) { } std::vector MLPPActivation::logit(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onevec(z.size()), z), alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(z, alg.onevec(z.size())))); } @@ -252,7 +252,7 @@ std::vector MLPPActivation::logit(std::vector z, bool deriv) { } std::vector> MLPPActivation::logit(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.subtraction(alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), z), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(z, alg.onemat(z.size(), z[0].size())))); } @@ -310,7 +310,7 @@ double MLPPActivation::swish(double z, bool deriv) { } std::vector MLPPActivation::swish(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); } @@ -318,7 +318,7 @@ std::vector MLPPActivation::swish(std::vector z, bool 
deriv) { } std::vector> MLPPActivation::swish(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { alg.addition(swish(z), alg.subtraction(sigmoid(z), alg.hadamard_product(sigmoid(z), swish(z)))); } @@ -333,7 +333,7 @@ double MLPPActivation::mish(double z, bool deriv) { } std::vector MLPPActivation::mish(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); } @@ -341,7 +341,7 @@ std::vector MLPPActivation::mish(std::vector z, bool deriv) { } std::vector> MLPPActivation::mish(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.addition(alg.hadamard_product(alg.hadamard_product(alg.hadamard_product(sech(softplus(z)), sech(softplus(z))), z), sigmoid(z)), alg.elementWiseDivision(mish(z), z)); } @@ -356,7 +356,7 @@ double MLPPActivation::sinc(double z, bool deriv) { } std::vector MLPPActivation::sinc(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); } @@ -364,7 +364,7 @@ std::vector MLPPActivation::sinc(std::vector z, bool deriv) { } std::vector> MLPPActivation::sinc(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.subtraction(alg.hadamard_product(z, alg.cos(z)), alg.sin(z)), alg.hadamard_product(z, z)); } @@ -662,7 +662,7 @@ std::vector MLPPActivation::sinh(std::vector z, bool deriv) { if (deriv) { return cosh(z); } - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(0.5, alg.subtraction(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } @@ -670,7 +670,7 @@ std::vector> MLPPActivation::sinh(std::vector MLPPActivation::cosh(std::vector z, bool deriv) { if (deriv) { return sinh(z); } - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(0.5, alg.addition(alg.exp(z), alg.exp(alg.scalarMultiply(-1, z)))); } @@ -693,7 +693,7 @@ std::vector> MLPPActivation::cosh(std::vector MLPPActivation::tanh(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); } @@ -713,7 +713,7 @@ std::vector MLPPActivation::tanh(std::vector z, bool deriv) { } std::vector> MLPPActivation::tanh(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.scalarMultiply(-1, alg.scalarAdd(-1, alg.hadamard_product(tanh(z), tanh(z)))); } @@ -729,7 +729,7 @@ double MLPPActivation::csch(double z, bool deriv) { } std::vector MLPPActivation::csch(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); } @@ -737,7 +737,7 @@ std::vector MLPPActivation::csch(std::vector z, bool deriv) { } std::vector> MLPPActivation::csch(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), coth(z)); } @@ -752,7 +752,7 @@ double MLPPActivation::sech(double z, bool deriv) { } std::vector MLPPActivation::sech(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); } @@ -762,7 +762,7 @@ std::vector MLPPActivation::sech(std::vector z, bool deriv) { } std::vector> MLPPActivation::sech(std::vector> z, bool 
deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, sech(z)), tanh(z)); } @@ -779,7 +779,7 @@ double MLPPActivation::coth(double z, bool deriv) { } std::vector MLPPActivation::coth(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); } @@ -787,7 +787,7 @@ std::vector MLPPActivation::coth(std::vector z, bool deriv) { } std::vector> MLPPActivation::coth(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.hadamard_product(alg.scalarMultiply(-1, csch(z)), csch(z)); } @@ -802,7 +802,7 @@ double MLPPActivation::arsinh(double z, bool deriv) { } std::vector MLPPActivation::arsinh(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onevec(z.size())))); } @@ -810,7 +810,7 @@ std::vector MLPPActivation::arsinh(std::vector z, bool deriv) { } std::vector> MLPPActivation::arsinh(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.addition(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); } @@ -825,7 +825,7 @@ double MLPPActivation::arcosh(double z, bool deriv) { } std::vector MLPPActivation::arcosh(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onevec(z.size())))); } @@ -833,7 +833,7 @@ std::vector MLPPActivation::arcosh(std::vector z, bool deriv) { } std::vector> MLPPActivation::arcosh(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.sqrt(alg.subtraction(alg.hadamard_product(z, z), alg.onemat(z.size(), z[0].size())))); } @@ -848,7 +848,7 @@ double MLPPActivation::artanh(double z, bool deriv) { } std::vector MLPPActivation::artanh(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); } @@ -856,7 +856,7 @@ std::vector MLPPActivation::artanh(std::vector z, bool deriv) { } std::vector> MLPPActivation::artanh(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); } @@ -871,7 +871,7 @@ double MLPPActivation::arcsch(double z, bool deriv) { } std::vector MLPPActivation::arcsch(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onevec(z.size()), alg.elementWiseDivision(alg.onevec(z.size()), alg.hadamard_product(z, z)))))); } @@ -879,7 +879,7 @@ std::vector MLPPActivation::arcsch(std::vector z, bool deriv) { } std::vector> MLPPActivation::arcsch(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(alg.hadamard_product(z, z), alg.sqrt(alg.addition(alg.onemat(z.size(), z[0].size()), alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z)))))); } @@ -894,7 +894,7 @@ double 
MLPPActivation::arsech(double z, bool deriv) { } std::vector MLPPActivation::arsech(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))))); } @@ -902,7 +902,7 @@ std::vector MLPPActivation::arsech(std::vector z, bool deriv) { } std::vector> MLPPActivation::arsech(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.full(z.size(), z[0].size(), -1), alg.hadamard_product(z, alg.sqrt(alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))))); } @@ -917,7 +917,7 @@ double MLPPActivation::arcoth(double z, bool deriv) { } std::vector MLPPActivation::arcoth(std::vector z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onevec(z.size()), alg.subtraction(alg.onevec(z.size()), alg.hadamard_product(z, z))); } @@ -925,7 +925,7 @@ std::vector MLPPActivation::arcoth(std::vector z, bool deriv) { } std::vector> MLPPActivation::arcoth(std::vector> z, bool deriv) { - LinAlg alg; + MLPPLinAlg alg; if (deriv) { return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.subtraction(alg.onemat(z.size(), z[0].size()), alg.hadamard_product(z, z))); } diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index 1117d53..e772c73 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -55,7 +55,7 @@ double MLPPANN::modelTest(std::vector x) { void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -89,7 +89,7 @@ void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -127,7 +127,7 @@ void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -165,7 +165,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -222,7 +222,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -278,7 +278,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -334,7 +334,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -401,7 +401,7 @@ void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, dou 
void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -466,7 +466,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -536,7 +536,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -661,7 +661,7 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI } void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha); } else { @@ -701,7 +701,7 @@ void MLPPANN::forwardPass() { } void MLPPANN::updateParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { - LinAlg alg; + MLPPLinAlg alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; @@ -721,7 +721,7 @@ std::tuple>>, std::vector> M // std::cout << "BEGIN" << std::endl; class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. 
diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp index 55f4475..279a81d 100644 --- a/mlpp/auto_encoder/auto_encoder.cpp +++ b/mlpp/auto_encoder/auto_encoder.cpp @@ -34,7 +34,7 @@ std::vector MLPPAutoEncoder::modelTest(std::vector x) { void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -87,7 +87,7 @@ void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -138,7 +138,7 @@ void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; @@ -213,7 +213,7 @@ double MLPPAutoEncoder::Cost(std::vector> y_hat, std::vector } std::vector> MLPPAutoEncoder::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -221,7 +221,7 @@ std::vector> MLPPAutoEncoder::Evaluate(std::vector>, std::vector>> MLPPAutoEncoder::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -229,7 +229,7 @@ std::tuple>, std::vector>> M } std::vector MLPPAutoEncoder::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -237,7 +237,7 @@ std::vector MLPPAutoEncoder::Evaluate(std::vector x) { } std::tuple, std::vector> MLPPAutoEncoder::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -245,7 +245,7 @@ std::tuple, std::vector> MLPPAutoEncoder::propagate( } void MLPPAutoEncoder::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp index cc8ce88..e813def 100644 --- a/mlpp/bernoulli_nb/bernoulli_nb.cpp +++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp @@ -74,7 +74,7 @@ double MLPPBernoulliNB::score() { } void MLPPBernoulliNB::computeVocab() { - LinAlg alg; + MLPPLinAlg alg; MLPPData data; vocab = data.vecToSet(alg.flatten(inputSet)); } diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index 7e8ff27..7dde514 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -31,7 +31,7 @@ double MLPPCLogLogReg::modelTest(std::vector x) { void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -65,7 +65,7 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 
1; @@ -96,7 +96,7 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { } void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -138,7 +138,7 @@ void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -191,30 +191,30 @@ double MLPPCLogLogReg::Cost(std::vector y_hat, std::vector y) { } std::vector MLPPCLogLogReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector MLPPCLogLogReg::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double MLPPCLogLogReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.cloglog(alg.dot(weights, x) + bias); } double MLPPCLogLogReg::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // cloglog ( wTx + b ) void MLPPCLogLogReg::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = propagate(inputSet); diff --git a/mlpp/convolutions/convolutions.cpp b/mlpp/convolutions/convolutions.cpp index 74d3b78..b9b934f 100644 --- a/mlpp/convolutions/convolutions.cpp +++ b/mlpp/convolutions/convolutions.cpp @@ -15,7 +15,7 @@ MLPPConvolutions::MLPPConvolutions() : } std::vector> MLPPConvolutions::convolve(std::vector> input, std::vector> filter, int S, int P) { - LinAlg alg; + MLPPLinAlg alg; std::vector> featureMap; int N = input.size(); int F = filter.size(); @@ -71,7 +71,7 @@ std::vector> MLPPConvolutions::convolve(std::vector>> MLPPConvolutions::convolve(std::vector>> input, std::vector>> filter, int S, int P) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> featureMap; int N = input[0].size(); int F = filter[0].size(); @@ -137,7 +137,7 @@ std::vector>> MLPPConvolutions::convolve(std::ve } std::vector> MLPPConvolutions::pool(std::vector> input, int F, int S, std::string type) { - LinAlg alg; + MLPPLinAlg alg; std::vector> pooledMap; int N = input.size(); int mapSize = floor((N - F) / S + 1); @@ -185,7 +185,7 @@ std::vector>> MLPPConvolutions::pool(std::vector } double MLPPConvolutions::globalPool(std::vector> input, std::string type) { - LinAlg alg; + MLPPLinAlg alg; if (type == "Average") { Stat stat; return stat.mean(alg.flatten(input)); @@ -272,7 +272,7 @@ std::vector> MLPPConvolutions::dy(std::vector> MLPPConvolutions::gradMagnitude(std::vector> input) { - LinAlg alg; + MLPPLinAlg alg; std::vector> xDeriv_2 = alg.hadamard_product(dx(input), dx(input)); std::vector> yDeriv_2 = alg.hadamard_product(dy(input), dy(input)); return alg.sqrt(alg.addition(xDeriv_2, yDeriv_2)); @@ -301,7 +301,7 @@ std::vector>> MLPPConvolutions::computeM(std::ve double const GAUSSIAN_PADDING = ((input.size() - 1) + GAUSSIAN_SIZE - input.size()) / 2; // Convs must be same. 
std::cout << GAUSSIAN_PADDING << std::endl; - LinAlg alg; + MLPPLinAlg alg; std::vector> xDeriv = dx(input); std::vector> yDeriv = dy(input); @@ -315,7 +315,7 @@ std::vector>> MLPPConvolutions::computeM(std::ve } std::vector> MLPPConvolutions::harrisCornerDetection(std::vector> input) { double const k = 0.05; // Empirically determined wherein k -> [0.04, 0.06], though conventionally 0.05 is typically used as well. - LinAlg alg; + MLPPLinAlg alg; std::vector>> M = computeM(input); std::vector> det = alg.subtraction(alg.hadamard_product(M[0], M[1]), alg.hadamard_product(M[2], M[2])); std::vector> trace = alg.addition(M[0], M[1]); diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp index 37641a2..62b1f7d 100644 --- a/mlpp/cost/cost.cpp +++ b/mlpp/cost/cost.cpp @@ -30,12 +30,12 @@ double MLPPCost::MSE(std::vector> y_hat, std::vector MLPPCost::MSEDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.subtraction(y_hat, y); } std::vector> MLPPCost::MSEDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.subtraction(y_hat, y); } @@ -58,12 +58,12 @@ double MLPPCost::RMSE(std::vector> y_hat, std::vector MLPPCost::RMSEDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y)); } std::vector> MLPPCost::RMSEDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(1 / (2 / sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y)); } @@ -139,12 +139,12 @@ double MLPPCost::MBE(std::vector> y_hat, std::vector MLPPCost::MBEDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.onevec(y_hat.size()); } std::vector> MLPPCost::MBEDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.onemat(y_hat.size(), y_hat[0].size()); } @@ -171,12 +171,12 @@ double MLPPCost::LogLoss(std::vector> y_hat, std::vector MLPPCost::LogLossDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat)))); } std::vector> MLPPCost::LogLossDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat)))); } @@ -201,17 +201,17 @@ double MLPPCost::CrossEntropy(std::vector> y_hat, std::vecto } std::vector MLPPCost::CrossEntropyDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)); } std::vector> MLPPCost::CrossEntropyDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)); } double MLPPCost::HuberLoss(std::vector y_hat, std::vector y, double delta) { - LinAlg alg; + MLPPLinAlg alg; double sum = 0; for (int i = 0; i < y_hat.size(); i++) { if (abs(y[i] - y_hat[i]) <= delta) { @@ -224,7 +224,7 @@ double MLPPCost::HuberLoss(std::vector y_hat, std::vector y, dou } double MLPPCost::HuberLoss(std::vector> y_hat, std::vector> y, double delta) { - LinAlg alg; + MLPPLinAlg alg; double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -239,7 +239,7 @@ double 
MLPPCost::HuberLoss(std::vector> y_hat, std::vector MLPPCost::HuberLossDeriv(std::vector y_hat, std::vector y, double delta) { - LinAlg alg; + MLPPLinAlg alg; double sum = 0; std::vector deriv; deriv.resize(y_hat.size()); @@ -259,7 +259,7 @@ std::vector MLPPCost::HuberLossDeriv(std::vector y_hat, std::vec } std::vector> MLPPCost::HuberLossDeriv(std::vector> y_hat, std::vector> y, double delta) { - LinAlg alg; + MLPPLinAlg alg; double sum = 0; std::vector> deriv; deriv.resize(y_hat.size()); @@ -349,39 +349,39 @@ double MLPPCost::WassersteinLoss(std::vector> y_hat, std::ve } std::vector MLPPCost::WassersteinLossDeriv(std::vector y_hat, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(-1, y); // Simple. } std::vector> MLPPCost::WassersteinLossDeriv(std::vector> y_hat, std::vector> y) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarMultiply(-1, y); // Simple. } double MLPPCost::HingeLoss(std::vector y_hat, std::vector y, std::vector weights, double C) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } double MLPPCost::HingeLoss(std::vector> y_hat, std::vector> y, std::vector> weights, double C) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } std::vector MLPPCost::HingeLossDeriv(std::vector y_hat, std::vector y, double C) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } std::vector> MLPPCost::HingeLossDeriv(std::vector> y_hat, std::vector> y, double C) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } double MLPPCost::dualFormSVM(std::vector alpha, std::vector> X, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; std::vector> Y = alg.diag(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y. std::vector> K = alg.matmult(X, alg.transpose(X)); // TO DO: DON'T forget to add non-linear kernelizations. std::vector> Q = alg.matmult(alg.matmult(alg.transpose(Y), K), Y); @@ -392,7 +392,7 @@ double MLPPCost::dualFormSVM(std::vector alpha, std::vector MLPPCost::dualFormSVMDeriv(std::vector alpha, std::vector> X, std::vector y) { - LinAlg alg; + MLPPLinAlg alg; std::vector> Y = alg.zeromat(y.size(), y.size()); for (int i = 0; i < y.size(); i++) { Y[i][i] = y[i]; // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y. 
diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp index 30a2183..3ae512c 100644 --- a/mlpp/data/data.cpp +++ b/mlpp/data/data.cpp @@ -126,7 +126,7 @@ std::tuple>, std::vector>, s // MULTIVARIATE SUPERVISED void MLPPData::setData(int k, std::string fileName, std::vector> &inputSet, std::vector &outputSet) { - LinAlg alg; + MLPPLinAlg alg; std::string inputTemp; std::string outputTemp; @@ -154,7 +154,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector inputName, std::string outputName, std::vector> inputSet, std::vector outputSet) { - LinAlg alg; + MLPPLinAlg alg; inputSet = alg.transpose(inputSet); for (int i = 0; i < inputSet.size(); i++) { std::cout << inputName[i] << std::endl; @@ -172,7 +172,7 @@ void MLPPData::printData(std::vector inputName, std::string outputN // UNSUPERVISED void MLPPData::setData(int k, std::string fileName, std::vector> &inputSet) { - LinAlg alg; + MLPPLinAlg alg; std::string inputTemp; inputSet.resize(k); @@ -196,7 +196,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector inputName, std::vector> inputSet) { - LinAlg alg; + MLPPLinAlg alg; inputSet = alg.transpose(inputSet); for (int i = 0; i < inputSet.size(); i++) { std::cout << inputName[i] << std::endl; @@ -259,7 +259,7 @@ std::vector> MLPPData::rgb2gray(std::vector>> MLPPData::rgb2ycbcr(std::vector>> input) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> YCbCr; YCbCr = alg.resize(YCbCr, input); for (int i = 0; i < YCbCr[0].size(); i++) { @@ -275,7 +275,7 @@ std::vector>> MLPPData::rgb2ycbcr(std::vector>> MLPPData::rgb2hsv(std::vector>> input) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> HSV; HSV = alg.resize(HSV, input); for (int i = 0; i < HSV[0].size(); i++) { @@ -317,7 +317,7 @@ std::vector>> MLPPData::rgb2hsv(std::vector>> MLPPData::rgb2xyz(std::vector>> input) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> XYZ; XYZ = alg.resize(XYZ, input); std::vector> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }; @@ -325,7 +325,7 @@ std::vector>> MLPPData::rgb2xyz(std::vector>> MLPPData::xyz2rgb(std::vector>> input) { - LinAlg alg; + MLPPLinAlg alg; std::vector>> XYZ; XYZ = alg.resize(XYZ, input); std::vector> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }); @@ -520,7 +520,7 @@ std::vector> MLPPData::BOW(std::vector sentence } std::vector> MLPPData::TFIDF(std::vector sentences) { - LinAlg alg; + MLPPLinAlg alg; std::vector wordList = removeNullByte(removeStopWords(createWordList(sentences))); std::vector> segmented_sentences; @@ -620,7 +620,7 @@ std::tuple>, std::vector> MLPPData: for (int i = inputSize; i < BOW.size(); i++) { outputSet.push_back(BOW[i]); } - LinAlg alg; + MLPPLinAlg alg; SoftmaxNet *model; if (type == "Skipgram") { model = new SoftmaxNet(outputSet, inputSet, dimension); @@ -635,7 +635,7 @@ std::tuple>, std::vector> MLPPData: } std::vector> MLPPData::LSA(std::vector sentences, int dim) { - LinAlg alg; + MLPPLinAlg alg; std::vector> docWordData = BOW(sentences, "Binary"); auto [U, S, Vt] = alg.SVD(docWordData); @@ -678,7 +678,7 @@ void MLPPData::setInputNames(std::string fileName, std::vector &inp } std::vector> MLPPData::featureScaling(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; X = alg.transpose(X); std::vector max_elements, min_elements; max_elements.resize(X.size()); @@ -698,7 +698,7 @@ std::vector> MLPPData::featureScaling(std::vector> MLPPData::meanNormalization(std::vector> X) { - 
LinAlg alg; + MLPPLinAlg alg; Stat stat; // (X_j - mu_j) / std_j, for every j @@ -710,7 +710,7 @@ std::vector> MLPPData::meanNormalization(std::vector> MLPPData::meanCentering(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; Stat stat; for (int i = 0; i < X.size(); i++) { double mean_i = stat.mean(X[i]); diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index 9817f69..87fcc00 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -34,7 +34,7 @@ double MLPPDualSVC::modelTest(std::vector x) { void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -82,7 +82,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // void MLPPDualSVC::SGD(double learning_rate, int max_epoch, bool UI){ // class MLPPCost cost; // MLPPActivation avn; -// LinAlg alg; +// MLPPLinAlg alg; // Reg regularization; // double cost_prev = 0; @@ -115,7 +115,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // void MLPPDualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){ // class MLPPCost cost; // MLPPActivation avn; -// LinAlg alg; +// MLPPLinAlg alg; // Reg regularization; // double cost_prev = 0; // int epoch = 1; @@ -173,7 +173,7 @@ std::vector MLPPDualSVC::Evaluate(std::vector> X) { } std::vector MLPPDualSVC::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; std::vector z; for (int i = 0; i < X.size(); i++) { double sum = 0; @@ -194,7 +194,7 @@ double MLPPDualSVC::Evaluate(std::vector x) { } double MLPPDualSVC::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; double z = 0; for (int j = 0; j < alpha.size(); j++) { if (alpha[j] != 0) { @@ -206,7 +206,7 @@ double MLPPDualSVC::propagate(std::vector x) { } void MLPPDualSVC::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = propagate(inputSet); @@ -224,14 +224,14 @@ void MLPPDualSVC::alphaProjection() { } double MLPPDualSVC::kernelFunction(std::vector u, std::vector v, std::string kernel) { - LinAlg alg; + MLPPLinAlg alg; if (kernel == "Linear") { return alg.dot(u, v); } // warning: non-void function does not return a value in all control paths [-Wreturn-type] } std::vector> MLPPDualSVC::kernelFunction(std::vector> A, std::vector> B, std::string kernel) { - LinAlg alg; + MLPPLinAlg alg; if (kernel == "Linear") { return alg.matmult(inputSet, alg.transpose(inputSet)); } // warning: non-void function does not return a value in all control paths [-Wreturn-type] diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp index 68e9dad..4e46ce3 100644 --- a/mlpp/exp_reg/exp_reg.cpp +++ b/mlpp/exp_reg/exp_reg.cpp @@ -32,7 +32,7 @@ double MLPPExpReg::modelTest(std::vector x) { } void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -135,7 +135,7 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) { } void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp index 8e4a27c..bfa0280 100644 --- a/mlpp/gan/gan.cpp +++ b/mlpp/gan/gan.cpp @@ -24,13 +24,13 @@ MLPPGAN::~MLPPGAN() { } std::vector> MLPPGAN::generateExample(int n) { - LinAlg alg; + MLPPLinAlg 
alg; return modelSetTestGenerator(alg.gaussianNoise(n, k)); } void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -77,7 +77,7 @@ void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } double MLPPGAN::score() { - LinAlg alg; + MLPPLinAlg alg; Utilities util; forwardPass(); return util.performance(y_hat, alg.onevec(n)); @@ -97,7 +97,7 @@ void MLPPGAN::save(std::string fileName) { } void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { - LinAlg alg; + MLPPLinAlg alg; if (network.empty()) { network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha)); network[0].forwardPass(); @@ -108,7 +108,7 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI } void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha); } else { @@ -160,7 +160,7 @@ double MLPPGAN::Cost(std::vector y_hat, std::vector y) { } void MLPPGAN::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { network[0].input = alg.gaussianNoise(n, k); network[0].forwardPass(); @@ -178,7 +178,7 @@ void MLPPGAN::forwardPass() { } void MLPPGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { - LinAlg alg; + MLPPLinAlg alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; @@ -195,7 +195,7 @@ void MLPPGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, double learning_rate) { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { for (int i = network.size() / 2; i >= 0; i--) { @@ -210,7 +210,7 @@ void MLPPGAN::updateGeneratorParameters(std::vector>>, std::vector> MLPPGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. @@ -246,7 +246,7 @@ std::tuple>>, std::vector> M std::vector>> MLPPGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. 
diff --git a/mlpp/gaussian_nb/gaussian_nb.cpp b/mlpp/gaussian_nb/gaussian_nb.cpp index 0e78b32..bbc3aeb 100644 --- a/mlpp/gaussian_nb/gaussian_nb.cpp +++ b/mlpp/gaussian_nb/gaussian_nb.cpp @@ -18,7 +18,7 @@ MLPPGaussianNB::MLPPGaussianNB(std::vector> inputSet, std::v inputSet(inputSet), outputSet(outputSet), class_num(class_num) { y_hat.resize(outputSet.size()); Evaluate(); - LinAlg alg; + MLPPLinAlg alg; } std::vector MLPPGaussianNB::modelSetTest(std::vector> X) { @@ -31,7 +31,7 @@ std::vector MLPPGaussianNB::modelSetTest(std::vector double MLPPGaussianNB::modelTest(std::vector x) { Stat stat; - LinAlg alg; + MLPPLinAlg alg; double score[class_num]; double y_hat_i = 1; @@ -49,7 +49,7 @@ double MLPPGaussianNB::score() { void MLPPGaussianNB::Evaluate() { Stat stat; - LinAlg alg; + MLPPLinAlg alg; // Computing mu_k_y and sigma_k_y mu.resize(class_num); diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp index 8963208..4775529 100644 --- a/mlpp/hidden_layer/hidden_layer.cpp +++ b/mlpp/hidden_layer/hidden_layer.cpp @@ -98,14 +98,14 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vect } void MLPPHiddenLayer::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, 0); } void MLPPHiddenLayer::Test(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, 0); diff --git a/mlpp/kmeans/kmeans.cpp b/mlpp/kmeans/kmeans.cpp index 4c6b707..7b3be7b 100644 --- a/mlpp/kmeans/kmeans.cpp +++ b/mlpp/kmeans/kmeans.cpp @@ -23,7 +23,7 @@ MLPPKMeans::MLPPKMeans(std::vector> inputSet, int k, std::st } std::vector> MLPPKMeans::modelSetTest(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; std::vector> closestCentroids; for (int i = 0; i < inputSet.size(); i++) { std::vector closestCentroid = mu[0]; @@ -39,7 +39,7 @@ std::vector> MLPPKMeans::modelSetTest(std::vector MLPPKMeans::modelTest(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; std::vector closestCentroid = mu[0]; for (int j = 0; j < mu.size(); j++) { if (alg.euclideanDistance(x, mu[j]) < alg.euclideanDistance(x, closestCentroid)) { @@ -85,7 +85,7 @@ double MLPPKMeans::score() { } std::vector MLPPKMeans::silhouette_scores() { - LinAlg alg; + MLPPLinAlg alg; std::vector> closestCentroids = modelSetTest(inputSet); std::vector silhouette_scores; for (int i = 0; i < inputSet.size(); i++) { @@ -136,7 +136,7 @@ std::vector MLPPKMeans::silhouette_scores() { // This simply computes r_nk void MLPPKMeans::Evaluate() { - LinAlg alg; + MLPPLinAlg alg; r.resize(inputSet.size()); for (int i = 0; i < r.size(); i++) { @@ -163,7 +163,7 @@ void MLPPKMeans::Evaluate() { // This simply computes or re-computes mu_k void MLPPKMeans::computeMu() { - LinAlg alg; + MLPPLinAlg alg; for (int i = 0; i < mu.size(); i++) { std::vector num; num.resize(r.size()); @@ -197,7 +197,7 @@ void MLPPKMeans::centroidInitialization(int k) { } void MLPPKMeans::kmeansppInitialization(int k) { - LinAlg alg; + MLPPLinAlg alg; std::random_device rd; std::default_random_engine generator(rd()); std::uniform_int_distribution distribution(0, int(inputSet.size() - 1)); @@ -223,7 +223,7 @@ void MLPPKMeans::kmeansppInitialization(int k) { } double MLPPKMeans::Cost() { - LinAlg alg; + MLPPLinAlg alg; double sum = 0; for (int i = 0; i < r.size(); i++) { for (int j = 0; j < r[0].size(); j++) { diff --git 
a/mlpp/knn/knn.cpp b/mlpp/knn/knn.cpp index a2a1649..33a2e23 100644 --- a/mlpp/knn/knn.cpp +++ b/mlpp/knn/knn.cpp @@ -63,7 +63,7 @@ int MLPPKNN::determineClass(std::vector knn) { } std::vector MLPPKNN::nearestNeighbors(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; // The nearest neighbors std::vector knn; diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp index 8661851..701551e 100644 --- a/mlpp/lin_alg/lin_alg.cpp +++ b/mlpp/lin_alg/lin_alg.cpp @@ -13,18 +13,18 @@ -std::vector> LinAlg::gramMatrix(std::vector> A) { +std::vector> MLPPLinAlg::gramMatrix(std::vector> A) { return matmult(transpose(A), A); // AtA } -bool LinAlg::linearIndependenceChecker(std::vector> A) { +bool MLPPLinAlg::linearIndependenceChecker(std::vector> A) { if (det(gramMatrix(A), A.size()) == 0) { return false; } return true; } -std::vector> LinAlg::gaussianNoise(int n, int m) { +std::vector> MLPPLinAlg::gaussianNoise(int n, int m) { std::random_device rd; std::default_random_engine generator(rd()); @@ -40,7 +40,7 @@ std::vector> LinAlg::gaussianNoise(int n, int m) { return A; } -std::vector> LinAlg::addition(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::addition(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -55,7 +55,7 @@ std::vector> LinAlg::addition(std::vector> LinAlg::subtraction(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::subtraction(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -70,7 +70,7 @@ std::vector> LinAlg::subtraction(std::vector> LinAlg::matmult(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::matmult(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -87,7 +87,7 @@ std::vector> LinAlg::matmult(std::vector return C; } -std::vector> LinAlg::hadamard_product(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::hadamard_product(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -102,7 +102,7 @@ std::vector> LinAlg::hadamard_product(std::vector> LinAlg::kronecker_product(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::kronecker_product(std::vector> A, std::vector> B) { std::vector> C; // [1,1,1,1] [1,2,3,4,5] @@ -131,7 +131,7 @@ std::vector> LinAlg::kronecker_product(std::vector> LinAlg::elementWiseDivision(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::elementWiseDivision(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -145,7 +145,7 @@ std::vector> LinAlg::elementWiseDivision(std::vector> LinAlg::transpose(std::vector> A) { +std::vector> MLPPLinAlg::transpose(std::vector> A) { std::vector> AT; AT.resize(A[0].size()); for (int i = 0; i < AT.size(); i++) { @@ -160,7 +160,7 @@ std::vector> LinAlg::transpose(std::vector> LinAlg::scalarMultiply(double scalar, std::vector> A) { +std::vector> MLPPLinAlg::scalarMultiply(double scalar, std::vector> A) { for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { A[i][j] *= scalar; @@ -169,7 +169,7 @@ std::vector> LinAlg::scalarMultiply(double scalar, std::vect return A; } -std::vector> LinAlg::scalarAdd(double scalar, std::vector> A) { +std::vector> MLPPLinAlg::scalarAdd(double scalar, std::vector> A) { for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { A[i][j] += scalar; @@ -178,7 +178,7 @@ std::vector> LinAlg::scalarAdd(double 
scalar, std::vector> LinAlg::log(std::vector> A) { +std::vector> MLPPLinAlg::log(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -192,7 +192,7 @@ std::vector> LinAlg::log(std::vector> A) return B; } -std::vector> LinAlg::log10(std::vector> A) { +std::vector> MLPPLinAlg::log10(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -206,7 +206,7 @@ std::vector> LinAlg::log10(std::vector> return B; } -std::vector> LinAlg::exp(std::vector> A) { +std::vector> MLPPLinAlg::exp(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -220,7 +220,7 @@ std::vector> LinAlg::exp(std::vector> A) return B; } -std::vector> LinAlg::erf(std::vector> A) { +std::vector> MLPPLinAlg::erf(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -234,7 +234,7 @@ std::vector> LinAlg::erf(std::vector> A) return B; } -std::vector> LinAlg::exponentiate(std::vector> A, double p) { +std::vector> MLPPLinAlg::exponentiate(std::vector> A, double p) { for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { A[i][j] = std::pow(A[i][j], p); @@ -243,15 +243,15 @@ std::vector> LinAlg::exponentiate(std::vector> LinAlg::sqrt(std::vector> A) { +std::vector> MLPPLinAlg::sqrt(std::vector> A) { return exponentiate(A, 0.5); } -std::vector> LinAlg::cbrt(std::vector> A) { +std::vector> MLPPLinAlg::cbrt(std::vector> A) { return exponentiate(A, double(1) / double(3)); } -std::vector> LinAlg::matrixPower(std::vector> A, int n) { +std::vector> MLPPLinAlg::matrixPower(std::vector> A, int n) { std::vector> B = identity(A.size()); if (n == 0) { return identity(A.size()); @@ -264,7 +264,7 @@ std::vector> LinAlg::matrixPower(std::vector> LinAlg::abs(std::vector> A) { +std::vector> MLPPLinAlg::abs(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -278,7 +278,7 @@ std::vector> LinAlg::abs(std::vector> A) return B; } -double LinAlg::det(std::vector> A, int d) { +double MLPPLinAlg::det(std::vector> A, int d) { double deter = 0; std::vector> B; B.resize(d); @@ -313,7 +313,7 @@ double LinAlg::det(std::vector> A, int d) { return deter; } -double LinAlg::trace(std::vector> A) { +double MLPPLinAlg::trace(std::vector> A) { double trace = 0; for (int i = 0; i < A.size(); i++) { trace += A[i][i]; @@ -321,7 +321,7 @@ double LinAlg::trace(std::vector> A) { return trace; } -std::vector> LinAlg::cofactor(std::vector> A, int n, int i, int j) { +std::vector> MLPPLinAlg::cofactor(std::vector> A, int n, int i, int j) { std::vector> cof; cof.resize(A.size()); for (int i = 0; i < cof.size(); i++) { @@ -344,7 +344,7 @@ std::vector> LinAlg::cofactor(std::vector> LinAlg::adjoint(std::vector> A) { +std::vector> MLPPLinAlg::adjoint(std::vector> A) { //Resizing the initial adjoint matrix std::vector> adj; adj.resize(A.size()); @@ -379,16 +379,16 @@ std::vector> LinAlg::adjoint(std::vector } // The inverse can be computed as (1 / determinant(A)) * adjoint(A) -std::vector> LinAlg::inverse(std::vector> A) { +std::vector> MLPPLinAlg::inverse(std::vector> A) { return scalarMultiply(1 / det(A, int(A.size())), adjoint(A)); } // This is simply the Moore-Penrose least squares approximation of the inverse. 
-std::vector> LinAlg::pinverse(std::vector> A) { +std::vector> MLPPLinAlg::pinverse(std::vector> A) { return matmult(inverse(matmult(transpose(A), A)), transpose(A)); } -std::vector> LinAlg::zeromat(int n, int m) { +std::vector> MLPPLinAlg::zeromat(int n, int m) { std::vector> zeromat; zeromat.resize(n); for (int i = 0; i < zeromat.size(); i++) { @@ -397,11 +397,11 @@ std::vector> LinAlg::zeromat(int n, int m) { return zeromat; } -std::vector> LinAlg::onemat(int n, int m) { +std::vector> MLPPLinAlg::onemat(int n, int m) { return full(n, m, 1); } -std::vector> LinAlg::full(int n, int m, int k) { +std::vector> MLPPLinAlg::full(int n, int m, int k) { std::vector> full; full.resize(n); for (int i = 0; i < full.size(); i++) { @@ -415,7 +415,7 @@ std::vector> LinAlg::full(int n, int m, int k) { return full; } -std::vector> LinAlg::sin(std::vector> A) { +std::vector> MLPPLinAlg::sin(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -429,7 +429,7 @@ std::vector> LinAlg::sin(std::vector> A) return B; } -std::vector> LinAlg::cos(std::vector> A) { +std::vector> MLPPLinAlg::cos(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -443,7 +443,7 @@ std::vector> LinAlg::cos(std::vector> A) return B; } -std::vector LinAlg::max(std::vector a, std::vector b) { +std::vector MLPPLinAlg::max(std::vector a, std::vector b) { std::vector c; c.resize(a.size()); for (int i = 0; i < c.size(); i++) { @@ -456,15 +456,15 @@ std::vector LinAlg::max(std::vector a, std::vector b) { return c; } -double LinAlg::max(std::vector> A) { +double MLPPLinAlg::max(std::vector> A) { return max(flatten(A)); } -double LinAlg::min(std::vector> A) { +double MLPPLinAlg::min(std::vector> A) { return min(flatten(A)); } -std::vector> LinAlg::round(std::vector> A) { +std::vector> MLPPLinAlg::round(std::vector> A) { std::vector> B; B.resize(A.size()); for (int i = 0; i < B.size(); i++) { @@ -478,7 +478,7 @@ std::vector> LinAlg::round(std::vector> return B; } -double LinAlg::norm_2(std::vector> A) { +double MLPPLinAlg::norm_2(std::vector> A) { double sum = 0; for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { @@ -488,7 +488,7 @@ double LinAlg::norm_2(std::vector> A) { return std::sqrt(sum); } -std::vector> LinAlg::identity(double d) { +std::vector> MLPPLinAlg::identity(double d) { std::vector> identityMat; identityMat.resize(d); for (int i = 0; i < identityMat.size(); i++) { @@ -506,7 +506,7 @@ std::vector> LinAlg::identity(double d) { return identityMat; } -std::vector> LinAlg::cov(std::vector> A) { +std::vector> MLPPLinAlg::cov(std::vector> A) { Stat stat; std::vector> covMat; covMat.resize(A.size()); @@ -521,7 +521,7 @@ std::vector> LinAlg::cov(std::vector> A) return covMat; } -std::tuple>, std::vector>> LinAlg::eig(std::vector> A) { +std::tuple>, std::vector>> MLPPLinAlg::eig(std::vector> A) { /* A (the entered parameter) in most use cases will be X'X, XX', etc. and must be symmetric. That simply means that 1) X' = X and 2) X is a square matrix. 
This function that computes the @@ -641,7 +641,7 @@ std::tuple>, std::vector>> L return { eigenvectors, a_new }; } -std::tuple>, std::vector>, std::vector>> LinAlg::SVD(std::vector> A) { +std::tuple>, std::vector>, std::vector>> MLPPLinAlg::SVD(std::vector> A) { auto [left_eigenvecs, eigenvals] = eig(matmult(A, transpose(A))); auto [right_eigenvecs, right_eigenvals] = eig(matmult(transpose(A), A)); @@ -655,12 +655,12 @@ std::tuple>, std::vector>, s return { left_eigenvecs, sigma, right_eigenvecs }; } -std::vector LinAlg::vectorProjection(std::vector a, std::vector b) { +std::vector MLPPLinAlg::vectorProjection(std::vector a, std::vector b) { double product = dot(a, b) / dot(a, a); return scalarMultiply(product, a); // Projection of vector a onto b. Denotated as proj_a(b). } -std::vector> LinAlg::gramSchmidtProcess(std::vector> A) { +std::vector> MLPPLinAlg::gramSchmidtProcess(std::vector> A) { A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use. std::vector> B; B.resize(A.size()); @@ -680,13 +680,13 @@ std::vector> LinAlg::gramSchmidtProcess(std::vector>, std::vector>> LinAlg::QRD(std::vector> A) { +std::tuple>, std::vector>> MLPPLinAlg::QRD(std::vector> A) { std::vector> Q = gramSchmidtProcess(A); std::vector> R = matmult(transpose(Q), A); return { Q, R }; } -std::tuple>, std::vector>> LinAlg::chol(std::vector> A) { +std::tuple>, std::vector>> MLPPLinAlg::chol(std::vector> A) { std::vector> L = zeromat(A.size(), A[0].size()); for (int j = 0; j < L.size(); j++) { // Matrices entered must be square. No problem here. for (int i = j; i < L.size(); i++) { @@ -708,7 +708,7 @@ std::tuple>, std::vector>> L return { L, transpose(L) }; // Indeed, L.T is our upper triangular matrix. 
} -double LinAlg::sum_elements(std::vector> A) { +double MLPPLinAlg::sum_elements(std::vector> A) { double sum = 0; for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { @@ -718,7 +718,7 @@ double LinAlg::sum_elements(std::vector> A) { return sum; } -std::vector LinAlg::flatten(std::vector> A) { +std::vector MLPPLinAlg::flatten(std::vector> A) { std::vector a; for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { @@ -728,11 +728,11 @@ std::vector LinAlg::flatten(std::vector> A) { return a; } -std::vector LinAlg::solve(std::vector> A, std::vector b) { +std::vector MLPPLinAlg::solve(std::vector> A, std::vector b) { return mat_vec_mult(inverse(A), b); } -bool LinAlg::positiveDefiniteChecker(std::vector> A) { +bool MLPPLinAlg::positiveDefiniteChecker(std::vector> A) { auto [eigenvectors, eigenvals] = eig(A); std::vector eigenvals_vec; for (int i = 0; i < eigenvals.size(); i++) { @@ -746,7 +746,7 @@ bool LinAlg::positiveDefiniteChecker(std::vector> A) { return true; } -bool LinAlg::negativeDefiniteChecker(std::vector> A) { +bool MLPPLinAlg::negativeDefiniteChecker(std::vector> A) { auto [eigenvectors, eigenvals] = eig(A); std::vector eigenvals_vec; for (int i = 0; i < eigenvals.size(); i++) { @@ -760,7 +760,7 @@ bool LinAlg::negativeDefiniteChecker(std::vector> A) { return true; } -bool LinAlg::zeroEigenvalue(std::vector> A) { +bool MLPPLinAlg::zeroEigenvalue(std::vector> A) { auto [eigenvectors, eigenvals] = eig(A); std::vector eigenvals_vec; for (int i = 0; i < eigenvals.size(); i++) { @@ -774,7 +774,7 @@ bool LinAlg::zeroEigenvalue(std::vector> A) { return false; } -void LinAlg::printMatrix(std::vector> A) { +void MLPPLinAlg::printMatrix(std::vector> A) { for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { std::cout << A[i][j] << " "; @@ -783,7 +783,7 @@ void LinAlg::printMatrix(std::vector> A) { } } -std::vector> LinAlg::outerProduct(std::vector a, std::vector b) { +std::vector> MLPPLinAlg::outerProduct(std::vector a, std::vector b) { std::vector> C; C.resize(a.size()); for (int i = 0; i < C.size(); i++) { @@ -792,7 +792,7 @@ std::vector> LinAlg::outerProduct(std::vector a, std return C; } -std::vector LinAlg::hadamard_product(std::vector a, std::vector b) { +std::vector MLPPLinAlg::hadamard_product(std::vector a, std::vector b) { std::vector c; c.resize(a.size()); @@ -803,7 +803,7 @@ std::vector LinAlg::hadamard_product(std::vector a, std::vector< return c; } -std::vector LinAlg::elementWiseDivision(std::vector a, std::vector b) { +std::vector MLPPLinAlg::elementWiseDivision(std::vector a, std::vector b) { std::vector c; c.resize(a.size()); @@ -813,21 +813,21 @@ std::vector LinAlg::elementWiseDivision(std::vector a, std::vect return c; } -std::vector LinAlg::scalarMultiply(double scalar, std::vector a) { +std::vector MLPPLinAlg::scalarMultiply(double scalar, std::vector a) { for (int i = 0; i < a.size(); i++) { a[i] *= scalar; } return a; } -std::vector LinAlg::scalarAdd(double scalar, std::vector a) { +std::vector MLPPLinAlg::scalarAdd(double scalar, std::vector a) { for (int i = 0; i < a.size(); i++) { a[i] += scalar; } return a; } -std::vector LinAlg::addition(std::vector a, std::vector b) { +std::vector MLPPLinAlg::addition(std::vector a, std::vector b) { std::vector c; c.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -836,7 +836,7 @@ std::vector LinAlg::addition(std::vector a, std::vector return c; } -std::vector LinAlg::subtraction(std::vector a, std::vector b) { +std::vector 
MLPPLinAlg::subtraction(std::vector a, std::vector b) { std::vector c; c.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -845,14 +845,14 @@ std::vector LinAlg::subtraction(std::vector a, std::vector LinAlg::subtractMatrixRows(std::vector a, std::vector> B) { +std::vector MLPPLinAlg::subtractMatrixRows(std::vector a, std::vector> B) { for (int i = 0; i < B.size(); i++) { a = subtraction(a, B[i]); } return a; } -std::vector LinAlg::log(std::vector a) { +std::vector MLPPLinAlg::log(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -861,7 +861,7 @@ std::vector LinAlg::log(std::vector a) { return b; } -std::vector LinAlg::log10(std::vector a) { +std::vector MLPPLinAlg::log10(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -870,7 +870,7 @@ std::vector LinAlg::log10(std::vector a) { return b; } -std::vector LinAlg::exp(std::vector a) { +std::vector MLPPLinAlg::exp(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -879,7 +879,7 @@ std::vector LinAlg::exp(std::vector a) { return b; } -std::vector LinAlg::erf(std::vector a) { +std::vector MLPPLinAlg::erf(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -888,7 +888,7 @@ std::vector LinAlg::erf(std::vector a) { return b; } -std::vector LinAlg::exponentiate(std::vector a, double p) { +std::vector MLPPLinAlg::exponentiate(std::vector a, double p) { std::vector b; b.resize(a.size()); for (int i = 0; i < b.size(); i++) { @@ -897,15 +897,15 @@ std::vector LinAlg::exponentiate(std::vector a, double p) { return b; } -std::vector LinAlg::sqrt(std::vector a) { +std::vector MLPPLinAlg::sqrt(std::vector a) { return exponentiate(a, 0.5); } -std::vector LinAlg::cbrt(std::vector a) { +std::vector MLPPLinAlg::cbrt(std::vector a) { return exponentiate(a, double(1) / double(3)); } -double LinAlg::dot(std::vector a, std::vector b) { +double MLPPLinAlg::dot(std::vector a, std::vector b) { double c = 0; for (int i = 0; i < a.size(); i++) { c += a[i] * b[i]; @@ -913,7 +913,7 @@ double LinAlg::dot(std::vector a, std::vector b) { return c; } -std::vector LinAlg::cross(std::vector a, std::vector b) { +std::vector MLPPLinAlg::cross(std::vector a, std::vector b) { // Cross products exist in R^7 also. Though, I will limit it to R^3 as Wolfram does this. 
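// In R^3, a x b is the cofactor expansion along the first row of the formal determinant
// whose rows are (e1, e2, e3), a and b:
//   a x b = (a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]).
// The onevec(3) row in `mat` below stands in for the unit vectors e1, e2, e3; det1, det2
// and det3 are presumably the corresponding signed 2x2 determinants of that layout.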
std::vector> mat = { onevec(3), a, b }; @@ -924,7 +924,7 @@ std::vector LinAlg::cross(std::vector a, std::vector b) return { det1, det2, det3 }; } -std::vector LinAlg::abs(std::vector a) { +std::vector MLPPLinAlg::abs(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < b.size(); i++) { @@ -933,17 +933,17 @@ std::vector LinAlg::abs(std::vector a) { return b; } -std::vector LinAlg::zerovec(int n) { +std::vector MLPPLinAlg::zerovec(int n) { std::vector zerovec; zerovec.resize(n); return zerovec; } -std::vector LinAlg::onevec(int n) { +std::vector MLPPLinAlg::onevec(int n) { return full(n, 1); } -std::vector> LinAlg::diag(std::vector a) { +std::vector> MLPPLinAlg::diag(std::vector a) { std::vector> B = zeromat(a.size(), a.size()); for (int i = 0; i < B.size(); i++) { B[i][i] = a[i]; @@ -951,7 +951,7 @@ std::vector> LinAlg::diag(std::vector a) { return B; } -std::vector LinAlg::full(int n, int k) { +std::vector MLPPLinAlg::full(int n, int k) { std::vector full; full.resize(n); for (int i = 0; i < full.size(); i++) { @@ -960,7 +960,7 @@ std::vector LinAlg::full(int n, int k) { return full; } -std::vector LinAlg::sin(std::vector a) { +std::vector MLPPLinAlg::sin(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -969,7 +969,7 @@ std::vector LinAlg::sin(std::vector a) { return b; } -std::vector LinAlg::cos(std::vector a) { +std::vector MLPPLinAlg::cos(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -978,7 +978,7 @@ std::vector LinAlg::cos(std::vector a) { return b; } -std::vector> LinAlg::rotate(std::vector> A, double theta, int axis) { +std::vector> MLPPLinAlg::rotate(std::vector> A, double theta, int axis) { std::vector> rotationMatrix = { { std::cos(theta), -std::sin(theta) }, { std::sin(theta), std::cos(theta) } }; if (axis == 0) { rotationMatrix = { { 1, 0, 0 }, { 0, std::cos(theta), -std::sin(theta) }, { 0, std::sin(theta), std::cos(theta) } }; @@ -991,7 +991,7 @@ std::vector> LinAlg::rotate(std::vector> return matmult(A, rotationMatrix); } -std::vector> LinAlg::max(std::vector> A, std::vector> B) { +std::vector> MLPPLinAlg::max(std::vector> A, std::vector> B) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -1003,7 +1003,7 @@ std::vector> LinAlg::max(std::vector> A, return C; } -double LinAlg::max(std::vector a) { +double MLPPLinAlg::max(std::vector a) { int max = a[0]; for (int i = 0; i < a.size(); i++) { if (a[i] > max) { @@ -1013,7 +1013,7 @@ double LinAlg::max(std::vector a) { return max; } -double LinAlg::min(std::vector a) { +double MLPPLinAlg::min(std::vector a) { int min = a[0]; for (int i = 0; i < a.size(); i++) { if (a[i] < min) { @@ -1023,7 +1023,7 @@ double LinAlg::min(std::vector a) { return min; } -std::vector LinAlg::round(std::vector a) { +std::vector MLPPLinAlg::round(std::vector a) { std::vector b; b.resize(a.size()); for (int i = 0; i < a.size(); i++) { @@ -1033,7 +1033,7 @@ std::vector LinAlg::round(std::vector a) { } // Multidimensional Euclidean Distance -double LinAlg::euclideanDistance(std::vector a, std::vector b) { +double MLPPLinAlg::euclideanDistance(std::vector a, std::vector b) { double dist = 0; for (int i = 0; i < a.size(); i++) { dist += (a[i] - b[i]) * (a[i] - b[i]); @@ -1041,11 +1041,11 @@ double LinAlg::euclideanDistance(std::vector a, std::vector b) { return std::sqrt(dist); } -double LinAlg::norm_2(std::vector a) { +double MLPPLinAlg::norm_2(std::vector a) { return std::sqrt(norm_sq(a)); } -double 
LinAlg::norm_sq(std::vector a) { +double MLPPLinAlg::norm_sq(std::vector a) { double n_sq = 0; for (int i = 0; i < a.size(); i++) { n_sq += a[i] * a[i]; @@ -1053,7 +1053,7 @@ double LinAlg::norm_sq(std::vector a) { return n_sq; } -double LinAlg::sum_elements(std::vector a) { +double MLPPLinAlg::sum_elements(std::vector a) { double sum = 0; for (int i = 0; i < a.size(); i++) { sum += a[i]; @@ -1061,18 +1061,18 @@ double LinAlg::sum_elements(std::vector a) { return sum; } -double LinAlg::cosineSimilarity(std::vector a, std::vector b) { +double MLPPLinAlg::cosineSimilarity(std::vector a, std::vector b) { return dot(a, b) / (norm_2(a) * norm_2(b)); } -void LinAlg::printVector(std::vector a) { +void MLPPLinAlg::printVector(std::vector a) { for (int i = 0; i < a.size(); i++) { std::cout << a[i] << " "; } std::cout << std::endl; } -std::vector> LinAlg::mat_vec_add(std::vector> A, std::vector b) { +std::vector> MLPPLinAlg::mat_vec_add(std::vector> A, std::vector b) { for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { A[i][j] += b[j]; @@ -1081,7 +1081,7 @@ std::vector> LinAlg::mat_vec_add(std::vector LinAlg::mat_vec_mult(std::vector> A, std::vector b) { +std::vector MLPPLinAlg::mat_vec_mult(std::vector> A, std::vector b) { std::vector c; c.resize(A.size()); @@ -1093,35 +1093,35 @@ std::vector LinAlg::mat_vec_mult(std::vector> A, std return c; } -std::vector>> LinAlg::addition(std::vector>> A, std::vector>> B) { +std::vector>> MLPPLinAlg::addition(std::vector>> A, std::vector>> B) { for (int i = 0; i < A.size(); i++) { A[i] = addition(A[i], B[i]); } return A; } -std::vector>> LinAlg::elementWiseDivision(std::vector>> A, std::vector>> B) { +std::vector>> MLPPLinAlg::elementWiseDivision(std::vector>> A, std::vector>> B) { for (int i = 0; i < A.size(); i++) { A[i] = elementWiseDivision(A[i], B[i]); } return A; } -std::vector>> LinAlg::sqrt(std::vector>> A) { +std::vector>> MLPPLinAlg::sqrt(std::vector>> A) { for (int i = 0; i < A.size(); i++) { A[i] = sqrt(A[i]); } return A; } -std::vector>> LinAlg::exponentiate(std::vector>> A, double p) { +std::vector>> MLPPLinAlg::exponentiate(std::vector>> A, double p) { for (int i = 0; i < A.size(); i++) { A[i] = exponentiate(A[i], p); } return A; } -std::vector> LinAlg::tensor_vec_mult(std::vector>> A, std::vector b) { +std::vector> MLPPLinAlg::tensor_vec_mult(std::vector>> A, std::vector b) { std::vector> C; C.resize(A.size()); for (int i = 0; i < C.size(); i++) { @@ -1135,7 +1135,7 @@ std::vector> LinAlg::tensor_vec_mult(std::vector LinAlg::flatten(std::vector>> A) { +std::vector MLPPLinAlg::flatten(std::vector>> A) { std::vector c; for (int i = 0; i < A.size(); i++) { std::vector flattenedVec = flatten(A[i]); @@ -1144,7 +1144,7 @@ std::vector LinAlg::flatten(std::vector> return c; } -void LinAlg::printTensor(std::vector>> A) { +void MLPPLinAlg::printTensor(std::vector>> A) { for (int i = 0; i < A.size(); i++) { printMatrix(A[i]); if (i != A.size() - 1) { @@ -1153,21 +1153,21 @@ void LinAlg::printTensor(std::vector>> A) { } } -std::vector>> LinAlg::scalarMultiply(double scalar, std::vector>> A) { +std::vector>> MLPPLinAlg::scalarMultiply(double scalar, std::vector>> A) { for (int i = 0; i < A.size(); i++) { A[i] = scalarMultiply(scalar, A[i]); } return A; } -std::vector>> LinAlg::scalarAdd(double scalar, std::vector>> A) { +std::vector>> MLPPLinAlg::scalarAdd(double scalar, std::vector>> A) { for (int i = 0; i < A.size(); i++) { A[i] = scalarAdd(scalar, A[i]); } return A; } -std::vector>> LinAlg::resize(std::vector>> A, std::vector>> 
B) { +std::vector>> MLPPLinAlg::resize(std::vector>> A, std::vector>> B) { A.resize(B.size()); for (int i = 0; i < B.size(); i++) { A[i].resize(B[i].size()); @@ -1178,21 +1178,21 @@ std::vector>> LinAlg::resize(std::vector>> LinAlg::max(std::vector>> A, std::vector>> B) { +std::vector>> MLPPLinAlg::max(std::vector>> A, std::vector>> B) { for (int i = 0; i < A.size(); i++) { A[i] = max(A[i], B[i]); } return A; } -std::vector>> LinAlg::abs(std::vector>> A) { +std::vector>> MLPPLinAlg::abs(std::vector>> A) { for (int i = 0; i < A.size(); i++) { A[i] = abs(A[i]); } return A; } -double LinAlg::norm_2(std::vector>> A) { +double MLPPLinAlg::norm_2(std::vector>> A) { double sum = 0; for (int i = 0; i < A.size(); i++) { for (int j = 0; j < A[i].size(); j++) { @@ -1205,7 +1205,7 @@ double LinAlg::norm_2(std::vector>> A) { } // Bad implementation. Change this later. -std::vector>> LinAlg::vector_wise_tensor_product(std::vector>> A, std::vector> B) { +std::vector>> MLPPLinAlg::vector_wise_tensor_product(std::vector>> A, std::vector> B) { std::vector>> C; C = resize(C, A); for (int i = 0; i < A[0].size(); i++) { diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h index 7c9412c..bb3c343 100644 --- a/mlpp/lin_alg/lin_alg.h +++ b/mlpp/lin_alg/lin_alg.h @@ -12,7 +12,7 @@ #include -class LinAlg { +class MLPPLinAlg { public: // MATRIX FUNCTIONS diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp index d4a0c74..b22f098 100644 --- a/mlpp/lin_reg/lin_reg.cpp +++ b/mlpp/lin_reg/lin_reg.cpp @@ -34,7 +34,7 @@ double LinReg::modelTest(std::vector x) { } void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -66,7 +66,7 @@ void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { } void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -97,7 +97,7 @@ void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void LinReg::SGD(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -136,7 +136,7 @@ void LinReg::SGD(double learning_rate, int max_epoch, bool UI) { } void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -174,7 +174,7 @@ void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool } void LinReg::normalEquation() { - LinAlg alg; + MLPPLinAlg alg; Stat stat; std::vector x_means; std::vector> inputSetT = alg.transpose(inputSet); @@ -224,12 +224,12 @@ double LinReg::Cost(std::vector y_hat, std::vector y) { } std::vector LinReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double LinReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return alg.dot(weights, x) + bias; } diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp index 21d8c00..50439f2 100644 --- a/mlpp/log_reg/log_reg.cpp +++ b/mlpp/log_reg/log_reg.cpp @@ -31,7 +31,7 @@ double LogReg::modelTest(std::vector x) { } void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -63,7 +63,7 @@ void LogReg::gradientDescent(double learning_rate, int 
max_epoch, bool UI) { } void LogReg::MLE(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -94,7 +94,7 @@ void LogReg::MLE(double learning_rate, int max_epoch, bool UI) { } void LogReg::SGD(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -133,7 +133,7 @@ void LogReg::SGD(double learning_rate, int max_epoch, bool UI) { } void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -187,13 +187,13 @@ double LogReg::Cost(std::vector y_hat, std::vector y) { } std::vector LogReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } double LogReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.sigmoid(alg.dot(weights, x) + bias); } diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp index c6aae98..5b315a9 100644 --- a/mlpp/mann/mann.cpp +++ b/mlpp/mann/mann.cpp @@ -55,7 +55,7 @@ std::vector MANN::modelTest(std::vector x) { void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index c17f6f8..dcc3072 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -37,7 +37,7 @@ double MLP::modelTest(std::vector x) { void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -96,7 +96,7 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { void MLP::SGD(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -150,7 +150,7 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) { void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -232,7 +232,7 @@ double MLP::Cost(std::vector y_hat, std::vector y) { } std::vector MLP::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -240,7 +240,7 @@ std::vector MLP::Evaluate(std::vector> X) { } std::tuple>, std::vector>> MLP::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -248,7 +248,7 @@ std::tuple>, std::vector>> M } double MLP::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -256,7 +256,7 @@ double MLP::Evaluate(std::vector x) { } std::tuple, std::vector> MLP::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -264,7 +264,7 @@ std::tuple, std::vector> MLP::propagate(std::vector< } void 
MLP::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp index 959ba92..a9b4bd1 100644 --- a/mlpp/multi_output_layer/multi_output_layer.cpp +++ b/mlpp/multi_output_layer/multi_output_layer.cpp @@ -117,14 +117,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ } void MultiOutputLayer::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, 0); } void MultiOutputLayer::Test(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); a_test = (avn.*activationTest_map[activation])(z_test, 0); diff --git a/mlpp/multinomial_nb/multinomial_nb.cpp b/mlpp/multinomial_nb/multinomial_nb.cpp index 0370990..613669e 100644 --- a/mlpp/multinomial_nb/multinomial_nb.cpp +++ b/mlpp/multinomial_nb/multinomial_nb.cpp @@ -78,7 +78,7 @@ void MultinomialNB::computeTheta() { } void MultinomialNB::Evaluate() { - LinAlg alg; + MLPPLinAlg alg; for (int i = 0; i < outputSet.size(); i++) { // Pr(B | A) * Pr(A) double score[class_num]; diff --git a/mlpp/numerical_analysis/numerical_analysis.cpp b/mlpp/numerical_analysis/numerical_analysis.cpp index 3e0d8e0..e40e29e 100644 --- a/mlpp/numerical_analysis/numerical_analysis.cpp +++ b/mlpp/numerical_analysis/numerical_analysis.cpp @@ -226,12 +226,12 @@ double NumericalAnalysis::constantApproximation(double (*function)(std::vector), std::vector c, std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0]; } double NumericalAnalysis::quadraticApproximation(double (*function)(std::vector), std::vector c, std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0]; } @@ -245,7 +245,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c)); double resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0]; @@ -253,7 +253,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector), std::vector x) { - LinAlg alg; + MLPPLinAlg alg; std::vector> hessian_matrix = hessian(function, x); double laplacian = 0; for (int i = 0; i < hessian_matrix.size(); i++) { @@ -263,7 +263,7 @@ double NumericalAnalysis::laplacian(double (*function)(std::vector), std } std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector), std::vector x) { - LinAlg alg; + MLPPLinAlg alg; std::vector> hessianMatrix = hessian(function, x); /* The reason we do this is because the 2nd partial derivative test is less conclusive for functions of variables greater than diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp index a86e37e..5934a9d 100644 --- a/mlpp/output_layer/output_layer.cpp +++ b/mlpp/output_layer/output_layer.cpp @@ -114,14 +114,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, } void 
OutputLayer::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); a = (avn.*activation_map[activation])(z, 0); } void OutputLayer::Test(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z_test = alg.dot(weights, x) + bias; a_test = (avn.*activationTest_map[activation])(z_test, 0); diff --git a/mlpp/pca/pca.cpp b/mlpp/pca/pca.cpp index 87c92b8..04976b1 100644 --- a/mlpp/pca/pca.cpp +++ b/mlpp/pca/pca.cpp @@ -18,7 +18,7 @@ PCA::PCA(std::vector> inputSet, int k) : } std::vector> PCA::principalComponents() { - LinAlg alg; + MLPPLinAlg alg; MLPPData data; auto [U, S, Vt] = alg.SVD(alg.cov(inputSet)); @@ -34,7 +34,7 @@ std::vector> PCA::principalComponents() { } // Simply tells us the percentage of variance maintained. double PCA::score() { - LinAlg alg; + MLPPLinAlg alg; std::vector> X_approx = alg.matmult(U_reduce, Z); double num, den = 0; for (int i = 0; i < X_normalized.size(); i++) { diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp index 8e689d1..90b70ed 100644 --- a/mlpp/probit_reg/probit_reg.cpp +++ b/mlpp/probit_reg/probit_reg.cpp @@ -32,7 +32,7 @@ double ProbitReg::modelTest(std::vector x) { void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -65,7 +65,7 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -99,7 +99,7 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { // NOTE: ∂y_hat/∂z is sparse MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -140,7 +140,7 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -214,30 +214,30 @@ double ProbitReg::Cost(std::vector y_hat, std::vector y) { } std::vector ProbitReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector ProbitReg::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double ProbitReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.gaussianCDF(alg.dot(weights, x) + bias); } double ProbitReg::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // gaussianCDF ( wTx + b ) void ProbitReg::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = propagate(inputSet); diff --git a/mlpp/regularization/reg.cpp b/mlpp/regularization/reg.cpp index af198ae..01c9846 100644 --- a/mlpp/regularization/reg.cpp +++ b/mlpp/regularization/reg.cpp @@ -67,7 +67,7 @@ double Reg::regTerm(std::vector> weights, double lambda, dou } std::vector Reg::regWeights(std::vector weights, double lambda, double alpha, std::string reg) { - LinAlg alg; + MLPPLinAlg alg; if (reg == "WeightClipping") { return regDerivTerm(weights, lambda, alpha, reg); } @@ -79,7 
+79,7 @@ std::vector Reg::regWeights(std::vector weights, double lambda, } std::vector> Reg::regWeights(std::vector> weights, double lambda, double alpha, std::string reg) { - LinAlg alg; + MLPPLinAlg alg; if (reg == "WeightClipping") { return regDerivTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp index e866e00..5fa6219 100644 --- a/mlpp/softmax_net/softmax_net.cpp +++ b/mlpp/softmax_net/softmax_net.cpp @@ -36,7 +36,7 @@ std::vector> SoftmaxNet::modelSetTest(std::vector> SoftmaxNet::getEmbeddings() { @@ -251,7 +251,7 @@ double SoftmaxNet::Cost(std::vector> y_hat, std::vector> SoftmaxNet::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -259,7 +259,7 @@ std::vector> SoftmaxNet::Evaluate(std::vector>, std::vector>> SoftmaxNet::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector> a2 = avn.sigmoid(z2); @@ -267,7 +267,7 @@ std::tuple>, std::vector>> S } std::vector SoftmaxNet::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -275,7 +275,7 @@ std::vector SoftmaxNet::Evaluate(std::vector x) { } std::tuple, std::vector> SoftmaxNet::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector a2 = avn.sigmoid(z2); @@ -283,7 +283,7 @@ std::tuple, std::vector> SoftmaxNet::propagate(std:: } void SoftmaxNet::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); a2 = avn.sigmoid(z2); diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp index 98b72e8..425b468 100644 --- a/mlpp/softmax_reg/softmax_reg.cpp +++ b/mlpp/softmax_reg/softmax_reg.cpp @@ -31,7 +31,7 @@ std::vector> SoftmaxReg::modelSetTest(std::vector> y_hat, std::vector SoftmaxReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x))); } std::vector> SoftmaxReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias)); @@ -184,7 +184,7 @@ std::vector> SoftmaxReg::Evaluate(std::vector Stat::mode(const std::vector &x) { } double Stat::range(const std::vector &x) { - LinAlg alg; + MLPPLinAlg alg; return alg.max(x) - alg.min(x); } diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp index 50b0e87..84f106b 100644 --- a/mlpp/svc/svc.cpp +++ b/mlpp/svc/svc.cpp @@ -33,7 +33,7 @@ double SVC::modelTest(std::vector x) { void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -66,7 +66,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { void SVC::SGD(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; @@ -109,7 +109,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) { void SVC::MBGD(double 
learning_rate, int max_epoch, int mini_batch_size, bool UI) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -164,32 +164,32 @@ double SVC::Cost(std::vector z, std::vector y, std::vector SVC::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector SVC::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double SVC::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.sign(alg.dot(weights, x) + bias); } double SVC::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return alg.dot(weights, x) + bias; } // sign ( wTx + b ) void SVC::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = propagate(inputSet); diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp index 194a205..13b5016 100644 --- a/mlpp/tanh_reg/tanh_reg.cpp +++ b/mlpp/tanh_reg/tanh_reg.cpp @@ -32,7 +32,7 @@ double TanhReg::modelTest(std::vector x) { void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -65,7 +65,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -105,7 +105,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; double cost_prev = 0; int epoch = 1; @@ -163,30 +163,30 @@ double TanhReg::Cost(std::vector y_hat, std::vector y) { } std::vector TanhReg::Evaluate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } std::vector TanhReg::propagate(std::vector> X) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } double TanhReg::Evaluate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; return avn.tanh(alg.dot(weights, x) + bias); } double TanhReg::propagate(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // Tanh ( wTx + b ) void TanhReg::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; MLPPActivation avn; z = propagate(inputSet); diff --git a/mlpp/transforms/transforms.cpp b/mlpp/transforms/transforms.cpp index b78e144..4935a69 100644 --- a/mlpp/transforms/transforms.cpp +++ b/mlpp/transforms/transforms.cpp @@ -15,7 +15,7 @@ // DCT ii. // https://www.mathworks.com/help/images/discrete-cosine-transform.html std::vector> Transforms::discreteCosineTransform(std::vector> A) { - LinAlg alg; + MLPPLinAlg alg; A = alg.scalarAdd(-128, A); // Center around 0. 
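// Standard 2-D DCT-II (per the MathWorks reference above), which the computation of B
// below is expected to follow:
//   B[p][q] = alpha_p * alpha_q * sum_{m=0}^{M-1} sum_{n=0}^{N-1} A[m][n]
//             * cos(pi*(2*m+1)*p / (2*M)) * cos(pi*(2*n+1)*q / (2*N)),
// where alpha_0 = sqrt(1/M) and alpha_p = sqrt(2/M) for p > 0 (and likewise for q with N).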
std::vector> B; diff --git a/mlpp/uni_lin_reg/uni_lin_reg.cpp b/mlpp/uni_lin_reg/uni_lin_reg.cpp index e942e62..0a0d0a7 100644 --- a/mlpp/uni_lin_reg/uni_lin_reg.cpp +++ b/mlpp/uni_lin_reg/uni_lin_reg.cpp @@ -24,7 +24,7 @@ UniLinReg::UniLinReg(std::vector x, std::vector y) : } std::vector UniLinReg::modelSetTest(std::vector x) { - LinAlg alg; + MLPPLinAlg alg; return alg.scalarAdd(b0, alg.scalarMultiply(b1, x)); } diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp index 5a5dfdb..3a81d64 100644 --- a/mlpp/wgan/wgan.cpp +++ b/mlpp/wgan/wgan.cpp @@ -24,13 +24,13 @@ WGAN::~WGAN() { } std::vector> WGAN::generateExample(int n) { - LinAlg alg; + MLPPLinAlg alg; return modelSetTestGenerator(alg.gaussianNoise(n, k)); } void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; - LinAlg alg; + MLPPLinAlg alg; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -86,7 +86,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } double WGAN::score() { - LinAlg alg; + MLPPLinAlg alg; Utilities util; forwardPass(); return util.performance(y_hat, alg.onevec(n)); @@ -106,7 +106,7 @@ void WGAN::save(std::string fileName) { } void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { - LinAlg alg; + MLPPLinAlg alg; if (network.empty()) { network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha)); network[0].forwardPass(); @@ -117,7 +117,7 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit } void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01); } else { // Should never happen. @@ -169,7 +169,7 @@ double WGAN::Cost(std::vector y_hat, std::vector y) { } void WGAN::forwardPass() { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { network[0].input = alg.gaussianNoise(n, k); network[0].forwardPass(); @@ -187,7 +187,7 @@ void WGAN::forwardPass() { } void WGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { - LinAlg alg; + MLPPLinAlg alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; @@ -204,7 +204,7 @@ void WGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, double learning_rate) { - LinAlg alg; + MLPPLinAlg alg; if (!network.empty()) { for (int i = network.size() / 2; i >= 0; i--) { @@ -219,7 +219,7 @@ void WGAN::updateGeneratorParameters(std::vector std::tuple>>, std::vector> WGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. @@ -255,7 +255,7 @@ std::tuple>>, std::vector> W std::vector>> WGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; - LinAlg alg; + MLPPLinAlg alg; Reg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.