From 43e1b8d1fc128562acb21dcdd02d327392f349d9 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Wed, 25 Jan 2023 00:54:50 +0100
Subject: [PATCH] Prefixed more classes with MLPP.

---
 mlpp/ann/ann.cpp | 8 +--
 mlpp/ann/ann.h | 2 +-
 mlpp/c_log_log_reg/c_log_log_reg.cpp | 10 ++--
 mlpp/cost/cost.cpp | 8 +--
 mlpp/data/data.cpp | 6 +--
 mlpp/dual_svc/dual_svc.cpp | 6 +--
 mlpp/exp_reg/exp_reg.cpp | 8 +--
 mlpp/gan/gan.cpp | 10 ++--
 mlpp/gan/gan.h | 2 +-
 mlpp/lin_reg/lin_reg.cpp | 38 +++++++-------
 mlpp/lin_reg/lin_reg.h | 4 +-
 mlpp/log_reg/log_reg.cpp | 36 ++++++-------
 mlpp/log_reg/log_reg.h | 4 +-
 mlpp/mann/mann.cpp | 30 +++++------
 mlpp/mann/mann.h | 8 +--
 mlpp/mlp/mlp.cpp | 36 ++++++-------
 mlpp/mlp/mlp.h | 4 +-
 .../multi_output_layer/multi_output_layer.cpp | 6 +--
 mlpp/multi_output_layer/multi_output_layer.h | 4 +-
 mlpp/multinomial_nb/multinomial_nb.cpp | 12 ++---
 mlpp/multinomial_nb/multinomial_nb.h | 4 +-
 .../numerical_analysis/numerical_analysis.cpp | 50 +++++++++----------
 mlpp/numerical_analysis/numerical_analysis.h | 2 +-
 mlpp/outlier_finder/outlier_finder.cpp | 6 +--
 mlpp/outlier_finder/outlier_finder.h | 4 +-
 mlpp/output_layer/output_layer.cpp | 6 +--
 mlpp/output_layer/output_layer.h | 4 +-
 mlpp/pca/pca.cpp | 6 +--
 mlpp/pca/pca.h | 4 +-
 mlpp/probit_reg/probit_reg.cpp | 40 +++++++--------
 mlpp/probit_reg/probit_reg.h | 4 +-
 mlpp/regularization/reg.cpp | 16 +++---
 mlpp/regularization/reg.h | 2 +-
 mlpp/softmax_net/softmax_net.cpp | 38 +++++++-------
 mlpp/softmax_net/softmax_net.h | 4 +-
 mlpp/softmax_reg/softmax_reg.cpp | 8 +--
 mlpp/svc/svc.cpp | 6 +--
 mlpp/tanh_reg/tanh_reg.cpp | 8 +--
 mlpp/wgan/wgan.cpp | 10 ++--
 mlpp/wgan/wgan.h | 2 +-
 40 files changed, 233 insertions(+), 233 deletions(-)

diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index e772c73..e2b8774 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -663,14 +663,14 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new OutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
 	}
 }

 double MLPPANN::Cost(std::vector y_hat, std::vector y) {
-	Reg regularization;
+	MLPPReg regularization;
 	class MLPPCost cost;
 	double totalRegTerm = 0;

@@ -722,7 +722,7 @@ std::tuple>>, std::vector> M
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;

 	std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
diff --git a/mlpp/ann/ann.h b/mlpp/ann/ann.h index b679155..f12ffae 100644 --- a/mlpp/ann/ann.h +++ b/mlpp/ann/ann.h @@ -55,7 +55,7 @@ private: std::vector y_hat; std::vector network; - OutputLayer *outputLayer; + MLPPOutputLayer *outputLayer; int n; int k; diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index 7dde514..c02104c 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -32,7 +32,7 @@ double MLPPCLogLogReg::modelTest(std::vector x) { void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -66,7 +66,7 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -97,7 +97,7 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -139,7 +139,7 @@ void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -185,7 +185,7 @@ double MLPPCLogLogReg::score() { } double MLPPCLogLogReg::Cost(std::vector y_hat, std::vector y) { - Reg regularization; + MLPPReg regularization; class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp index 62b1f7d..f59abab 100644 --- a/mlpp/cost/cost.cpp +++ b/mlpp/cost/cost.cpp @@ -360,23 +360,23 @@ std::vector> MLPPCost::WassersteinLossDeriv(std::vector y_hat, std::vector y, std::vector weights, double C) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } double MLPPCost::HingeLoss(std::vector> y_hat, std::vector> y, std::vector> weights, double C) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } std::vector MLPPCost::HingeLossDeriv(std::vector y_hat, std::vector y, double C) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } std::vector> MLPPCost::HingeLossDeriv(std::vector> y_hat, std::vector> y, double C) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp index 3ae512c..a3a9baf 100644 --- a/mlpp/data/data.cpp +++ b/mlpp/data/data.cpp @@ -621,11 +621,11 @@ std::tuple>, std::vector> MLPPData: outputSet.push_back(BOW[i]); } MLPPLinAlg alg; - SoftmaxNet *model; + MLPPSoftmaxNet *model; if (type == "Skipgram") { - model = new SoftmaxNet(outputSet, inputSet, dimension); + model = new MLPPSoftmaxNet(outputSet, inputSet, dimension); } else { // else = CBOW. We maintain it is a default. 
- model = new SoftmaxNet(inputSet, outputSet, dimension); + model = new MLPPSoftmaxNet(inputSet, outputSet, dimension); } model->gradientDescent(learning_rate, max_epoch, 1); diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index 87fcc00..1a2b699 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -35,7 +35,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -83,7 +83,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // class MLPPCost cost; // MLPPActivation avn; // MLPPLinAlg alg; -// Reg regularization; +// MLPPReg regularization; // double cost_prev = 0; // int epoch = 1; @@ -116,7 +116,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // class MLPPCost cost; // MLPPActivation avn; // MLPPLinAlg alg; -// Reg regularization; +// MLPPReg regularization; // double cost_prev = 0; // int epoch = 1; diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp index 4e46ce3..4e7dcb6 100644 --- a/mlpp/exp_reg/exp_reg.cpp +++ b/mlpp/exp_reg/exp_reg.cpp @@ -33,7 +33,7 @@ double MLPPExpReg::modelTest(std::vector x) { void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -89,7 +89,7 @@ void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) { - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -136,7 +136,7 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -204,7 +204,7 @@ void MLPPExpReg::save(std::string fileName) { } double MLPPExpReg::Cost(std::vector y_hat, std::vector y) { - Reg regularization; + MLPPReg regularization; class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp index bfa0280..e10e44a 100644 --- a/mlpp/gan/gan.cpp +++ b/mlpp/gan/gan.cpp @@ -110,9 +110,9 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { MLPPLinAlg alg; if (!network.empty()) { - outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha); + outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha); } else { - outputLayer = new OutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha); + outputLayer = new MLPPOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha); } } @@ -146,7 +146,7 @@ std::vector MLPPGAN::modelSetTestDiscriminator(std::vector y_hat, std::vector y) { - Reg regularization; + MLPPReg regularization; class MLPPCost cost; double totalRegTerm = 0; @@ -211,7 +211,7 @@ std::tuple>>, std::vector> M class MLPPCost cost; 
MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. @@ -247,7 +247,7 @@ std::vector>> MLPPGAN::computeGeneratorGradients class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. diff --git a/mlpp/gan/gan.h b/mlpp/gan/gan.h index 3ce8a20..3e73cef 100644 --- a/mlpp/gan/gan.h +++ b/mlpp/gan/gan.h @@ -47,7 +47,7 @@ private: std::vector y_hat; std::vector network; - OutputLayer *outputLayer; + MLPPOutputLayer *outputLayer; int n; int k; diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp index b22f098..17ea602 100644 --- a/mlpp/lin_reg/lin_reg.cpp +++ b/mlpp/lin_reg/lin_reg.cpp @@ -17,7 +17,7 @@ -LinReg::LinReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : +MLPPLinReg::MLPPLinReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); @@ -25,17 +25,17 @@ LinReg::LinReg(std::vector> inputSet, std::vector ou bias = Utilities::biasInitialization(); } -std::vector LinReg::modelSetTest(std::vector> X) { +std::vector MLPPLinReg::modelSetTest(std::vector> X) { return Evaluate(X); } -double LinReg::modelTest(std::vector x) { +double MLPPLinReg::modelTest(std::vector x) { return Evaluate(x); } -void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { +void MLPPLinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -65,9 +65,9 @@ void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { } } -void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPLinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -96,9 +96,9 @@ void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void LinReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPLinReg::SGD(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -135,9 +135,9 @@ void LinReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -173,7 +173,7 @@ void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool forwardPass(); } -void LinReg::normalEquation() { +void MLPPLinReg::normalEquation() { MLPPLinAlg alg; Stat stat; std::vector x_means; @@ -207,33 +207,33 @@ void LinReg::normalEquation() { //} } -double LinReg::score() { +double MLPPLinReg::score() { Utilities util; return util.performance(y_hat, outputSet); } -void LinReg::save(std::string fileName) { +void MLPPLinReg::save(std::string fileName) { Utilities util; util.saveParameters(fileName, weights, bias); } -double LinReg::Cost(std::vector y_hat, 
std::vector y) { - Reg regularization; +double MLPPLinReg::Cost(std::vector y_hat, std::vector y) { + MLPPReg regularization; class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector LinReg::Evaluate(std::vector> X) { +std::vector MLPPLinReg::Evaluate(std::vector> X) { MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } -double LinReg::Evaluate(std::vector x) { +double MLPPLinReg::Evaluate(std::vector x) { MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // wTx + b -void LinReg::forwardPass() { +void MLPPLinReg::forwardPass() { y_hat = Evaluate(inputSet); } diff --git a/mlpp/lin_reg/lin_reg.h b/mlpp/lin_reg/lin_reg.h index 97251ed..af45fe4 100644 --- a/mlpp/lin_reg/lin_reg.h +++ b/mlpp/lin_reg/lin_reg.h @@ -12,9 +12,9 @@ #include -class LinReg { +class MLPPLinReg { public: - LinReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPLinReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void NewtonRaphson(double learning_rate, int max_epoch, bool UI); diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp index 50439f2..7f9863f 100644 --- a/mlpp/log_reg/log_reg.cpp +++ b/mlpp/log_reg/log_reg.cpp @@ -15,24 +15,24 @@ #include -LogReg::LogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : +MLPPLogReg::MLPPLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); weights = Utilities::weightInitialization(k); bias = Utilities::biasInitialization(); } -std::vector LogReg::modelSetTest(std::vector> X) { +std::vector MLPPLogReg::modelSetTest(std::vector> X) { return Evaluate(X); } -double LogReg::modelTest(std::vector x) { +double MLPPLogReg::modelTest(std::vector x) { return Evaluate(x); } -void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -62,9 +62,9 @@ void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void LogReg::MLE(double learning_rate, int max_epoch, bool UI) { +void MLPPLogReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -93,9 +93,9 @@ void LogReg::MLE(double learning_rate, int max_epoch, bool UI) { } } -void LogReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPLogReg::SGD(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -132,9 +132,9 @@ void LogReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -170,35 +170,35 @@ void LogReg::MBGD(double learning_rate, int 
max_epoch, int mini_batch_size, bool forwardPass(); } -double LogReg::score() { +double MLPPLogReg::score() { Utilities util; return util.performance(y_hat, outputSet); } -void LogReg::save(std::string fileName) { +void MLPPLogReg::save(std::string fileName) { Utilities util; util.saveParameters(fileName, weights, bias); } -double LogReg::Cost(std::vector y_hat, std::vector y) { - Reg regularization; +double MLPPLogReg::Cost(std::vector y_hat, std::vector y) { + MLPPReg regularization; class MLPPCost cost; return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector LogReg::Evaluate(std::vector> X) { +std::vector MLPPLogReg::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } -double LogReg::Evaluate(std::vector x) { +double MLPPLogReg::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return avn.sigmoid(alg.dot(weights, x) + bias); } // sigmoid ( wTx + b ) -void LogReg::forwardPass() { +void MLPPLogReg::forwardPass() { y_hat = Evaluate(inputSet); } diff --git a/mlpp/log_reg/log_reg.h b/mlpp/log_reg/log_reg.h index 8de8da8..f651d40 100644 --- a/mlpp/log_reg/log_reg.h +++ b/mlpp/log_reg/log_reg.h @@ -13,9 +13,9 @@ -class LogReg { +class MLPPLogReg { public: - LogReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPLogReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp index 5b315a9..6e3bbd7 100644 --- a/mlpp/mann/mann.cpp +++ b/mlpp/mann/mann.cpp @@ -14,15 +14,15 @@ #include -MANN::MANN(std::vector> inputSet, std::vector> outputSet) : +MLPPMANN::MLPPMANN(std::vector> inputSet, std::vector> outputSet) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) { } -MANN::~MANN() { +MLPPMANN::~MLPPMANN() { delete outputLayer; } -std::vector> MANN::modelSetTest(std::vector> X) { +std::vector> MLPPMANN::modelSetTest(std::vector> X) { if (!network.empty()) { network[0].input = X; network[0].forwardPass(); @@ -39,7 +39,7 @@ std::vector> MANN::modelSetTest(std::vectora; } -std::vector MANN::modelTest(std::vector x) { +std::vector MLPPMANN::modelTest(std::vector x) { if (!network.empty()) { network[0].Test(x); for (int i = 1; i < network.size(); i++) { @@ -52,11 +52,11 @@ std::vector MANN::modelTest(std::vector x) { return outputLayer->a_test; } -void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -120,13 +120,13 @@ void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -double MANN::score() { +double MLPPMANN::score() { Utilities util; forwardPass(); return util.performance(y_hat, outputSet); } -void MANN::save(std::string fileName) { +void MLPPMANN::save(std::string fileName) { Utilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); @@ -139,7 +139,7 @@ void MANN::save(std::string fileName) { } } -void MANN::addLayer(int n_hidden, std::string activation, 
std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPMANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { if (network.empty()) { network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha)); network[0].forwardPass(); @@ -149,16 +149,16 @@ void MANN::addLayer(int n_hidden, std::string activation, std::string weightInit } } -void MANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPMANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) { if (!network.empty()) { - outputLayer = new MultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha); + outputLayer = new MLPPMultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha); } else { - outputLayer = new MultiOutputLayer(n_output, k, activation, loss, inputSet, weightInit, reg, lambda, alpha); + outputLayer = new MLPPMultiOutputLayer(n_output, k, activation, loss, inputSet, weightInit, reg, lambda, alpha); } } -double MANN::Cost(std::vector> y_hat, std::vector> y) { - Reg regularization; +double MLPPMANN::Cost(std::vector> y_hat, std::vector> y) { + MLPPReg regularization; class MLPPCost cost; double totalRegTerm = 0; @@ -171,7 +171,7 @@ double MANN::Cost(std::vector> y_hat, std::vectorweights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg); } -void MANN::forwardPass() { +void MLPPMANN::forwardPass() { if (!network.empty()) { network[0].input = inputSet; network[0].forwardPass(); diff --git a/mlpp/mann/mann.h b/mlpp/mann/mann.h index d70ba94..6c7db4e 100644 --- a/mlpp/mann/mann.h +++ b/mlpp/mann/mann.h @@ -16,10 +16,10 @@ -class MANN { +class MLPPMANN { public: - MANN(std::vector> inputSet, std::vector> outputSet); - ~MANN(); + MLPPMANN(std::vector> inputSet, std::vector> outputSet); + ~MLPPMANN(); std::vector> modelSetTest(std::vector> X); std::vector modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); @@ -38,7 +38,7 @@ private: std::vector> y_hat; std::vector network; - MultiOutputLayer *outputLayer; + MLPPMultiOutputLayer *outputLayer; int n; int k; diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index dcc3072..165e12e 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -16,7 +16,7 @@ #include -MLP::MLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg, double lambda, double alpha) : +MLPPMLP::MLPPMLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { MLPPActivation avn; y_hat.resize(n); @@ -27,18 +27,18 @@ MLP::MLP(std::vector> inputSet, std::vector outputSe bias2 = Utilities::biasInitialization(); } -std::vector MLP::modelSetTest(std::vector> X) { +std::vector MLPPMLP::modelSetTest(std::vector> X) { return Evaluate(X); } -double MLP::modelTest(std::vector x) { +double MLPPMLP::modelTest(std::vector x) { return Evaluate(x); } -void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPMLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - 
Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -94,10 +94,10 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void MLP::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPMLP::SGD(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -148,10 +148,10 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -214,24 +214,24 @@ void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI forwardPass(); } -double MLP::score() { +double MLPPMLP::score() { Utilities util; return util.performance(y_hat, outputSet); } -void MLP::save(std::string fileName) { +void MLPPMLP::save(std::string fileName) { Utilities util; util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights2, bias2, 1, 2); } -double MLP::Cost(std::vector y_hat, std::vector y) { - Reg regularization; +double MLPPMLP::Cost(std::vector y_hat, std::vector y) { + MLPPReg regularization; class MLPPCost cost; return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg); } -std::vector MLP::Evaluate(std::vector> X) { +std::vector MLPPMLP::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); @@ -239,7 +239,7 @@ std::vector MLP::Evaluate(std::vector> X) { return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2))); } -std::tuple>, std::vector>> MLP::propagate(std::vector> X) { +std::tuple>, std::vector>> MLPPMLP::propagate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); @@ -247,7 +247,7 @@ std::tuple>, std::vector>> M return { z2, a2 }; } -double MLP::Evaluate(std::vector x) { +double MLPPMLP::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); @@ -255,7 +255,7 @@ double MLP::Evaluate(std::vector x) { return avn.sigmoid(alg.dot(weights2, a2) + bias2); } -std::tuple, std::vector> MLP::propagate(std::vector x) { +std::tuple, std::vector> MLPPMLP::propagate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); @@ -263,7 +263,7 @@ std::tuple, std::vector> MLP::propagate(std::vector< return { z2, a2 }; } -void MLP::forwardPass() { +void MLPPMLP::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); diff --git a/mlpp/mlp/mlp.h b/mlpp/mlp/mlp.h index 82b56c4..d5a7b38 100644 --- a/mlpp/mlp/mlp.h +++ b/mlpp/mlp/mlp.h @@ -14,9 +14,9 @@ -class MLP { +class MLPPMLP { public: - MLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPMLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double 
modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp index a9b4bd1..f9fdae5 100644 --- a/mlpp/multi_output_layer/multi_output_layer.cpp +++ b/mlpp/multi_output_layer/multi_output_layer.cpp @@ -12,7 +12,7 @@ #include -MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : +MLPPMultiOutputLayer::MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { weights = Utilities::weightInitialization(n_hidden, n_output, weightInit); bias = Utilities::biasInitialization(n_output); @@ -116,14 +116,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; } -void MultiOutputLayer::forwardPass() { +void MLPPMultiOutputLayer::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; z = alg.mat_vec_add(alg.matmult(input, weights), bias); a = (avn.*activation_map[activation])(z, 0); } -void MultiOutputLayer::Test(std::vector x) { +void MLPPMultiOutputLayer::Test(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias); diff --git a/mlpp/multi_output_layer/multi_output_layer.h b/mlpp/multi_output_layer/multi_output_layer.h index 26f2efb..e1b96e0 100644 --- a/mlpp/multi_output_layer/multi_output_layer.h +++ b/mlpp/multi_output_layer/multi_output_layer.h @@ -16,9 +16,9 @@ #include -class MultiOutputLayer { +class MLPPMultiOutputLayer { public: - MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha); + MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha); int n_output; int n_hidden; diff --git a/mlpp/multinomial_nb/multinomial_nb.cpp b/mlpp/multinomial_nb/multinomial_nb.cpp index 613669e..ce17680 100644 --- a/mlpp/multinomial_nb/multinomial_nb.cpp +++ b/mlpp/multinomial_nb/multinomial_nb.cpp @@ -13,13 +13,13 @@ #include -MultinomialNB::MultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num) : +MLPPMultinomialNB::MLPPMultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num) : inputSet(inputSet), outputSet(outputSet), class_num(class_num) { y_hat.resize(outputSet.size()); Evaluate(); } -std::vector MultinomialNB::modelSetTest(std::vector> X) { +std::vector MLPPMultinomialNB::modelSetTest(std::vector> X) { std::vector y_hat; for (int i = 0; i < X.size(); i++) { y_hat.push_back(modelTest(X[i])); @@ -27,7 +27,7 @@ std::vector MultinomialNB::modelSetTest(std::vector> return y_hat; } -double MultinomialNB::modelTest(std::vector x) { +double MLPPMultinomialNB::modelTest(std::vector x) { double score[class_num]; computeTheta(); @@ -48,12 +48,12 @@ double MultinomialNB::modelTest(std::vector x) { return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double))); } -double MultinomialNB::score() { +double 
MLPPMultinomialNB::score() { Utilities util; return util.performance(y_hat, outputSet); } -void MultinomialNB::computeTheta() { +void MLPPMultinomialNB::computeTheta() { // Resizing theta for the sake of ease & proper access of the elements. theta.resize(class_num); @@ -77,7 +77,7 @@ void MultinomialNB::computeTheta() { } } -void MultinomialNB::Evaluate() { +void MLPPMultinomialNB::Evaluate() { MLPPLinAlg alg; for (int i = 0; i < outputSet.size(); i++) { // Pr(B | A) * Pr(A) diff --git a/mlpp/multinomial_nb/multinomial_nb.h b/mlpp/multinomial_nb/multinomial_nb.h index 01ac0b4..0828451 100644 --- a/mlpp/multinomial_nb/multinomial_nb.h +++ b/mlpp/multinomial_nb/multinomial_nb.h @@ -12,9 +12,9 @@ #include -class MultinomialNB { +class MLPPMultinomialNB { public: - MultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num); + MLPPMultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); double score(); diff --git a/mlpp/numerical_analysis/numerical_analysis.cpp b/mlpp/numerical_analysis/numerical_analysis.cpp index e40e29e..919b841 100644 --- a/mlpp/numerical_analysis/numerical_analysis.cpp +++ b/mlpp/numerical_analysis/numerical_analysis.cpp @@ -14,40 +14,40 @@ -double NumericalAnalysis::numDiff(double (*function)(double), double x) { +double MLPPNumericalAnalysis::numDiff(double (*function)(double), double x) { double eps = 1e-10; return (function(x + eps) - function(x)) / eps; // This is just the formal def. of the derivative. } -double NumericalAnalysis::numDiff_2(double (*function)(double), double x) { +double MLPPNumericalAnalysis::numDiff_2(double (*function)(double), double x) { double eps = 1e-5; return (function(x + 2 * eps) - 2 * function(x + eps) + function(x)) / (eps * eps); } -double NumericalAnalysis::numDiff_3(double (*function)(double), double x) { +double MLPPNumericalAnalysis::numDiff_3(double (*function)(double), double x) { double eps = 1e-5; double t1 = function(x + 3 * eps) - 2 * function(x + 2 * eps) + function(x + eps); double t2 = function(x + 2 * eps) - 2 * function(x + eps) + function(x); return (t1 - t2) / (eps * eps * eps); } -double NumericalAnalysis::constantApproximation(double (*function)(double), double c) { +double MLPPNumericalAnalysis::constantApproximation(double (*function)(double), double c) { return function(c); } -double NumericalAnalysis::linearApproximation(double (*function)(double), double c, double x) { +double MLPPNumericalAnalysis::linearApproximation(double (*function)(double), double c, double x) { return constantApproximation(function, c) + numDiff(function, c) * (x - c); } -double NumericalAnalysis::quadraticApproximation(double (*function)(double), double c, double x) { +double MLPPNumericalAnalysis::quadraticApproximation(double (*function)(double), double c, double x) { return linearApproximation(function, c, x) + 0.5 * numDiff_2(function, c) * (x - c) * (x - c); } -double NumericalAnalysis::cubicApproximation(double (*function)(double), double c, double x) { +double MLPPNumericalAnalysis::cubicApproximation(double (*function)(double), double c, double x) { return quadraticApproximation(function, c, x) + (1 / 6) * numDiff_3(function, c) * (x - c) * (x - c) * (x - c); } -double NumericalAnalysis::numDiff(double (*function)(std::vector), std::vector x, int axis) { +double MLPPNumericalAnalysis::numDiff(double (*function)(std::vector), std::vector x, int axis) { // For multivariable function analysis. 
// This will be used for calculating Jacobian vectors. // Diffrentiate with respect to indicated axis. (0, 1, 2 ...) @@ -58,7 +58,7 @@ double NumericalAnalysis::numDiff(double (*function)(std::vector), std:: return (function(x_eps) - function(x)) / eps; } -double NumericalAnalysis::numDiff_2(double (*function)(std::vector), std::vector x, int axis1, int axis2) { +double MLPPNumericalAnalysis::numDiff_2(double (*function)(std::vector), std::vector x, int axis1, int axis2) { //For Hessians. double eps = 1e-5; @@ -75,7 +75,7 @@ double NumericalAnalysis::numDiff_2(double (*function)(std::vector), std return (function(x_pp) - function(x_np) - function(x_pn) + function(x)) / (eps * eps); } -double NumericalAnalysis::numDiff_3(double (*function)(std::vector), std::vector x, int axis1, int axis2, int axis3) { +double MLPPNumericalAnalysis::numDiff_3(double (*function)(std::vector), std::vector x, int axis1, int axis2, int axis3) { // For third order derivative tensors. // NOTE: Approximations do not appear to be accurate for sinusodial functions... // Should revisit this later. @@ -112,7 +112,7 @@ double NumericalAnalysis::numDiff_3(double (*function)(std::vector), std return (thirdAxis - noThirdAxis) / (eps * eps * eps); } -double NumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double x_0, double epoch_num) { +double MLPPNumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double x_0, double epoch_num) { double x = x_0; for (int i = 0; i < epoch_num; i++) { x -= function(x) / numDiff(function, x); @@ -120,7 +120,7 @@ double NumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double return x; } -double NumericalAnalysis::halleyMethod(double (*function)(double), double x_0, double epoch_num) { +double MLPPNumericalAnalysis::halleyMethod(double (*function)(double), double x_0, double epoch_num) { double x = x_0; for (int i = 0; i < epoch_num; i++) { x -= ((2 * function(x) * numDiff(function, x)) / (2 * numDiff(function, x) * numDiff(function, x) - function(x) * numDiff_2(function, x))); @@ -128,7 +128,7 @@ double NumericalAnalysis::halleyMethod(double (*function)(double), double x_0, d return x; } -double NumericalAnalysis::invQuadraticInterpolation(double (*function)(double), std::vector x_0, double epoch_num) { +double MLPPNumericalAnalysis::invQuadraticInterpolation(double (*function)(double), std::vector x_0, double epoch_num) { double x = 0; std::vector currentThree = x_0; for (int i = 0; i < epoch_num; i++) { @@ -143,7 +143,7 @@ double NumericalAnalysis::invQuadraticInterpolation(double (*function)(double), return x; } -double NumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vector q_0, double p, double h) { +double MLPPNumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vector q_0, double p, double h) { double max_epoch = (p - q_0[0]) / h; double x = q_0[0]; double y = q_0[1]; @@ -154,7 +154,7 @@ double NumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vect return y; } -double NumericalAnalysis::eulerianMethod(double (*derivative)(std::vector), std::vector q_0, double p, double h) { +double MLPPNumericalAnalysis::eulerianMethod(double (*derivative)(std::vector), std::vector q_0, double p, double h) { double max_epoch = (p - q_0[0]) / h; double x = q_0[0]; double y = q_0[1]; @@ -165,7 +165,7 @@ double NumericalAnalysis::eulerianMethod(double (*derivative)(std::vector NumericalAnalysis::jacobian(double (*function)(std::vector), std::vector x) { +std::vector 
MLPPNumericalAnalysis::jacobian(double (*function)(std::vector), std::vector x) { std::vector jacobian; jacobian.resize(x.size()); for (int i = 0; i < jacobian.size(); i++) { @@ -189,7 +189,7 @@ std::vector NumericalAnalysis::jacobian(double (*function)(std::vector> NumericalAnalysis::hessian(double (*function)(std::vector), std::vector x) { +std::vector> MLPPNumericalAnalysis::hessian(double (*function)(std::vector), std::vector x) { std::vector> hessian; hessian.resize(x.size()); for (int i = 0; i < hessian.size(); i++) { @@ -203,7 +203,7 @@ std::vector> NumericalAnalysis::hessian(double (*function)(s return hessian; } -std::vector>> NumericalAnalysis::thirdOrderTensor(double (*function)(std::vector), std::vector x) { +std::vector>> MLPPNumericalAnalysis::thirdOrderTensor(double (*function)(std::vector), std::vector x) { std::vector>> tensor; tensor.resize(x.size()); for (int i = 0; i < tensor.size(); i++) { @@ -221,21 +221,21 @@ std::vector>> NumericalAnalysis::thirdOrderTenso return tensor; } -double NumericalAnalysis::constantApproximation(double (*function)(std::vector), std::vector c) { +double MLPPNumericalAnalysis::constantApproximation(double (*function)(std::vector), std::vector c) { return function(c); } -double NumericalAnalysis::linearApproximation(double (*function)(std::vector), std::vector c, std::vector x) { +double MLPPNumericalAnalysis::linearApproximation(double (*function)(std::vector), std::vector c, std::vector x) { MLPPLinAlg alg; return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0]; } -double NumericalAnalysis::quadraticApproximation(double (*function)(std::vector), std::vector c, std::vector x) { +double MLPPNumericalAnalysis::quadraticApproximation(double (*function)(std::vector), std::vector c, std::vector x) { MLPPLinAlg alg; return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0]; } -double NumericalAnalysis::cubicApproximation(double (*function)(std::vector), std::vector c, std::vector x) { +double MLPPNumericalAnalysis::cubicApproximation(double (*function)(std::vector), std::vector c, std::vector x) { /* Not completely sure as the literature seldom discusses the third order taylor approximation, in particular for multivariate cases, but ostensibly, the matrix/tensor/vector multiplies @@ -252,7 +252,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector), std::vector x) { +double MLPPNumericalAnalysis::laplacian(double (*function)(std::vector), std::vector x) { MLPPLinAlg alg; std::vector> hessian_matrix = hessian(function, x); double laplacian = 0; @@ -262,7 +262,7 @@ double NumericalAnalysis::laplacian(double (*function)(std::vector), std return laplacian; } -std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector), std::vector x) { +std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector), std::vector x) { MLPPLinAlg alg; std::vector> hessianMatrix = hessian(function, x); /* diff --git a/mlpp/numerical_analysis/numerical_analysis.h b/mlpp/numerical_analysis/numerical_analysis.h index 03794f5..9116726 100644 --- a/mlpp/numerical_analysis/numerical_analysis.h +++ b/mlpp/numerical_analysis/numerical_analysis.h @@ -11,7 +11,7 @@ #include -class NumericalAnalysis { +class MLPPNumericalAnalysis { public: /* A numerical method for derivatives is used. 
This may be subject to change, as an analytical method for calculating derivatives will most likely be used in diff --git a/mlpp/outlier_finder/outlier_finder.cpp b/mlpp/outlier_finder/outlier_finder.cpp index 1c6c734..4dd74d9 100644 --- a/mlpp/outlier_finder/outlier_finder.cpp +++ b/mlpp/outlier_finder/outlier_finder.cpp @@ -9,11 +9,11 @@ #include -OutlierFinder::OutlierFinder(int threshold) : +MLPPOutlierFinder::MLPPOutlierFinder(int threshold) : threshold(threshold) { } -std::vector> OutlierFinder::modelSetTest(std::vector> inputSet) { +std::vector> MLPPOutlierFinder::modelSetTest(std::vector> inputSet) { Stat stat; std::vector> outliers; outliers.resize(inputSet.size()); @@ -28,7 +28,7 @@ std::vector> OutlierFinder::modelSetTest(std::vector OutlierFinder::modelTest(std::vector inputSet) { +std::vector MLPPOutlierFinder::modelTest(std::vector inputSet) { Stat stat; std::vector outliers; for (int i = 0; i < inputSet.size(); i++) { diff --git a/mlpp/outlier_finder/outlier_finder.h b/mlpp/outlier_finder/outlier_finder.h index f23b0ed..ae1b09d 100644 --- a/mlpp/outlier_finder/outlier_finder.h +++ b/mlpp/outlier_finder/outlier_finder.h @@ -11,10 +11,10 @@ #include -class OutlierFinder { +class MLPPOutlierFinder { public: // Cnstr - OutlierFinder(int threshold); + MLPPOutlierFinder(int threshold); std::vector> modelSetTest(std::vector> inputSet); std::vector modelTest(std::vector inputSet); diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp index 5934a9d..749ec44 100644 --- a/mlpp/output_layer/output_layer.cpp +++ b/mlpp/output_layer/output_layer.cpp @@ -12,7 +12,7 @@ #include -OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : +MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { weights = Utilities::weightInitialization(n_hidden, weightInit); bias = Utilities::biasInitialization(); @@ -113,14 +113,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; } -void OutputLayer::forwardPass() { +void MLPPOutputLayer::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); a = (avn.*activation_map[activation])(z, 0); } -void OutputLayer::Test(std::vector x) { +void MLPPOutputLayer::Test(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; z_test = alg.dot(weights, x) + bias; diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h index 81bc306..2f3d08c 100644 --- a/mlpp/output_layer/output_layer.h +++ b/mlpp/output_layer/output_layer.h @@ -16,9 +16,9 @@ #include -class OutputLayer { +class MLPPOutputLayer { public: - OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha); + MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha); int n_hidden; std::string activation; diff --git a/mlpp/pca/pca.cpp b/mlpp/pca/pca.cpp index 04976b1..7f4891a 100644 --- a/mlpp/pca/pca.cpp +++ b/mlpp/pca/pca.cpp @@ -13,11 +13,11 @@ 
-PCA::PCA(std::vector> inputSet, int k) : +MLPPPCA::MLPPPCA(std::vector> inputSet, int k) : inputSet(inputSet), k(k) { } -std::vector> PCA::principalComponents() { +std::vector> MLPPPCA::principalComponents() { MLPPLinAlg alg; MLPPData data; @@ -33,7 +33,7 @@ std::vector> PCA::principalComponents() { return Z; } // Simply tells us the percentage of variance maintained. -double PCA::score() { +double MLPPPCA::score() { MLPPLinAlg alg; std::vector> X_approx = alg.matmult(U_reduce, Z); double num, den = 0; diff --git a/mlpp/pca/pca.h b/mlpp/pca/pca.h index 354d4fd..9b856a6 100644 --- a/mlpp/pca/pca.h +++ b/mlpp/pca/pca.h @@ -11,9 +11,9 @@ #include -class PCA { +class MLPPPCA { public: - PCA(std::vector> inputSet, int k); + MLPPPCA(std::vector> inputSet, int k); std::vector> principalComponents(); double score(); diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp index 90b70ed..6158a7d 100644 --- a/mlpp/probit_reg/probit_reg.cpp +++ b/mlpp/probit_reg/probit_reg.cpp @@ -15,25 +15,25 @@ #include -ProbitReg::ProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : +MLPPProbitReg::MLPPProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); weights = Utilities::weightInitialization(k); bias = Utilities::biasInitialization(); } -std::vector ProbitReg::modelSetTest(std::vector> X) { +std::vector MLPPProbitReg::modelSetTest(std::vector> X) { return Evaluate(X); } -double ProbitReg::modelTest(std::vector x) { +double MLPPProbitReg::modelTest(std::vector x) { return Evaluate(x); } -void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -63,10 +63,10 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { +void MLPPProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; forwardPass(); @@ -96,11 +96,11 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { } } -void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { // NOTE: ∂y_hat/∂z is sparse MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -138,10 +138,10 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; MLPPLinAlg alg; - Reg regularization; + MLPPReg regularization; double cost_prev = 0; int epoch = 1; @@ -197,46 +197,46 @@ void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, b forwardPass(); } -double ProbitReg::score() { +double MLPPProbitReg::score() { Utilities util; return util.performance(y_hat, outputSet); } -void ProbitReg::save(std::string fileName) { +void 
MLPPProbitReg::save(std::string fileName) { Utilities util; util.saveParameters(fileName, weights, bias); } -double ProbitReg::Cost(std::vector y_hat, std::vector y) { - Reg regularization; +double MLPPProbitReg::Cost(std::vector y_hat, std::vector y) { + MLPPReg regularization; class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector ProbitReg::Evaluate(std::vector> X) { +std::vector MLPPProbitReg::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } -std::vector ProbitReg::propagate(std::vector> X) { +std::vector MLPPProbitReg::propagate(std::vector> X) { MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } -double ProbitReg::Evaluate(std::vector x) { +double MLPPProbitReg::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return avn.gaussianCDF(alg.dot(weights, x) + bias); } -double ProbitReg::propagate(std::vector x) { +double MLPPProbitReg::propagate(std::vector x) { MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // gaussianCDF ( wTx + b ) -void ProbitReg::forwardPass() { +void MLPPProbitReg::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; diff --git a/mlpp/probit_reg/probit_reg.h b/mlpp/probit_reg/probit_reg.h index 278f901..d9a4bd3 100644 --- a/mlpp/probit_reg/probit_reg.h +++ b/mlpp/probit_reg/probit_reg.h @@ -13,9 +13,9 @@ -class ProbitReg { +class MLPPProbitReg { public: - ProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1); diff --git a/mlpp/regularization/reg.cpp b/mlpp/regularization/reg.cpp index 01c9846..de546c0 100644 --- a/mlpp/regularization/reg.cpp +++ b/mlpp/regularization/reg.cpp @@ -12,7 +12,7 @@ -double Reg::regTerm(std::vector weights, double lambda, double alpha, std::string reg) { +double MLPPReg::regTerm(std::vector weights, double lambda, double alpha, std::string reg) { if (reg == "Ridge") { double reg = 0; for (int i = 0; i < weights.size(); i++) { @@ -36,7 +36,7 @@ double Reg::regTerm(std::vector weights, double lambda, double alpha, st return 0; } -double Reg::regTerm(std::vector> weights, double lambda, double alpha, std::string reg) { +double MLPPReg::regTerm(std::vector> weights, double lambda, double alpha, std::string reg) { if (reg == "Ridge") { double reg = 0; for (int i = 0; i < weights.size(); i++) { @@ -66,7 +66,7 @@ double Reg::regTerm(std::vector> weights, double lambda, dou return 0; } -std::vector Reg::regWeights(std::vector weights, double lambda, double alpha, std::string reg) { +std::vector MLPPReg::regWeights(std::vector weights, double lambda, double alpha, std::string reg) { MLPPLinAlg alg; if (reg == "WeightClipping") { return regDerivTerm(weights, lambda, alpha, reg); @@ -78,7 +78,7 @@ std::vector Reg::regWeights(std::vector weights, double lambda, // return weights; } -std::vector> Reg::regWeights(std::vector> weights, double lambda, double alpha, std::string reg) { +std::vector> MLPPReg::regWeights(std::vector> weights, double lambda, double alpha, std::string reg) { MLPPLinAlg alg; if (reg == "WeightClipping") { return regDerivTerm(weights, lambda, alpha, reg); @@ -92,7 +92,7 @@ std::vector> 
Reg::regWeights(std::vector
 // return weights;
 }
-std::vector Reg::regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg) {
+std::vector MLPPReg::regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg) {
 std::vector regDeriv;
 regDeriv.resize(weights.size());
@@ -102,7 +102,7 @@ std::vector Reg::regDerivTerm(std::vector weights, double lambda
 return regDeriv;
 }
-std::vector> Reg::regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg) {
+std::vector> MLPPReg::regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg) {
 std::vector> regDeriv;
 regDeriv.resize(weights.size());
 for (int i = 0; i < regDeriv.size(); i++) {
@@ -117,7 +117,7 @@ std::vector> Reg::regDerivTerm(std::vector
-double Reg::regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg, int j) {
+double MLPPReg::regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg, int j) {
 MLPPActivation act;
 if (reg == "Ridge") {
 return lambda * weights[j];
@@ -140,7 +140,7 @@ double Reg::regDerivTerm(std::vector weights, double lambda, double alph
 }
 }
-double Reg::regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg, int i, int j) {
+double MLPPReg::regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg, int i, int j) {
 MLPPActivation act;
 if (reg == "Ridge") {
 return lambda * weights[i][j];
diff --git a/mlpp/regularization/reg.h b/mlpp/regularization/reg.h
index c2194cb..9fad76a 100644
--- a/mlpp/regularization/reg.h
+++ b/mlpp/regularization/reg.h
@@ -13,7 +13,7 @@
 #include
-class Reg {
+class MLPPReg {
 public:
 double regTerm(std::vector weights, double lambda, double alpha, std::string reg);
 double regTerm(std::vector> weights, double lambda, double alpha, std::string reg);
diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp
index 5fa6219..61b3f95 100644
--- a/mlpp/softmax_net/softmax_net.cpp
+++ b/mlpp/softmax_net/softmax_net.cpp
@@ -16,7 +16,7 @@
 #include
-SoftmaxNet::SoftmaxNet(std::vector> inputSet, std::vector> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
+MLPPSoftmaxNet::MLPPSoftmaxNet(std::vector> inputSet, std::vector> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 y_hat.resize(n);
@@ -26,18 +26,18 @@ SoftmaxNet::SoftmaxNet(std::vector> inputSet, std::vector
-std::vector SoftmaxNet::modelTest(std::vector x) {
+std::vector MLPPSoftmaxNet::modelTest(std::vector x) {
 return Evaluate(x);
 }
-std::vector> SoftmaxNet::modelSetTest(std::vector> X) {
+std::vector> MLPPSoftmaxNet::modelSetTest(std::vector> X) {
 return Evaluate(X);
 }
-void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPSoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
 forwardPass();
@@ -90,10 +90,10 @@ void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 }
 }
-void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPSoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
@@ -144,10 +144,10 @@ void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
 forwardPass();
 }
-void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
@@ -226,12 +226,12 @@ void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
 forwardPass();
 }
-double SoftmaxNet::score() {
+double MLPPSoftmaxNet::score() {
 Utilities util;
 return util.performance(y_hat, outputSet);
 }
-void SoftmaxNet::save(std::string fileName) {
+void MLPPSoftmaxNet::save(std::string fileName) {
 Utilities util;
 util.saveParameters(fileName, weights1, bias1, 0, 1);
 util.saveParameters(fileName, weights2, bias2, 1, 2);
@@ -239,18 +239,18 @@ void SoftmaxNet::save(std::string fileName) {
 MLPPLinAlg alg;
 }
-std::vector> SoftmaxNet::getEmbeddings() {
+std::vector> MLPPSoftmaxNet::getEmbeddings() {
 return weights1;
 }
-double SoftmaxNet::Cost(std::vector> y_hat, std::vector> y) {
- Reg regularization;
+double MLPPSoftmaxNet::Cost(std::vector> y_hat, std::vector> y) {
+ MLPPReg regularization;
 MLPPData data;
 class MLPPCost cost;
 return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
 }
-std::vector> SoftmaxNet::Evaluate(std::vector> X) {
+std::vector> MLPPSoftmaxNet::Evaluate(std::vector> X) {
 MLPPLinAlg alg;
 MLPPActivation avn;
 std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -258,7 +258,7 @@ std::vector> SoftmaxNet::Evaluate(std::vector
-std::tuple>, std::vector>> SoftmaxNet::propagate(std::vector> X) {
+std::tuple>, std::vector>> MLPPSoftmaxNet::propagate(std::vector> X) {
 MLPPLinAlg alg;
 MLPPActivation avn;
 std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -266,7 +266,7 @@ std::tuple>, std::vector>> S
 return { z2, a2 };
 }
-std::vector SoftmaxNet::Evaluate(std::vector x) {
+std::vector MLPPSoftmaxNet::Evaluate(std::vector x) {
 MLPPLinAlg alg;
 MLPPActivation avn;
 std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -274,7 +274,7 @@ std::vector SoftmaxNet::Evaluate(std::vector x) {
 return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2));
 }
-std::tuple, std::vector> SoftmaxNet::propagate(std::vector x) {
+std::tuple, std::vector> MLPPSoftmaxNet::propagate(std::vector x) {
 MLPPLinAlg alg;
 MLPPActivation avn;
 std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -282,7 +282,7 @@ std::tuple, std::vector> SoftmaxNet::propagate(std::
 return { z2, a2 };
 }
-void SoftmaxNet::forwardPass() {
+void MLPPSoftmaxNet::forwardPass() {
 MLPPLinAlg alg;
 MLPPActivation avn;
 z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
diff --git a/mlpp/softmax_net/softmax_net.h b/mlpp/softmax_net/softmax_net.h
index 0e8d3c7..e9d78c2 100644
--- a/mlpp/softmax_net/softmax_net.h
+++ b/mlpp/softmax_net/softmax_net.h
@@ -13,9 +13,9 @@
-class SoftmaxNet {
+class MLPPSoftmaxNet {
 public:
- SoftmaxNet(std::vector> inputSet, std::vector> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+ MLPPSoftmaxNet(std::vector> inputSet, std::vector> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
 std::vector modelTest(std::vector x);
 std::vector> modelSetTest(std::vector> X);
 void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp
index 425b468..39bad0c 100644
--- a/mlpp/softmax_reg/softmax_reg.cpp
+++ b/mlpp/softmax_reg/softmax_reg.cpp
@@ -32,7 +32,7 @@ std::vector> SoftmaxReg::modelSetTest(std::vector
 double SoftmaxReg::Cost(std::vector> y_hat, std::vector> y) {
- Reg regularization;
+ MLPPReg regularization;
 class MLPPCost cost;
 return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp
index 84f106b..9857ffa 100644
--- a/mlpp/svc/svc.cpp
+++ b/mlpp/svc/svc.cpp
@@ -34,7 +34,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 class MLPPCost cost;
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
 forwardPass();
@@ -67,7 +67,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
 class MLPPCost cost;
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
@@ -110,7 +110,7 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
 class MLPPCost cost;
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp
index 13b5016..0e6363d 100644
--- a/mlpp/tanh_reg/tanh_reg.cpp
+++ b/mlpp/tanh_reg/tanh_reg.cpp
@@ -33,7 +33,7 @@ double TanhReg::modelTest(std::vector x) {
 void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
 forwardPass();
@@ -66,7 +66,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
@@ -106,7 +106,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
 void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 double cost_prev = 0;
 int epoch = 1;
@@ -157,7 +157,7 @@ void TanhReg::save(std::string fileName) {
 }
 double TanhReg::Cost(std::vector y_hat, std::vector y) {
- Reg regularization;
+ MLPPReg regularization;
 class MLPPCost cost;
 return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp
index 3a81d64..e28f2c0 100644
--- a/mlpp/wgan/wgan.cpp
+++ b/mlpp/wgan/wgan.cpp
@@ -119,9 +119,9 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit
 void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
 MLPPLinAlg alg;
 if (!network.empty()) {
- outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
+ outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
 } else { // Should never happen.
- outputLayer = new OutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+ outputLayer = new MLPPOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
 }
 }
@@ -155,7 +155,7 @@ std::vector WGAN::modelSetTestDiscriminator(std::vector
 double WGAN::Cost(std::vector y_hat, std::vector y) {
- Reg regularization;
+ MLPPReg regularization;
 class MLPPCost cost;
 double totalRegTerm = 0;
@@ -220,7 +220,7 @@ std::tuple>>, std::vector> W
 class MLPPCost cost;
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@@ -256,7 +256,7 @@ std::vector>> WGAN::computeGeneratorGradients(st
 class MLPPCost cost;
 MLPPActivation avn;
 MLPPLinAlg alg;
- Reg regularization;
+ MLPPReg regularization;
 std::vector>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h
index 4872f4d..fbccf7d 100644
--- a/mlpp/wgan/wgan.h
+++ b/mlpp/wgan/wgan.h
@@ -47,7 +47,7 @@ private:
 std::vector y_hat;
 std::vector network;
- OutputLayer *outputLayer;
+ MLPPOutputLayer *outputLayer;
 int n;
 int k;