From 1381b5f70ef1f44dfa856ee6101f17701f587308 Mon Sep 17 00:00:00 2001 From: Relintai Date: Tue, 24 Jan 2023 19:29:29 +0100 Subject: [PATCH] Prefix classes with MLPP. --- mlpp/ann/ann.cpp | 78 ++++++++++++++-------------- mlpp/ann/ann.h | 6 +-- mlpp/auto_encoder/auto_encoder.cpp | 28 +++++----- mlpp/auto_encoder/auto_encoder.h | 4 +- mlpp/bernoulli_nb/bernoulli_nb.cpp | 14 ++--- mlpp/bernoulli_nb/bernoulli_nb.h | 4 +- mlpp/c_log_log_reg/c_log_log_reg.cpp | 28 +++++----- mlpp/c_log_log_reg/c_log_log_reg.h | 4 +- 8 files changed, 83 insertions(+), 83 deletions(-) diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index efda628..30d0770 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -15,15 +15,15 @@ #include #include -ANN::ANN(std::vector> inputSet, std::vector outputSet) : +MLPPANN::MLPPANN(std::vector> inputSet, std::vector outputSet) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) { } -ANN::~ANN() { +MLPPANN::~MLPPANN() { delete outputLayer; } -std::vector ANN::modelSetTest(std::vector> X) { +std::vector MLPPANN::modelSetTest(std::vector> X) { if (!network.empty()) { network[0].input = X; network[0].forwardPass(); @@ -40,7 +40,7 @@ std::vector ANN::modelSetTest(std::vector> X) { return outputLayer->a; } -double ANN::modelTest(std::vector x) { +double MLPPANN::modelTest(std::vector x) { if (!network.empty()) { network[0].Test(x); for (int i = 1; i < network.size(); i++) { @@ -53,7 +53,7 @@ double ANN::modelTest(std::vector x) { return outputLayer->a_test; } -void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class Cost cost; LinAlg alg; double cost_prev = 0; @@ -77,7 +77,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputSet); + MLPPANN::UI(epoch, cost_prev, y_hat, outputSet); } epoch++; @@ -87,7 +87,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void ANN::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) { class Cost cost; LinAlg alg; @@ -114,7 +114,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = modelSetTest({ inputSet[outputIndex] }); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] }); + MLPPANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] }); } epoch++; @@ -125,7 +125,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { class Cost cost; LinAlg alg; @@ -152,7 +152,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -163,7 +163,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI forwardPass(); } -void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) { +void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) { class Cost cost; LinAlg alg; @@ -209,7 +209,7 @@ void 
ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, dou y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -220,7 +220,7 @@ void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, dou forwardPass(); } -void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) { +void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) { class Cost cost; LinAlg alg; @@ -265,7 +265,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, doub y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -276,7 +276,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, doub forwardPass(); } -void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) { +void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) { class Cost cost; LinAlg alg; @@ -321,7 +321,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, dou y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -332,7 +332,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, dou forwardPass(); } -void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { +void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class Cost cost; LinAlg alg; @@ -388,7 +388,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -399,7 +399,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double forwardPass(); } -void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { +void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class Cost cost; LinAlg alg; @@ -453,7 +453,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, doubl y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -464,7 +464,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, doubl forwardPass(); } -void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { +void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class Cost cost; LinAlg alg; @@ -523,7 +523,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, 
outputMiniBatches[i]); } } epoch++; @@ -534,7 +534,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double forwardPass(); } -void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { +void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { class Cost cost; LinAlg alg; @@ -594,7 +594,7 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, doub y_hat = modelSetTest(inputMiniBatches[i]); if (UI) { - ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); + MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]); } } epoch++; @@ -605,13 +605,13 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, doub forwardPass(); } -double ANN::score() { +double MLPPANN::score() { Utilities util; forwardPass(); return util.performance(y_hat, outputSet); } -void ANN::save(std::string fileName) { +void MLPPANN::save(std::string fileName) { Utilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); @@ -624,20 +624,20 @@ void ANN::save(std::string fileName) { } } -void ANN::setLearningRateScheduler(std::string type, double decayConstant) { +void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant) { lrScheduler = type; - ANN::decayConstant = decayConstant; + MLPPANN::decayConstant = decayConstant; } -void ANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) { +void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) { lrScheduler = type; - ANN::decayConstant = decayConstant; - ANN::dropRate = dropRate; + MLPPANN::decayConstant = decayConstant; + MLPPANN::dropRate = dropRate; } // https://en.wikipedia.org/wiki/Learning_rate // Learning Rate Decay (C2W2L09) - Andrew Ng - Deep Learning Specialization -double ANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) { +double MLPPANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) { if (lrScheduler == "Time") { return learningRate / (1 + decayConstant * epoch); } else if (lrScheduler == "Epoch") { @@ -650,7 +650,7 @@ double ANN::applyLearningRateScheduler(double learningRate, double decayConstant return learningRate; } -void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { if (network.empty()) { network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha)); network[0].forwardPass(); @@ -660,7 +660,7 @@ void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, } } -void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) { LinAlg alg; if (!network.empty()) { outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha); @@ -669,7 +669,7 @@ void ANN::addOutputLayer(std::string activation, std::string loss, std::string w } } -double 
ANN::Cost(std::vector y_hat, std::vector y) { +double MLPPANN::Cost(std::vector y_hat, std::vector y) { Reg regularization; class Cost cost; double totalRegTerm = 0; @@ -683,7 +683,7 @@ double ANN::Cost(std::vector y_hat, std::vector y) { return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg); } -void ANN::forwardPass() { +void MLPPANN::forwardPass() { if (!network.empty()) { network[0].input = inputSet; network[0].forwardPass(); @@ -700,7 +700,7 @@ void ANN::forwardPass() { y_hat = outputLayer->a; } -void ANN::updateParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { +void MLPPANN::updateParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { LinAlg alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); @@ -717,7 +717,7 @@ void ANN::updateParameters(std::vector>> hiddenL } } -std::tuple>>, std::vector> ANN::computeGradients(std::vector y_hat, std::vector outputSet) { +std::tuple>>, std::vector> MLPPANN::computeGradients(std::vector y_hat, std::vector outputSet) { // std::cout << "BEGIN" << std::endl; class Cost cost; MLPPActivation avn; @@ -749,7 +749,7 @@ std::tuple>>, std::vector> A return { cumulativeHiddenLayerWGrad, outputWGrad }; } -void ANN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { +void MLPPANN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer " << network.size() + 1 << ": " << std::endl; Utilities::UI(outputLayer->weights, outputLayer->bias); diff --git a/mlpp/ann/ann.h b/mlpp/ann/ann.h index 52f85db..fe582d1 100644 --- a/mlpp/ann/ann.h +++ b/mlpp/ann/ann.h @@ -14,10 +14,10 @@ #include #include -class ANN { +class MLPPANN { public: - ANN(std::vector> inputSet, std::vector outputSet); - ~ANN(); + MLPPANN(std::vector> inputSet, std::vector outputSet); + ~MLPPANN(); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp index 1f77f07..2cf3dc2 100644 --- a/mlpp/auto_encoder/auto_encoder.cpp +++ b/mlpp/auto_encoder/auto_encoder.cpp @@ -13,7 +13,7 @@ #include #include -AutoEncoder::AutoEncoder(std::vector> inputSet, int n_hidden) : +MLPPAutoEncoder::MLPPAutoEncoder(std::vector> inputSet, int n_hidden) : inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) { MLPPActivation avn; y_hat.resize(inputSet.size()); @@ -24,15 +24,15 @@ AutoEncoder::AutoEncoder(std::vector> inputSet, int n_hidden bias2 = Utilities::biasInitialization(k); } -std::vector> AutoEncoder::modelSetTest(std::vector> X) { +std::vector> MLPPAutoEncoder::modelSetTest(std::vector> X) { return Evaluate(X); } -std::vector AutoEncoder::modelTest(std::vector x) { +std::vector MLPPAutoEncoder::modelTest(std::vector x) { return Evaluate(x); } -void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; LinAlg alg; double cost_prev = 0; @@ -85,7 +85,7 @@ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) } } -void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { +void 
MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; LinAlg alg; double cost_prev = 0; @@ -136,7 +136,7 @@ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; LinAlg alg; double cost_prev = 0; @@ -196,23 +196,23 @@ void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, forwardPass(); } -double AutoEncoder::score() { +double MLPPAutoEncoder::score() { Utilities util; return util.performance(y_hat, inputSet); } -void AutoEncoder::save(std::string fileName) { +void MLPPAutoEncoder::save(std::string fileName) { Utilities util; util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights2, bias2, 1, 2); } -double AutoEncoder::Cost(std::vector> y_hat, std::vector> y) { +double MLPPAutoEncoder::Cost(std::vector> y_hat, std::vector> y) { class Cost cost; return cost.MSE(y_hat, inputSet); } -std::vector> AutoEncoder::Evaluate(std::vector> X) { +std::vector> MLPPAutoEncoder::Evaluate(std::vector> X) { LinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); @@ -220,7 +220,7 @@ std::vector> AutoEncoder::Evaluate(std::vector>, std::vector>> AutoEncoder::propagate(std::vector> X) { +std::tuple>, std::vector>> MLPPAutoEncoder::propagate(std::vector> X) { LinAlg alg; MLPPActivation avn; std::vector> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); @@ -228,7 +228,7 @@ std::tuple>, std::vector>> A return { z2, a2 }; } -std::vector AutoEncoder::Evaluate(std::vector x) { +std::vector MLPPAutoEncoder::Evaluate(std::vector x) { LinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); @@ -236,7 +236,7 @@ std::vector AutoEncoder::Evaluate(std::vector x) { return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2); } -std::tuple, std::vector> AutoEncoder::propagate(std::vector x) { +std::tuple, std::vector> MLPPAutoEncoder::propagate(std::vector x) { LinAlg alg; MLPPActivation avn; std::vector z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); @@ -244,7 +244,7 @@ std::tuple, std::vector> AutoEncoder::propagate(std: return { z2, a2 }; } -void AutoEncoder::forwardPass() { +void MLPPAutoEncoder::forwardPass() { LinAlg alg; MLPPActivation avn; z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); diff --git a/mlpp/auto_encoder/auto_encoder.h b/mlpp/auto_encoder/auto_encoder.h index db98dce..e29f586 100644 --- a/mlpp/auto_encoder/auto_encoder.h +++ b/mlpp/auto_encoder/auto_encoder.h @@ -12,9 +12,9 @@ #include #include -class AutoEncoder { +class MLPPAutoEncoder { public: - AutoEncoder(std::vector> inputSet, int n_hidden); + MLPPAutoEncoder(std::vector> inputSet, int n_hidden); std::vector> modelSetTest(std::vector> X); std::vector modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp index 494946b..5489736 100644 --- a/mlpp/bernoulli_nb/bernoulli_nb.cpp +++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp @@ -12,13 +12,13 @@ #include #include -BernoulliNB::BernoulliNB(std::vector> inputSet, std::vector outputSet) : +MLPPBernoulliNB::MLPPBernoulliNB(std::vector> inputSet, std::vector outputSet) : inputSet(inputSet), 
outputSet(outputSet), class_num(2) { y_hat.resize(outputSet.size()); Evaluate(); } -std::vector BernoulliNB::modelSetTest(std::vector> X) { +std::vector MLPPBernoulliNB::modelSetTest(std::vector> X) { std::vector y_hat; for (int i = 0; i < X.size(); i++) { y_hat.push_back(modelTest(X[i])); @@ -26,7 +26,7 @@ std::vector BernoulliNB::modelSetTest(std::vector> X return y_hat; } -double BernoulliNB::modelTest(std::vector x) { +double MLPPBernoulliNB::modelTest(std::vector x) { double score_0 = 1; double score_1 = 1; @@ -68,18 +68,18 @@ double BernoulliNB::modelTest(std::vector x) { } } -double BernoulliNB::score() { +double MLPPBernoulliNB::score() { Utilities util; return util.performance(y_hat, outputSet); } -void BernoulliNB::computeVocab() { +void MLPPBernoulliNB::computeVocab() { LinAlg alg; Data data; vocab = data.vecToSet(alg.flatten(inputSet)); } -void BernoulliNB::computeTheta() { +void MLPPBernoulliNB::computeTheta() { // Resizing theta for the sake of ease & proper access of the elements. theta.resize(class_num); @@ -107,7 +107,7 @@ void BernoulliNB::computeTheta() { } } -void BernoulliNB::Evaluate() { +void MLPPBernoulliNB::Evaluate() { for (int i = 0; i < outputSet.size(); i++) { // Pr(B | A) * Pr(A) double score_0 = 1; diff --git a/mlpp/bernoulli_nb/bernoulli_nb.h b/mlpp/bernoulli_nb/bernoulli_nb.h index 8ae08bc..1e76644 100644 --- a/mlpp/bernoulli_nb/bernoulli_nb.h +++ b/mlpp/bernoulli_nb/bernoulli_nb.h @@ -11,9 +11,9 @@ #include #include -class BernoulliNB { +class MLPPBernoulliNB { public: - BernoulliNB(std::vector> inputSet, std::vector outputSet); + MLPPBernoulliNB(std::vector> inputSet, std::vector outputSet); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); double score(); diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index 616a15c..25a2a08 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -14,22 +14,22 @@ #include #include -CLogLogReg::CLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : +MLPPCLogLogReg::MLPPCLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); weights = Utilities::weightInitialization(k); bias = Utilities::biasInitialization(); } -std::vector CLogLogReg::modelSetTest(std::vector> X) { +std::vector MLPPCLogLogReg::modelSetTest(std::vector> X) { return Evaluate(X); } -double CLogLogReg::modelTest(std::vector x) { +double MLPPCLogLogReg::modelTest(std::vector x) { return Evaluate(x); } -void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; LinAlg alg; Reg regularization; @@ -63,7 +63,7 @@ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { +void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; LinAlg alg; Reg regularization; @@ -95,7 +95,7 @@ void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { } } -void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { LinAlg alg; Reg regularization; double cost_prev = 0; @@ -136,7 +136,7 @@ void 
CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; LinAlg alg; Reg regularization; @@ -179,41 +179,41 @@ void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, forwardPass(); } -double CLogLogReg::score() { +double MLPPCLogLogReg::score() { Utilities util; return util.performance(y_hat, outputSet); } -double CLogLogReg::Cost(std::vector y_hat, std::vector y) { +double MLPPCLogLogReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; class Cost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector CLogLogReg::Evaluate(std::vector> X) { +std::vector MLPPCLogLogReg::Evaluate(std::vector> X) { LinAlg alg; MLPPActivation avn; return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } -std::vector CLogLogReg::propagate(std::vector> X) { +std::vector MLPPCLogLogReg::propagate(std::vector> X) { LinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } -double CLogLogReg::Evaluate(std::vector x) { +double MLPPCLogLogReg::Evaluate(std::vector x) { LinAlg alg; MLPPActivation avn; return avn.cloglog(alg.dot(weights, x) + bias); } -double CLogLogReg::propagate(std::vector x) { +double MLPPCLogLogReg::propagate(std::vector x) { LinAlg alg; return alg.dot(weights, x) + bias; } // cloglog ( wTx + b ) -void CLogLogReg::forwardPass() { +void MLPPCLogLogReg::forwardPass() { LinAlg alg; MLPPActivation avn; diff --git a/mlpp/c_log_log_reg/c_log_log_reg.h b/mlpp/c_log_log_reg/c_log_log_reg.h index da65675..08622e2 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.h +++ b/mlpp/c_log_log_reg/c_log_log_reg.h @@ -11,9 +11,9 @@ #include #include -class CLogLogReg { +class MLPPCLogLogReg { public: - CLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPCLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
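
For reference, the "Time" branch of MLPPANN::applyLearningRateScheduler visible in the ann.cpp hunk above is the usual time-based decay, which in LaTeX reads

    \eta_{\text{epoch}} = \frac{\eta_0}{1 + k \cdot \text{epoch}}

where \eta_0 is learning_rate and k is decayConstant. The "Epoch" and "Step" branches are truncated in that hunk, so their exact formulas are not restated here.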
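
Similarly, the "// cloglog ( wTx + b )" comment in c_log_log_reg.cpp describes the model's forward pass. Assuming MLPPActivation::cloglog is the standard complementary log-log inverse link (an assumption, since its body is not part of this patch), the prediction is

    \hat{y} = 1 - \exp\!\bigl(-\exp(w^{\top} x + b)\bigr).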
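
Since this patch only renames the classes, downstream callers must switch to the MLPP-prefixed names as well. Below is a minimal caller sketch after applying the patch. The element types (std::vector<std::vector<double>> and std::vector<double>) are assumed from the library's usual signatures because the template arguments are missing from this copy of the diff, and the dataset, layer width, and the "Sigmoid"/"LogLoss"/"Default"/"None"/"Time" strings are illustrative assumptions rather than values taken from the patch.

#include "mlpp/ann/ann.h"

#include <vector>

int main() {
	// Tiny toy dataset; the values are placeholders for illustration only.
	std::vector<std::vector<double>> inputSet = { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } };
	std::vector<double> outputSet = { 0, 1, 1, 0 };

	MLPPANN ann(inputSet, outputSet); // was: ANN ann(inputSet, outputSet);
	ann.addLayer(4, "Sigmoid", "Default", "None", 0.5, 0.5);               // hidden layer
	ann.addOutputLayer("Sigmoid", "LogLoss", "Default", "None", 0.5, 0.5); // output layer + loss
	ann.setLearningRateScheduler("Time", 0.01); // time-based decay, as in the hunk above
	ann.gradientDescent(0.1, 1000, false);      // learning_rate, max_epoch, UI off

	double performance = ann.score(); // training-set performance via Utilities::performance
	(void)performance;
	return 0;
}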