Prefix classes with MLPP.

Relintai 2023-01-24 19:29:29 +01:00
parent 18c4ae6ea1
commit 1381b5f70e
8 changed files with 83 additions and 83 deletions
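The rename touches four classes across these eight files: ANN, AutoEncoder, BernoulliNB, and CLogLogReg become MLPPANN, MLPPAutoEncoder, MLPPBernoulliNB, and MLPPCLogLogReg. Constructors, method names, and signatures are untouched, so callers only swap the type name. A minimal caller-side sketch, assuming a toy dataset and that "Sigmoid" and "LogLoss" are valid activation/loss option strings with default weight-initialization and regularization arguments (none of which appear in this diff):

#include <iostream>
#include <vector>
// #include "ann.h"  // header path is unchanged by this commit (exact path assumed)

int main() {
	std::vector<std::vector<double>> inputSet = { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } };
	std::vector<double> outputSet = { 0, 1, 1, 0 };

	// Before this commit: ANN ann(inputSet, outputSet);
	MLPPANN ann(inputSet, outputSet);
	ann.addLayer(3, "Sigmoid");               // hidden layer; activation name assumed
	ann.addOutputLayer("Sigmoid", "LogLoss"); // output layer; loss name assumed
	ann.gradientDescent(0.1, 1000, false);    // learning_rate, max_epoch, UI
	std::cout << ann.score() << std::endl;
	return 0;
}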

View File

@@ -15,15 +15,15 @@
#include <iostream>
#include <random>
ANN::ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
MLPPANN::MLPPANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) {
}
ANN::~ANN() {
MLPPANN::~MLPPANN() {
delete outputLayer;
}
std::vector<double> ANN::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> MLPPANN::modelSetTest(std::vector<std::vector<double>> X) {
if (!network.empty()) {
network[0].input = X;
network[0].forwardPass();
@@ -40,7 +40,7 @@ std::vector<double> ANN::modelSetTest(std::vector<std::vector<double>> X) {
return outputLayer->a;
}
double ANN::modelTest(std::vector<double> x) {
double MLPPANN::modelTest(std::vector<double> x) {
if (!network.empty()) {
network[0].Test(x);
for (int i = 1; i < network.size(); i++) {
@@ -53,7 +53,7 @@ double ANN::modelTest(std::vector<double> x) {
return outputLayer->a_test;
}
void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
LinAlg alg;
double cost_prev = 0;
@@ -77,7 +77,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputSet);
MLPPANN::UI(epoch, cost_prev, y_hat, outputSet);
}
epoch++;
@@ -87,7 +87,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
class Cost cost;
LinAlg alg;
@@ -114,7 +114,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = modelSetTest({ inputSet[outputIndex] });
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] });
MLPPANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] });
}
epoch++;
@@ -125,7 +125,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class Cost cost;
LinAlg alg;
@@ -152,7 +152,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -163,7 +163,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
forwardPass();
}
void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
class Cost cost;
LinAlg alg;
@@ -209,7 +209,7 @@ void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -220,7 +220,7 @@ void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
forwardPass();
}
void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -265,7 +265,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -276,7 +276,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
forwardPass();
}
void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -321,7 +321,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -332,7 +332,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
forwardPass();
}
void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -388,7 +388,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -399,7 +399,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
forwardPass();
}
void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -453,7 +453,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -464,7 +464,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
forwardPass();
}
void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -523,7 +523,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -534,7 +534,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
forwardPass();
}
void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
class Cost cost;
LinAlg alg;
@@ -594,7 +594,7 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
y_hat = modelSetTest(inputMiniBatches[i]);
if (UI) {
ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
}
}
epoch++;
@@ -605,13 +605,13 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
forwardPass();
}
double ANN::score() {
double MLPPANN::score() {
Utilities util;
forwardPass();
return util.performance(y_hat, outputSet);
}
void ANN::save(std::string fileName) {
void MLPPANN::save(std::string fileName) {
Utilities util;
if (!network.empty()) {
util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
@@ -624,20 +624,20 @@ void ANN::save(std::string fileName) {
}
}
void ANN::setLearningRateScheduler(std::string type, double decayConstant) {
void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant) {
lrScheduler = type;
ANN::decayConstant = decayConstant;
MLPPANN::decayConstant = decayConstant;
}
void ANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) {
void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) {
lrScheduler = type;
ANN::decayConstant = decayConstant;
ANN::dropRate = dropRate;
MLPPANN::decayConstant = decayConstant;
MLPPANN::dropRate = dropRate;
}
// https://en.wikipedia.org/wiki/Learning_rate
// Learning Rate Decay (C2W2L09) - Andrew Ng - Deep Learning Specialization
double ANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
double MLPPANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
if (lrScheduler == "Time") {
return learningRate / (1 + decayConstant * epoch);
} else if (lrScheduler == "Epoch") {
@@ -650,7 +650,7 @@ double ANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
return learningRate;
}
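As a quick worked example for the "Time" scheduler above (not taken from the source): with learning_rate = 0.1 and decayConstant = 0.01, epoch 50 yields 0.1 / (1 + 0.01 * 50) ≈ 0.067, so the step size decays smoothly as training progresses. The body of the "Epoch" branch falls outside this hunk, so its formula is not reproduced here.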
void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
if (network.empty()) {
network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
network[0].forwardPass();
@@ -660,7 +660,7 @@ void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
}
}
void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
LinAlg alg;
if (!network.empty()) {
outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
@@ -669,7 +669,7 @@ void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
}
}
double ANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
double MLPPANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
Reg regularization;
class Cost cost;
double totalRegTerm = 0;
@@ -683,7 +683,7 @@ double ANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
}
void ANN::forwardPass() {
void MLPPANN::forwardPass() {
if (!network.empty()) {
network[0].input = inputSet;
network[0].forwardPass();
@@ -700,7 +700,7 @@ void ANN::forwardPass() {
y_hat = outputLayer->a;
}
void ANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
void MLPPANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
LinAlg alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
@@ -717,7 +717,7 @@ void ANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
}
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
// std::cout << "BEGIN" << std::endl;
class Cost cost;
MLPPActivation avn;
@@ -749,7 +749,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
return { cumulativeHiddenLayerWGrad, outputWGrad };
}
void ANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
void MLPPANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
Utilities::UI(outputLayer->weights, outputLayer->bias);

View File

@@ -14,10 +14,10 @@
#include <tuple>
#include <vector>
class ANN {
class MLPPANN {
public:
ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
~ANN();
MLPPANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
~MLPPANN();
std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@@ -13,7 +13,7 @@
#include <iostream>
#include <random>
AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
MLPPAutoEncoder::MLPPAutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
MLPPActivation avn;
y_hat.resize(inputSet.size());
@@ -24,15 +24,15 @@ AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
bias2 = Utilities::biasInitialization(k);
}
std::vector<std::vector<double>> AutoEncoder::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<std::vector<double>> MLPPAutoEncoder::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X);
}
std::vector<double> AutoEncoder::modelTest(std::vector<double> x) {
std::vector<double> MLPPAutoEncoder::modelTest(std::vector<double> x) {
return Evaluate(x);
}
void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
@@ -85,7 +85,7 @@ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
@@ -136,7 +136,7 @@ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
double cost_prev = 0;
@@ -196,23 +196,23 @@ void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
forwardPass();
}
double AutoEncoder::score() {
double MLPPAutoEncoder::score() {
Utilities util;
return util.performance(y_hat, inputSet);
}
void AutoEncoder::save(std::string fileName) {
void MLPPAutoEncoder::save(std::string fileName) {
Utilities util;
util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2);
}
double AutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
double MLPPAutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
class Cost cost;
return cost.MSE(y_hat, inputSet);
}
std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
std::vector<std::vector<double>> MLPPAutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -220,7 +220,7 @@ std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
return alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
}
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> AutoEncoder::propagate(std::vector<std::vector<double>> X) {
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPAutoEncoder::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -228,7 +228,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> AutoEncoder::propagate(std::vector<std::vector<double>> X) {
return { z2, a2 };
}
std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
std::vector<double> MLPPAutoEncoder::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -236,7 +236,7 @@ std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2);
}
std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std::vector<double> x) {
std::tuple<std::vector<double>, std::vector<double>> MLPPAutoEncoder::propagate(std::vector<double> x) {
LinAlg alg;
MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -244,7 +244,7 @@ std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std::vector<double> x) {
return { z2, a2 };
}
void AutoEncoder::forwardPass() {
void MLPPAutoEncoder::forwardPass() {
LinAlg alg;
MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);

View File

@@ -12,9 +12,9 @@
#include <tuple>
#include <vector>
class AutoEncoder {
class MLPPAutoEncoder {
public:
AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
MLPPAutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
std::vector<double> modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@@ -12,13 +12,13 @@
#include <iostream>
#include <random>
BernoulliNB::BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
MLPPBernoulliNB::MLPPBernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
inputSet(inputSet), outputSet(outputSet), class_num(2) {
y_hat.resize(outputSet.size());
Evaluate();
}
std::vector<double> BernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> MLPPBernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> y_hat;
for (int i = 0; i < X.size(); i++) {
y_hat.push_back(modelTest(X[i]));
@@ -26,7 +26,7 @@ std::vector<double> BernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
return y_hat;
}
double BernoulliNB::modelTest(std::vector<double> x) {
double MLPPBernoulliNB::modelTest(std::vector<double> x) {
double score_0 = 1;
double score_1 = 1;
@@ -68,18 +68,18 @@ double BernoulliNB::modelTest(std::vector<double> x) {
}
}
double BernoulliNB::score() {
double MLPPBernoulliNB::score() {
Utilities util;
return util.performance(y_hat, outputSet);
}
void BernoulliNB::computeVocab() {
void MLPPBernoulliNB::computeVocab() {
LinAlg alg;
Data data;
vocab = data.vecToSet<double>(alg.flatten(inputSet));
}
void BernoulliNB::computeTheta() {
void MLPPBernoulliNB::computeTheta() {
// Resizing theta for the sake of ease & proper access of the elements.
theta.resize(class_num);
@@ -107,7 +107,7 @@ void BernoulliNB::computeTheta() {
}
}
void BernoulliNB::Evaluate() {
void MLPPBernoulliNB::Evaluate() {
for (int i = 0; i < outputSet.size(); i++) {
// Pr(B | A) * Pr(A)
double score_0 = 1;

View File

@@ -11,9 +11,9 @@
#include <map>
#include <vector>
class BernoulliNB {
class MLPPBernoulliNB {
public:
BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
MLPPBernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x);
double score();

View File

@@ -14,22 +14,22 @@
#include <iostream>
#include <random>
CLogLogReg::CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
}
std::vector<double> CLogLogReg::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> MLPPCLogLogReg::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X);
}
double CLogLogReg::modelTest(std::vector<double> x) {
double MLPPCLogLogReg::modelTest(std::vector<double> x) {
return Evaluate(x);
}
void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@@ -63,7 +63,7 @@ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@@ -95,7 +95,7 @@ void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
}
}
void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
LinAlg alg;
Reg regularization;
double cost_prev = 0;
@@ -136,7 +136,7 @@ void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
LinAlg alg;
Reg regularization;
@@ -179,41 +179,41 @@ void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
forwardPass();
}
double CLogLogReg::score() {
double MLPPCLogLogReg::score() {
Utilities util;
return util.performance(y_hat, outputSet);
}
double CLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
double MLPPCLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
Reg regularization;
class Cost cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}
std::vector<double> CLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPCLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
LinAlg alg;
MLPPActivation avn;
return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> CLogLogReg::propagate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPCLogLogReg::propagate(std::vector<std::vector<double>> X) {
LinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double CLogLogReg::Evaluate(std::vector<double> x) {
double MLPPCLogLogReg::Evaluate(std::vector<double> x) {
LinAlg alg;
MLPPActivation avn;
return avn.cloglog(alg.dot(weights, x) + bias);
}
double CLogLogReg::propagate(std::vector<double> x) {
double MLPPCLogLogReg::propagate(std::vector<double> x) {
LinAlg alg;
return alg.dot(weights, x) + bias;
}
// cloglog ( wTx + b )
void CLogLogReg::forwardPass() {
void MLPPCLogLogReg::forwardPass() {
LinAlg alg;
MLPPActivation avn;

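For reference, and assuming MLPPActivation's cloglog is the standard complementary log-log inverse link cloglog(z) = 1 - exp(-exp(z)): a pre-activation of z = 0 maps to 1 - exp(-1) ≈ 0.632, and the curve approaches 1 far more quickly than it approaches 0, which is the asymmetry that separates this model from ordinary logistic regression.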
View File

@@ -11,9 +11,9 @@
#include <string>
#include <vector>
class CLogLogReg {
class MLPPCLogLogReg {
public:
CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
MLPPCLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);