Prefix classes with MLPP.

Relintai 2023-01-24 19:29:29 +01:00
parent 18c4ae6ea1
commit 1381b5f70e
8 changed files with 83 additions and 83 deletions
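The change is mechanical: every public class gains an MLPP prefix (ANN becomes MLPPANN, AutoEncoder becomes MLPPAutoEncoder, BernoulliNB becomes MLPPBernoulliNB, CLogLogReg becomes MLPPCLogLogReg) so the library's symbols no longer collide when built into a larger codebase. Call sites rename one-for-one; a minimal sketch of the effect on user code (the include path and data are made up for illustration):

#include "ann.h" // assumed include path

#include <vector>

int main() {
    std::vector<std::vector<double>> X = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } };
    std::vector<double> y = { 0.0, 1.0, 1.0, 0.0 };

    // Before this commit:
    //   ANN model(X, y);
    // After:
    MLPPANN model(X, y);
    return 0;
}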

ann.cpp

@@ -15,15 +15,15 @@
 #include <iostream>
 #include <random>
-ANN::ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
+MLPPANN::MLPPANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
         inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) {
 }
-ANN::~ANN() {
+MLPPANN::~MLPPANN() {
     delete outputLayer;
 }
-std::vector<double> ANN::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPANN::modelSetTest(std::vector<std::vector<double>> X) {
     if (!network.empty()) {
         network[0].input = X;
         network[0].forwardPass();
@@ -40,7 +40,7 @@ std::vector<double> ANN::modelSetTest(std::vector<std::vector<double>> X) {
     return outputLayer->a;
 }
-double ANN::modelTest(std::vector<double> x) {
+double MLPPANN::modelTest(std::vector<double> x) {
     if (!network.empty()) {
         network[0].Test(x);
         for (int i = 1; i < network.size(); i++) {
@@ -53,7 +53,7 @@ double ANN::modelTest(std::vector<double> x) {
     return outputLayer->a_test;
 }
-void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     class Cost cost;
     LinAlg alg;
     double cost_prev = 0;
@@ -77,7 +77,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
         forwardPass();
         if (UI) {
-            ANN::UI(epoch, cost_prev, y_hat, outputSet);
+            MLPPANN::UI(epoch, cost_prev, y_hat, outputSet);
         }
         epoch++;
@@ -87,7 +87,7 @@ void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     }
 }
-void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -114,7 +114,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
         y_hat = modelSetTest({ inputSet[outputIndex] });
         if (UI) {
-            ANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] });
+            MLPPANN::UI(epoch, cost_prev, y_hat, { outputSet[outputIndex] });
         }
         epoch++;
@@ -125,7 +125,7 @@ void ANN::SGD(double learning_rate, int max_epoch, bool UI) {
     forwardPass();
 }
-void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -152,7 +152,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -163,7 +163,7 @@ void ANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     forwardPass();
 }
-void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
+void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -209,7 +209,7 @@ void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -220,7 +220,7 @@ void ANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) {
     forwardPass();
 }
-void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
+void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -265,7 +265,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -276,7 +276,7 @@ void ANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) {
     forwardPass();
 }
-void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
+void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -321,7 +321,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -332,7 +332,7 @@ void ANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) {
     forwardPass();
 }
-void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
+void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -388,7 +388,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -399,7 +399,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     forwardPass();
 }
-void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
+void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -453,7 +453,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -464,7 +464,7 @@ void ANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     forwardPass();
 }
-void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
+void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -523,7 +523,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
            y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -534,7 +534,7 @@ void ANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     forwardPass();
 }
-void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
+void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     class Cost cost;
     LinAlg alg;
@@ -594,7 +594,7 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
             y_hat = modelSetTest(inputMiniBatches[i]);
             if (UI) {
-                ANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
+                MLPPANN::UI(epoch, cost_prev, y_hat, outputMiniBatches[i]);
             }
         }
         epoch++;
@@ -605,13 +605,13 @@ void ANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) {
     forwardPass();
 }
-double ANN::score() {
+double MLPPANN::score() {
     Utilities util;
     forwardPass();
     return util.performance(y_hat, outputSet);
 }
-void ANN::save(std::string fileName) {
+void MLPPANN::save(std::string fileName) {
     Utilities util;
     if (!network.empty()) {
         util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
@@ -624,20 +624,20 @@ void ANN::save(std::string fileName) {
     }
 }
-void ANN::setLearningRateScheduler(std::string type, double decayConstant) {
+void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant) {
     lrScheduler = type;
-    ANN::decayConstant = decayConstant;
+    MLPPANN::decayConstant = decayConstant;
 }
-void ANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) {
+void MLPPANN::setLearningRateScheduler(std::string type, double decayConstant, double dropRate) {
     lrScheduler = type;
-    ANN::decayConstant = decayConstant;
-    ANN::dropRate = dropRate;
+    MLPPANN::decayConstant = decayConstant;
+    MLPPANN::dropRate = dropRate;
 }
 // https://en.wikipedia.org/wiki/Learning_rate
 // Learning Rate Decay (C2W2L09) - Andrew Ng - Deep Learning Specialization
-double ANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
+double MLPPANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
     if (lrScheduler == "Time") {
         return learningRate / (1 + decayConstant * epoch);
     } else if (lrScheduler == "Epoch") {
@@ -650,7 +650,7 @@ double ANN::applyLearningRateScheduler(double learningRate, double decayConstant, double epoch, double dropRate) {
     return learningRate;
 }
-void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
     if (network.empty()) {
         network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
         network[0].forwardPass();
@@ -660,7 +660,7 @@ void ANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
     }
 }
-void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
     LinAlg alg;
     if (!network.empty()) {
         outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
@@ -669,7 +669,7 @@ void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
     }
 }
-double ANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
     Reg regularization;
     class Cost cost;
     double totalRegTerm = 0;
@@ -683,7 +683,7 @@ double ANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
     return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
 }
-void ANN::forwardPass() {
+void MLPPANN::forwardPass() {
     if (!network.empty()) {
         network[0].input = inputSet;
         network[0].forwardPass();
@@ -700,7 +700,7 @@ void ANN::forwardPass() {
     y_hat = outputLayer->a;
 }
-void ANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
+void MLPPANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
     LinAlg alg;
     outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
@@ -717,7 +717,7 @@ void ANN::updateParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
     }
 }
-std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
+std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
     // std::cout << "BEGIN" << std::endl;
     class Cost cost;
     MLPPActivation avn;
@@ -749,7 +749,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
     return { cumulativeHiddenLayerWGrad, outputWGrad };
 }
-void ANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
+void MLPPANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
     Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
     std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
     Utilities::UI(outputLayer->weights, outputLayer->bias);
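Of the scheduler branches in applyLearningRateScheduler, only the "Time" case is fully visible in the hunk above; it is classic time-based decay, lr_t = lr_0 / (1 + d * t). A standalone sketch of just that visible branch (the "Epoch" and drop-based cases are cut off in the diff, so they are not reproduced here):

#include <iostream>

// Time-based decay, as in the visible "Time" branch of
// MLPPANN::applyLearningRateScheduler: lr_t = lr_0 / (1 + d * t).
double timeDecay(double learningRate, double decayConstant, double epoch) {
    return learningRate / (1 + decayConstant * epoch);
}

int main() {
    for (int epoch = 0; epoch < 5; epoch++) {
        std::cout << "epoch " << epoch << ": lr = " << timeDecay(0.1, 0.5, epoch) << "\n";
    }
    // Prints 0.1, 0.0666..., 0.05, 0.04, 0.0333...
    return 0;
}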

ann.h

@@ -14,10 +14,10 @@
 #include <tuple>
 #include <vector>
-class ANN {
+class MLPPANN {
 public:
-    ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
-    ~ANN();
+    MLPPANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
+    ~MLPPANN();
     std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
     double modelTest(std::vector<double> x);
     void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
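For context, a hypothetical training snippet against the renamed API. The activation, loss, and weight-initialization strings ("Sigmoid", "LogLoss", "Default"), the include path, and the hyperparameter values are assumptions for illustration, not something this diff confirms:

#include "ann.h" // assumed include path

#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<double>> inputSet = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } };
    std::vector<double> outputSet = { 0.0, 1.0, 1.0, 0.0 };

    MLPPANN ann(inputSet, outputSet);
    ann.addLayer(10, "Sigmoid", "Default", "None", 0.5, 0.5);
    ann.addOutputLayer("Sigmoid", "LogLoss", "Default", "None", 0.5, 0.5);
    ann.setLearningRateScheduler("Time", 0.05); // time-based decay, see applyLearningRateScheduler
    ann.gradientDescent(0.1, 1000, false);
    std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
    return 0;
}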

auto_encoder.cpp

@@ -13,7 +13,7 @@
 #include <iostream>
 #include <random>
-AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
+MLPPAutoEncoder::MLPPAutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
         inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
     MLPPActivation avn;
     y_hat.resize(inputSet.size());
@@ -24,15 +24,15 @@ AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
     bias2 = Utilities::biasInitialization(k);
 }
-std::vector<std::vector<double>> AutoEncoder::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPAutoEncoder::modelSetTest(std::vector<std::vector<double>> X) {
     return Evaluate(X);
 }
-std::vector<double> AutoEncoder::modelTest(std::vector<double> x) {
+std::vector<double> MLPPAutoEncoder::modelTest(std::vector<double> x) {
     return Evaluate(x);
 }
-void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     double cost_prev = 0;
@@ -85,7 +85,7 @@ void AutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     }
 }
-void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     double cost_prev = 0;
@@ -136,7 +136,7 @@ void AutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
     forwardPass();
 }
-void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     double cost_prev = 0;
@@ -196,23 +196,23 @@ void AutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     forwardPass();
 }
-double AutoEncoder::score() {
+double MLPPAutoEncoder::score() {
     Utilities util;
     return util.performance(y_hat, inputSet);
 }
-void AutoEncoder::save(std::string fileName) {
+void MLPPAutoEncoder::save(std::string fileName) {
     Utilities util;
     util.saveParameters(fileName, weights1, bias1, 0, 1);
     util.saveParameters(fileName, weights2, bias2, 1, 2);
 }
-double AutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+double MLPPAutoEncoder::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
     class Cost cost;
     return cost.MSE(y_hat, inputSet);
 }
-std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPAutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
     LinAlg alg;
     MLPPActivation avn;
     std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -220,7 +220,7 @@ std::vector<std::vector<double>> AutoEncoder::Evaluate(std::vector<std::vector<double>> X) {
     return alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
 }
-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> AutoEncoder::propagate(std::vector<std::vector<double>> X) {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPAutoEncoder::propagate(std::vector<std::vector<double>> X) {
     LinAlg alg;
     MLPPActivation avn;
     std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -228,7 +228,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> AutoEncoder::propagate(std::vector<std::vector<double>> X) {
     return { z2, a2 };
 }
-std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
+std::vector<double> MLPPAutoEncoder::Evaluate(std::vector<double> x) {
     LinAlg alg;
     MLPPActivation avn;
     std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -236,7 +236,7 @@ std::vector<double> AutoEncoder::Evaluate(std::vector<double> x) {
     return alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2);
 }
-std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std::vector<double> x) {
+std::tuple<std::vector<double>, std::vector<double>> MLPPAutoEncoder::propagate(std::vector<double> x) {
     LinAlg alg;
     MLPPActivation avn;
     std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -244,7 +244,7 @@ std::tuple<std::vector<double>, std::vector<double>> AutoEncoder::propagate(std::vector<double> x) {
     return { z2, a2 };
 }
-void AutoEncoder::forwardPass() {
+void MLPPAutoEncoder::forwardPass() {
     LinAlg alg;
     MLPPActivation avn;
     z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
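The evaluation path above is a plain two-layer reconstruction: z2 = X·W1 + b1, a2 = activation(z2), output = a2·W2 + b2, with MSE against the input as the cost (hence score() compares y_hat to inputSet). A dependency-free single-sample sketch of that pipeline; the sigmoid is an assumption, since the diff only shows a call into MLPPActivation:

#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

// W is stored k x h; returns W^T x (an h-vector), matching
// alg.mat_vec_mult(alg.transpose(W), x) in the code above.
Vec transposeMult(const Mat &W, const Vec &x) {
    Vec out(W[0].size(), 0.0);
    for (size_t i = 0; i < W.size(); i++) {
        for (size_t j = 0; j < W[i].size(); j++) {
            out[j] += W[i][j] * x[i];
        }
    }
    return out;
}

// Mirrors MLPPAutoEncoder::Evaluate(std::vector<double> x):
// z2 = W1^T x + b1, a2 = sigmoid(z2) [assumed], y = W2^T a2 + b2.
Vec reconstruct(const Mat &weights1, const Vec &bias1, const Mat &weights2, const Vec &bias2, const Vec &x) {
    Vec a2 = transposeMult(weights1, x);
    for (size_t j = 0; j < a2.size(); j++) {
        a2[j] = 1.0 / (1.0 + std::exp(-(a2[j] + bias1[j])));
    }
    Vec out = transposeMult(weights2, a2);
    for (size_t j = 0; j < out.size(); j++) {
        out[j] += bias2[j];
    }
    return out;
}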

auto_encoder.h

@@ -12,9 +12,9 @@
 #include <tuple>
 #include <vector>
-class AutoEncoder {
+class MLPPAutoEncoder {
 public:
-    AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
+    MLPPAutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
     std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
     std::vector<double> modelTest(std::vector<double> x);
     void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

bernoulli_nb.cpp

@@ -12,13 +12,13 @@
 #include <iostream>
 #include <random>
-BernoulliNB::BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
+MLPPBernoulliNB::MLPPBernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
         inputSet(inputSet), outputSet(outputSet), class_num(2) {
     y_hat.resize(outputSet.size());
     Evaluate();
 }
-std::vector<double> BernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPBernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
     std::vector<double> y_hat;
     for (int i = 0; i < X.size(); i++) {
         y_hat.push_back(modelTest(X[i]));
@@ -26,7 +26,7 @@ std::vector<double> BernoulliNB::modelSetTest(std::vector<std::vector<double>> X) {
     return y_hat;
 }
-double BernoulliNB::modelTest(std::vector<double> x) {
+double MLPPBernoulliNB::modelTest(std::vector<double> x) {
     double score_0 = 1;
     double score_1 = 1;
@@ -68,18 +68,18 @@ double BernoulliNB::modelTest(std::vector<double> x) {
     }
 }
-double BernoulliNB::score() {
+double MLPPBernoulliNB::score() {
     Utilities util;
     return util.performance(y_hat, outputSet);
 }
-void BernoulliNB::computeVocab() {
+void MLPPBernoulliNB::computeVocab() {
     LinAlg alg;
     Data data;
     vocab = data.vecToSet<double>(alg.flatten(inputSet));
 }
-void BernoulliNB::computeTheta() {
+void MLPPBernoulliNB::computeTheta() {
     // Resizing theta for the sake of ease & proper access of the elements.
     theta.resize(class_num);
@@ -107,7 +107,7 @@ void BernoulliNB::computeTheta() {
     }
 }
-void BernoulliNB::Evaluate() {
+void MLPPBernoulliNB::Evaluate() {
     for (int i = 0; i < outputSet.size(); i++) {
         // Pr(B | A) * Pr(A)
         double score_0 = 1;
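modelTest and Evaluate above accumulate score_0 and score_1 from per-feature likelihoods and a class prior (the Pr(B | A) * Pr(A) comment). A minimal sketch of that Bernoulli naive Bayes decision rule; the theta/prior layout here is illustrative and does not mirror the class's actual members:

#include <vector>

// score_c = P(class c) * prod_j P(x_j | c); predict the class with the
// larger score. theta[c][j] = P(feature j present | class c).
double bernoulliNBPredict(const std::vector<double> &x,
        const std::vector<std::vector<double>> &theta,
        const std::vector<double> &prior) {
    double score_0 = prior[0];
    double score_1 = prior[1];
    for (size_t j = 0; j < x.size(); j++) {
        score_0 *= (x[j] == 1) ? theta[0][j] : (1 - theta[0][j]);
        score_1 *= (x[j] == 1) ? theta[1][j] : (1 - theta[1][j]);
    }
    return score_1 > score_0 ? 1 : 0;
}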

bernoulli_nb.h

@@ -11,9 +11,9 @@
 #include <map>
 #include <vector>
-class BernoulliNB {
+class MLPPBernoulliNB {
 public:
-    BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
+    MLPPBernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
     std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
     double modelTest(std::vector<double> x);
     double score();

c_log_log_reg.cpp

@@ -14,22 +14,22 @@
 #include <iostream>
 #include <random>
-CLogLogReg::CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
+MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
         inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
     y_hat.resize(n);
     weights = Utilities::weightInitialization(k);
     bias = Utilities::biasInitialization();
 }
-std::vector<double> CLogLogReg::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPCLogLogReg::modelSetTest(std::vector<std::vector<double>> X) {
     return Evaluate(X);
 }
-double CLogLogReg::modelTest(std::vector<double> x) {
+double MLPPCLogLogReg::modelTest(std::vector<double> x) {
     return Evaluate(x);
 }
-void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     Reg regularization;
@@ -63,7 +63,7 @@ void CLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
     }
 }
-void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     Reg regularization;
@@ -95,7 +95,7 @@ void CLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
     }
 }
-void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
     LinAlg alg;
     Reg regularization;
     double cost_prev = 0;
@@ -136,7 +136,7 @@ void CLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
     forwardPass();
 }
-void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     MLPPActivation avn;
     LinAlg alg;
     Reg regularization;
@@ -179,41 +179,41 @@ void CLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     forwardPass();
 }
-double CLogLogReg::score() {
+double MLPPCLogLogReg::score() {
     Utilities util;
     return util.performance(y_hat, outputSet);
 }
-double CLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPCLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
     Reg regularization;
     class Cost cost;
     return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
-std::vector<double> CLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPCLogLogReg::Evaluate(std::vector<std::vector<double>> X) {
     LinAlg alg;
     MLPPActivation avn;
     return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
 }
-std::vector<double> CLogLogReg::propagate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPCLogLogReg::propagate(std::vector<std::vector<double>> X) {
     LinAlg alg;
     return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
 }
-double CLogLogReg::Evaluate(std::vector<double> x) {
+double MLPPCLogLogReg::Evaluate(std::vector<double> x) {
     LinAlg alg;
     MLPPActivation avn;
     return avn.cloglog(alg.dot(weights, x) + bias);
 }
-double CLogLogReg::propagate(std::vector<double> x) {
+double MLPPCLogLogReg::propagate(std::vector<double> x) {
     LinAlg alg;
     return alg.dot(weights, x) + bias;
 }
 // cloglog ( wTx + b )
-void CLogLogReg::forwardPass() {
+void MLPPCLogLogReg::forwardPass() {
     LinAlg alg;
     MLPPActivation avn;
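The // cloglog ( wTx + b ) comment states the model: a linear score passed through the complementary log-log inverse link. A dependency-free sketch; cloglog(z) = 1 - exp(-exp(z)) is the standard definition, though MLPPActivation's exact implementation is not shown in this diff:

#include <cmath>
#include <vector>

// Standard complementary log-log inverse link.
double cloglog(double z) {
    return 1.0 - std::exp(-std::exp(z));
}

// Mirrors MLPPCLogLogReg::Evaluate(std::vector<double> x):
// y_hat = cloglog(w^T x + b).
double evaluate(const std::vector<double> &weights, double bias, const std::vector<double> &x) {
    double z = bias;
    for (size_t i = 0; i < x.size(); i++) {
        z += weights[i] * x[i];
    }
    return cloglog(z);
}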

c_log_log_reg.h

@@ -11,9 +11,9 @@
 #include <string>
 #include <vector>
-class CLogLogReg {
+class MLPPCLogLogReg {
 public:
-    CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+    MLPPCLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
     std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
     double modelTest(std::vector<double> x);
     void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
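A hypothetical end-to-end use of the renamed class, relying on the defaults visible in the header; the include path and data are made up for illustration:

#include "c_log_log_reg.h" // assumed include path

#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<double>> X = { { 0.0 }, { 1.0 }, { 2.0 }, { 3.0 } };
    std::vector<double> y = { 0.0, 0.0, 1.0, 1.0 };

    MLPPCLogLogReg model(X, y); // reg = "None", lambda = 0.5, alpha = 0.5 by default
    model.gradientDescent(0.01, 1000, false);
    std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
    return 0;
}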