Prefixed more classes with MLPP.

Relintai 2023-01-25 00:54:50 +01:00
parent 6fe1f32c3d
commit 43e1b8d1fc
40 changed files with 233 additions and 233 deletions
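The bulk of this commit is a mechanical rename: utility and model classes gain an MLPP prefix (Reg becomes MLPPReg, OutputLayer becomes MLPPOutputLayer, LinReg becomes MLPPLinReg, and so on), with call sites updated to match. As a quick illustration of what a typical call site looks like after the commit, here is a minimal sketch; the include path is an assumption and not part of this diff, while the class name, default constructor, and regTerm(weights, lambda, alpha, reg) call are taken from the renamed code below.

```cpp
// Sketch only: the include path below is an assumption, not shown in this commit.
#include "regularization/reg.h"

#include <iostream>
#include <vector>

int main() {
	std::vector<double> weights = { 0.3, -1.2, 0.7 };

	// Before this commit the helper was declared as `Reg regularization;`.
	MLPPReg regularization;

	// regTerm(weights, lambda, alpha, reg) is called the same way throughout the
	// diff below; only the class name carrying it changed.
	std::cout << regularization.regTerm(weights, 1, 0, "Ridge") << std::endl;
	return 0;
}
```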

View File

@@ -663,14 +663,14 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new OutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
 	}
 }
 double MLPPANN::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+	MLPPReg regularization;
 	class MLPPCost cost;
 	double totalRegTerm = 0;
@@ -722,7 +722,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> M
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@@ -55,7 +55,7 @@ private:
 	std::vector<double> y_hat;
 	std::vector<MLPPHiddenLayer> network;
-	OutputLayer *outputLayer;
+	MLPPOutputLayer *outputLayer;
 	int n;
 	int k;

View File

@@ -32,7 +32,7 @@ double MLPPCLogLogReg::modelTest(std::vector<double> x) {
 void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -66,7 +66,7 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U
 void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -97,7 +97,7 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
 void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -139,7 +139,7 @@ void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
 void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -185,7 +185,7 @@ double MLPPCLogLogReg::score() {
 }
 double MLPPCLogLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }

View File

@@ -360,23 +360,23 @@ std::vector<std::vector<double>> MLPPCost::WassersteinLossDeriv(std::vector<std:
 double MLPPCost::HingeLoss(std::vector<double> y_hat, std::vector<double> y, std::vector<double> weights, double C) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
 }
 double MLPPCost::HingeLoss(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, std::vector<std::vector<double>> weights, double C) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
 }
 std::vector<double> MLPPCost::HingeLossDeriv(std::vector<double> y_hat, std::vector<double> y, double C) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
 }
 std::vector<std::vector<double>> MLPPCost::HingeLossDeriv(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y, double C) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
 }

View File

@@ -621,11 +621,11 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> MLPPData:
 		outputSet.push_back(BOW[i]);
 	}
 	MLPPLinAlg alg;
-	SoftmaxNet *model;
+	MLPPSoftmaxNet *model;
 	if (type == "Skipgram") {
-		model = new SoftmaxNet(outputSet, inputSet, dimension);
+		model = new MLPPSoftmaxNet(outputSet, inputSet, dimension);
 	} else { // else = CBOW. We maintain it is a default.
-		model = new SoftmaxNet(inputSet, outputSet, dimension);
+		model = new MLPPSoftmaxNet(inputSet, outputSet, dimension);
 	}
 	model->gradientDescent(learning_rate, max_epoch, 1);

View File

@@ -35,7 +35,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -83,7 +83,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
 // class MLPPCost cost;
 // MLPPActivation avn;
 // MLPPLinAlg alg;
-// Reg regularization;
+// MLPPReg regularization;
 // double cost_prev = 0;
 // int epoch = 1;
@@ -116,7 +116,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
 // class MLPPCost cost;
 // MLPPActivation avn;
 // MLPPLinAlg alg;
-// Reg regularization;
+// MLPPReg regularization;
 // double cost_prev = 0;
 // int epoch = 1;

View File

@@ -33,7 +33,7 @@ double MLPPExpReg::modelTest(std::vector<double> x) {
 void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -89,7 +89,7 @@ void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 }
 void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -136,7 +136,7 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
 void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -204,7 +204,7 @@ void MLPPExpReg::save(std::string fileName) {
 }
 double MLPPExpReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }

View File

@@ -110,9 +110,9 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new OutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
 	}
 }
@@ -146,7 +146,7 @@ std::vector<double> MLPPGAN::modelSetTestDiscriminator(std::vector<std::vector<d
 }
 double MLPPGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+	MLPPReg regularization;
 	class MLPPCost cost;
 	double totalRegTerm = 0;
@@ -211,7 +211,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> M
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@@ -247,7 +247,7 @@ std::vector<std::vector<std::vector<double>>> MLPPGAN::computeGeneratorGradients
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@@ -47,7 +47,7 @@ private:
 	std::vector<double> y_hat;
 	std::vector<MLPPHiddenLayer> network;
-	OutputLayer *outputLayer;
+	MLPPOutputLayer *outputLayer;
 	int n;
 	int k;

View File

@@ -17,7 +17,7 @@
-LinReg::LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
+MLPPLinReg::MLPPLinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -25,17 +25,17 @@ LinReg::LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> ou
 	bias = Utilities::biasInitialization();
 }
-std::vector<double> LinReg::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPLinReg::modelSetTest(std::vector<std::vector<double>> X) {
 	return Evaluate(X);
 }
-double LinReg::modelTest(std::vector<double> x) {
+double MLPPLinReg::modelTest(std::vector<double> x) {
 	return Evaluate(x);
 }
-void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
+void MLPPLinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -65,9 +65,9 @@ void LinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPLinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -96,9 +96,9 @@ void LinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-void LinReg::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPLinReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -135,9 +135,9 @@ void LinReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	forwardPass();
 }
-void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -173,7 +173,7 @@ void LinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool
 	forwardPass();
 }
-void LinReg::normalEquation() {
+void MLPPLinReg::normalEquation() {
 	MLPPLinAlg alg;
 	Stat stat;
 	std::vector<double> x_means;
@@ -207,33 +207,33 @@ void LinReg::normalEquation() {
 	//}
 }
-double LinReg::score() {
+double MLPPLinReg::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }
-void LinReg::save(std::string fileName) {
+void MLPPLinReg::save(std::string fileName) {
 	Utilities util;
 	util.saveParameters(fileName, weights, bias);
 }
-double LinReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+double MLPPLinReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
+	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
-std::vector<double> LinReg::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPLinReg::Evaluate(std::vector<std::vector<double>> X) {
 	MLPPLinAlg alg;
 	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
 }
-double LinReg::Evaluate(std::vector<double> x) {
+double MLPPLinReg::Evaluate(std::vector<double> x) {
 	MLPPLinAlg alg;
 	return alg.dot(weights, x) + bias;
 }
 // wTx + b
-void LinReg::forwardPass() {
+void MLPPLinReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
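The hunks above rename every former LinReg member in place; the constructor and method signatures themselves are untouched. For illustration, a short sketch of calling code against the renamed class, where the include path is an assumption (it is not shown in this diff) while the constructor arguments and the gradientDescent, score, and modelTest signatures come from the hunks above.

```cpp
// Sketch only: the include path is an assumption, not shown in this commit.
#include "lin_reg/lin_reg.h"

#include <iostream>
#include <vector>

int main() {
	std::vector<std::vector<double>> inputSet = { { 1.0 }, { 2.0 }, { 3.0 }, { 4.0 } };
	std::vector<double> outputSet = { 2.0, 4.0, 6.0, 8.0 };

	// Formerly `LinReg model(...)`; constructor arguments are unchanged by the rename.
	MLPPLinReg model(inputSet, outputSet, "None", 0.5, 0.5);
	model.gradientDescent(0.001, 1000, false); // learning_rate, max_epoch, UI
	std::cout << "score: " << model.score() << std::endl;
	std::cout << "prediction for x = 5: " << model.modelTest({ 5.0 }) << std::endl;
	return 0;
}
```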

View File

@@ -12,9 +12,9 @@
 #include <vector>
-class LinReg {
+class MLPPLinReg {
 public:
-	LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+	MLPPLinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	void NewtonRaphson(double learning_rate, int max_epoch, bool UI);

View File

@@ -15,24 +15,24 @@
 #include <random>
-LogReg::LogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
+MLPPLogReg::MLPPLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
 	weights = Utilities::weightInitialization(k);
 	bias = Utilities::biasInitialization();
 }
-std::vector<double> LogReg::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPLogReg::modelSetTest(std::vector<std::vector<double>> X) {
 	return Evaluate(X);
 }
-double LogReg::modelTest(std::vector<double> x) {
+double MLPPLogReg::modelTest(std::vector<double> x) {
 	return Evaluate(x);
 }
-void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -62,9 +62,9 @@ void LogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-void LogReg::MLE(double learning_rate, int max_epoch, bool UI) {
+void MLPPLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -93,9 +93,9 @@ void LogReg::MLE(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-void LogReg::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -132,9 +132,9 @@ void LogReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	forwardPass();
 }
-void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -170,35 +170,35 @@ void LogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool
 	forwardPass();
 }
-double LogReg::score() {
+double MLPPLogReg::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }
-void LogReg::save(std::string fileName) {
+void MLPPLogReg::save(std::string fileName) {
 	Utilities util;
 	util.saveParameters(fileName, weights, bias);
 }
-double LogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+double MLPPLogReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
+	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
-std::vector<double> LogReg::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPLogReg::Evaluate(std::vector<std::vector<double>> X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
 }
-double LogReg::Evaluate(std::vector<double> x) {
+double MLPPLogReg::Evaluate(std::vector<double> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	return avn.sigmoid(alg.dot(weights, x) + bias);
 }
 // sigmoid ( wTx + b )
-void LogReg::forwardPass() {
+void MLPPLogReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }

View File

@@ -13,9 +13,9 @@
-class LogReg {
+class MLPPLogReg {
 public:
-	LogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+	MLPPLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@@ -14,15 +14,15 @@
 #include <iostream>
-MANN::MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet) :
+MLPPMANN::MLPPMANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) {
 }
-MANN::~MANN() {
+MLPPMANN::~MLPPMANN() {
 	delete outputLayer;
 }
-std::vector<std::vector<double>> MANN::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPMANN::modelSetTest(std::vector<std::vector<double>> X) {
 	if (!network.empty()) {
 		network[0].input = X;
 		network[0].forwardPass();
@@ -39,7 +39,7 @@ std::vector<std::vector<double>> MANN::modelSetTest(std::vector<std::vector<doub
 	return outputLayer->a;
 }
-std::vector<double> MANN::modelTest(std::vector<double> x) {
+std::vector<double> MLPPMANN::modelTest(std::vector<double> x) {
 	if (!network.empty()) {
 		network[0].Test(x);
 		for (int i = 1; i < network.size(); i++) {
@@ -52,11 +52,11 @@ std::vector<double> MANN::modelTest(std::vector<double> x) {
 	return outputLayer->a_test;
 }
-void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -120,13 +120,13 @@ void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-double MANN::score() {
+double MLPPMANN::score() {
 	Utilities util;
 	forwardPass();
 	return util.performance(y_hat, outputSet);
 }
-void MANN::save(std::string fileName) {
+void MLPPMANN::save(std::string fileName) {
 	Utilities util;
 	if (!network.empty()) {
 		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
@@ -139,7 +139,7 @@ void MANN::save(std::string fileName) {
 	}
 }
-void MANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPMANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
 	if (network.empty()) {
 		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
@@ -149,16 +149,16 @@ void MANN::addLayer(int n_hidden, std::string activation, std::string weightInit
 	}
 }
-void MANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPMANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha) {
 	if (!network.empty()) {
-		outputLayer = new MultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPMultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new MultiOutputLayer(n_output, k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPMultiOutputLayer(n_output, k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
 	}
 }
-double MANN::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
-	Reg regularization;
+double MLPPMANN::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
+	MLPPReg regularization;
 	class MLPPCost cost;
 	double totalRegTerm = 0;
@@ -171,7 +171,7 @@ double MANN::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vecto
 	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
 }
-void MANN::forwardPass() {
+void MLPPMANN::forwardPass() {
 	if (!network.empty()) {
 		network[0].input = inputSet;
 		network[0].forwardPass();

View File

@@ -16,10 +16,10 @@
-class MANN {
+class MLPPMANN {
 public:
-	MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet);
-	~MANN();
+	MLPPMANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet);
+	~MLPPMANN();
 	std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
 	std::vector<double> modelTest(std::vector<double> x);
 	void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
@@ -38,7 +38,7 @@ private:
 	std::vector<std::vector<double>> y_hat;
 	std::vector<MLPPHiddenLayer> network;
-	MultiOutputLayer *outputLayer;
+	MLPPMultiOutputLayer *outputLayer;
 	int n;
 	int k;

View File

@@ -16,7 +16,7 @@
 #include <random>
-MLP::MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
+MLPPMLP::MLPPMLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	MLPPActivation avn;
 	y_hat.resize(n);
@@ -27,18 +27,18 @@ MLP::MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSe
 	bias2 = Utilities::biasInitialization();
 }
-std::vector<double> MLP::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPMLP::modelSetTest(std::vector<std::vector<double>> X) {
 	return Evaluate(X);
 }
-double MLP::modelTest(std::vector<double> x) {
+double MLPPMLP::modelTest(std::vector<double> x) {
 	return Evaluate(x);
 }
-void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPMLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
 	forwardPass();
@@ -94,10 +94,10 @@ void MLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }
-void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPMLP::SGD(double learning_rate, int max_epoch, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -148,10 +148,10 @@ void MLP::SGD(double learning_rate, int max_epoch, bool UI) {
 	forwardPass();
 }
-void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
-	Reg regularization;
+	MLPPReg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -214,24 +214,24 @@ void MLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
 	forwardPass();
 }
-double MLP::score() {
+double MLPPMLP::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }
-void MLP::save(std::string fileName) {
+void MLPPMLP::save(std::string fileName) {
 	Utilities util;
 	util.saveParameters(fileName, weights1, bias1, 0, 1);
 	util.saveParameters(fileName, weights2, bias2, 1, 2);
 }
-double MLP::Cost(std::vector<double> y_hat, std::vector<double> y) {
-	Reg regularization;
+double MLPPMLP::Cost(std::vector<double> y_hat, std::vector<double> y) {
+	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg);
 }
-std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPMLP::Evaluate(std::vector<std::vector<double>> X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -239,7 +239,7 @@ std::vector<double> MLP::Evaluate(std::vector<std::vector<double>> X) {
 	return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
 }
-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLP::propagate(std::vector<std::vector<double>> X) {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPMLP::propagate(std::vector<std::vector<double>> X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@@ -247,7 +247,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> M
 	return { z2, a2 };
 }
-double MLP::Evaluate(std::vector<double> x) {
+double MLPPMLP::Evaluate(std::vector<double> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -255,7 +255,7 @@ double MLP::Evaluate(std::vector<double> x) {
 	return avn.sigmoid(alg.dot(weights2, a2) + bias2);
 }
-std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<double> x) {
+std::tuple<std::vector<double>, std::vector<double>> MLPPMLP::propagate(std::vector<double> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@@ -263,7 +263,7 @@ std::tuple<std::vector<double>, std::vector<double>> MLP::propagate(std::vector<
 	return { z2, a2 };
 }
-void MLP::forwardPass() {
+void MLPPMLP::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);

View File

@@ -14,9 +14,9 @@
-class MLP {
+class MLPPMLP {
 public:
-	MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+	MLPPMLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@@ -12,7 +12,7 @@
 #include <random>
-MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
+MLPPMultiOutputLayer::MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
 	bias = Utilities::biasInitialization(n_output);
@@ -116,14 +116,14 @@ MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ
 	cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }
-void MultiOutputLayer::forwardPass() {
+void MLPPMultiOutputLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
 	a = (avn.*activation_map[activation])(z, 0);
 }
-void MultiOutputLayer::Test(std::vector<double> x) {
+void MLPPMultiOutputLayer::Test(std::vector<double> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);

View File

@@ -16,9 +16,9 @@
 #include <vector>
-class MultiOutputLayer {
+class MLPPMultiOutputLayer {
 public:
-	MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
+	MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
 	int n_output;
 	int n_hidden;

View File

@@ -13,13 +13,13 @@
 #include <random>
-MultinomialNB::MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
+MLPPMultinomialNB::MLPPMultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
 	Evaluate();
 }
-std::vector<double> MultinomialNB::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPMultinomialNB::modelSetTest(std::vector<std::vector<double>> X) {
 	std::vector<double> y_hat;
 	for (int i = 0; i < X.size(); i++) {
 		y_hat.push_back(modelTest(X[i]));
@@ -27,7 +27,7 @@ std::vector<double> MultinomialNB::modelSetTest(std::vector<std::vector<double>>
 	return y_hat;
 }
-double MultinomialNB::modelTest(std::vector<double> x) {
+double MLPPMultinomialNB::modelTest(std::vector<double> x) {
 	double score[class_num];
 	computeTheta();
@@ -48,12 +48,12 @@ double MultinomialNB::modelTest(std::vector<double> x) {
 	return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
 }
-double MultinomialNB::score() {
+double MLPPMultinomialNB::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }
-void MultinomialNB::computeTheta() {
+void MLPPMultinomialNB::computeTheta() {
 	// Resizing theta for the sake of ease & proper access of the elements.
 	theta.resize(class_num);
@@ -77,7 +77,7 @@ void MultinomialNB::computeTheta() {
 	}
 }
-void MultinomialNB::Evaluate() {
+void MLPPMultinomialNB::Evaluate() {
 	MLPPLinAlg alg;
 	for (int i = 0; i < outputSet.size(); i++) {
 		// Pr(B | A) * Pr(A)

View File

@@ -12,9 +12,9 @@
 #include <vector>
-class MultinomialNB {
+class MLPPMultinomialNB {
 public:
-	MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
+	MLPPMultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	double score();

View File

@@ -14,40 +14,40 @@
-double NumericalAnalysis::numDiff(double (*function)(double), double x) {
+double MLPPNumericalAnalysis::numDiff(double (*function)(double), double x) {
 	double eps = 1e-10;
 	return (function(x + eps) - function(x)) / eps; // This is just the formal def. of the derivative.
 }
-double NumericalAnalysis::numDiff_2(double (*function)(double), double x) {
+double MLPPNumericalAnalysis::numDiff_2(double (*function)(double), double x) {
 	double eps = 1e-5;
 	return (function(x + 2 * eps) - 2 * function(x + eps) + function(x)) / (eps * eps);
 }
-double NumericalAnalysis::numDiff_3(double (*function)(double), double x) {
+double MLPPNumericalAnalysis::numDiff_3(double (*function)(double), double x) {
 	double eps = 1e-5;
 	double t1 = function(x + 3 * eps) - 2 * function(x + 2 * eps) + function(x + eps);
 	double t2 = function(x + 2 * eps) - 2 * function(x + eps) + function(x);
 	return (t1 - t2) / (eps * eps * eps);
 }
-double NumericalAnalysis::constantApproximation(double (*function)(double), double c) {
+double MLPPNumericalAnalysis::constantApproximation(double (*function)(double), double c) {
 	return function(c);
 }
-double NumericalAnalysis::linearApproximation(double (*function)(double), double c, double x) {
+double MLPPNumericalAnalysis::linearApproximation(double (*function)(double), double c, double x) {
 	return constantApproximation(function, c) + numDiff(function, c) * (x - c);
 }
-double NumericalAnalysis::quadraticApproximation(double (*function)(double), double c, double x) {
+double MLPPNumericalAnalysis::quadraticApproximation(double (*function)(double), double c, double x) {
 	return linearApproximation(function, c, x) + 0.5 * numDiff_2(function, c) * (x - c) * (x - c);
 }
-double NumericalAnalysis::cubicApproximation(double (*function)(double), double c, double x) {
+double MLPPNumericalAnalysis::cubicApproximation(double (*function)(double), double c, double x) {
 	return quadraticApproximation(function, c, x) + (1 / 6) * numDiff_3(function, c) * (x - c) * (x - c) * (x - c);
 }
-double NumericalAnalysis::numDiff(double (*function)(std::vector<double>), std::vector<double> x, int axis) {
+double MLPPNumericalAnalysis::numDiff(double (*function)(std::vector<double>), std::vector<double> x, int axis) {
 	// For multivariable function analysis.
 	// This will be used for calculating Jacobian vectors.
 	// Diffrentiate with respect to indicated axis. (0, 1, 2 ...)
@@ -58,7 +58,7 @@ double NumericalAnalysis::numDiff(double (*function)(std::vector<double>), std::
 	return (function(x_eps) - function(x)) / eps;
 }
-double NumericalAnalysis::numDiff_2(double (*function)(std::vector<double>), std::vector<double> x, int axis1, int axis2) {
+double MLPPNumericalAnalysis::numDiff_2(double (*function)(std::vector<double>), std::vector<double> x, int axis1, int axis2) {
 	//For Hessians.
 	double eps = 1e-5;
@@ -75,7 +75,7 @@ double NumericalAnalysis::numDiff_2(double (*function)(std::vector<double>), std
 	return (function(x_pp) - function(x_np) - function(x_pn) + function(x)) / (eps * eps);
 }
-double NumericalAnalysis::numDiff_3(double (*function)(std::vector<double>), std::vector<double> x, int axis1, int axis2, int axis3) {
+double MLPPNumericalAnalysis::numDiff_3(double (*function)(std::vector<double>), std::vector<double> x, int axis1, int axis2, int axis3) {
 	// For third order derivative tensors.
 	// NOTE: Approximations do not appear to be accurate for sinusodial functions...
 	// Should revisit this later.
@@ -112,7 +112,7 @@ double NumericalAnalysis::numDiff_3(double (*function)(std::vector<double>), std
 	return (thirdAxis - noThirdAxis) / (eps * eps * eps);
 }
-double NumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double x_0, double epoch_num) {
+double MLPPNumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double x_0, double epoch_num) {
 	double x = x_0;
 	for (int i = 0; i < epoch_num; i++) {
 		x -= function(x) / numDiff(function, x);
@@ -120,7 +120,7 @@ double NumericalAnalysis::newtonRaphsonMethod(double (*function)(double), double
 	return x;
 }
-double NumericalAnalysis::halleyMethod(double (*function)(double), double x_0, double epoch_num) {
+double MLPPNumericalAnalysis::halleyMethod(double (*function)(double), double x_0, double epoch_num) {
 	double x = x_0;
 	for (int i = 0; i < epoch_num; i++) {
 		x -= ((2 * function(x) * numDiff(function, x)) / (2 * numDiff(function, x) * numDiff(function, x) - function(x) * numDiff_2(function, x)));
@@ -128,7 +128,7 @@ double NumericalAnalysis::halleyMethod(double (*function)(double), double x_0, d
 	return x;
 }
-double NumericalAnalysis::invQuadraticInterpolation(double (*function)(double), std::vector<double> x_0, double epoch_num) {
+double MLPPNumericalAnalysis::invQuadraticInterpolation(double (*function)(double), std::vector<double> x_0, double epoch_num) {
 	double x = 0;
 	std::vector<double> currentThree = x_0;
 	for (int i = 0; i < epoch_num; i++) {
@@ -143,7 +143,7 @@ double NumericalAnalysis::invQuadraticInterpolation(double (*function)(double),
 	return x;
 }
-double NumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vector<double> q_0, double p, double h) {
+double MLPPNumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vector<double> q_0, double p, double h) {
 	double max_epoch = (p - q_0[0]) / h;
 	double x = q_0[0];
 	double y = q_0[1];
@@ -154,7 +154,7 @@ double NumericalAnalysis::eulerianMethod(double (*derivative)(double), std::vect
 	return y;
 }
-double NumericalAnalysis::eulerianMethod(double (*derivative)(std::vector<double>), std::vector<double> q_0, double p, double h) {
+double MLPPNumericalAnalysis::eulerianMethod(double (*derivative)(std::vector<double>), std::vector<double> q_0, double p, double h) {
 	double max_epoch = (p - q_0[0]) / h;
 	double x = q_0[0];
 	double y = q_0[1];
@@ -165,7 +165,7 @@ double NumericalAnalysis::eulerianMethod(double (*derivative)(std::vector<double
 	return y;
 }
-double NumericalAnalysis::growthMethod(double C, double k, double t) {
+double MLPPNumericalAnalysis::growthMethod(double C, double k, double t) {
 	/*
 	dP/dt = kP
 	dP/P = kdt
@@ -181,7 +181,7 @@ double NumericalAnalysis::growthMethod(double C, double k, double t) {
 	return C * std::exp(k * t);
 }
-std::vector<double> NumericalAnalysis::jacobian(double (*function)(std::vector<double>), std::vector<double> x) {
+std::vector<double> MLPPNumericalAnalysis::jacobian(double (*function)(std::vector<double>), std::vector<double> x) {
 	std::vector<double> jacobian;
 	jacobian.resize(x.size());
 	for (int i = 0; i < jacobian.size(); i++) {
@@ -189,7 +189,7 @@ std::vector<double> NumericalAnalysis::jacobian(double (*function)(std::vector<d
 	}
 	return jacobian;
 }
-std::vector<std::vector<double>> NumericalAnalysis::hessian(double (*function)(std::vector<double>), std::vector<double> x) {
+std::vector<std::vector<double>> MLPPNumericalAnalysis::hessian(double (*function)(std::vector<double>), std::vector<double> x) {
 	std::vector<std::vector<double>> hessian;
 	hessian.resize(x.size());
 	for (int i = 0; i < hessian.size(); i++) {
@@ -203,7 +203,7 @@ std::vector<std::vector<double>> NumericalAnalysis::hessian(double (*function)(s
 	return hessian;
 }
-std::vector<std::vector<std::vector<double>>> NumericalAnalysis::thirdOrderTensor(double (*function)(std::vector<double>), std::vector<double> x) {
+std::vector<std::vector<std::vector<double>>> MLPPNumericalAnalysis::thirdOrderTensor(double (*function)(std::vector<double>), std::vector<double> x) {
 	std::vector<std::vector<std::vector<double>>> tensor;
 	tensor.resize(x.size());
 	for (int i = 0; i < tensor.size(); i++) {
@@ -221,21 +221,21 @@ std::vector<std::vector<std::vector<double>>> NumericalAnalysis::thirdOrderTenso
 	return tensor;
 }
-double NumericalAnalysis::constantApproximation(double (*function)(std::vector<double>), std::vector<double> c) {
+double MLPPNumericalAnalysis::constantApproximation(double (*function)(std::vector<double>), std::vector<double> c) {
 	return function(c);
 }
-double NumericalAnalysis::linearApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
+double MLPPNumericalAnalysis::linearApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
 	MLPPLinAlg alg;
 	return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0];
 }
-double NumericalAnalysis::quadraticApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
+double MLPPNumericalAnalysis::quadraticApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
 	MLPPLinAlg alg;
 	return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0];
 }
-double NumericalAnalysis::cubicApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
+double MLPPNumericalAnalysis::cubicApproximation(double (*function)(std::vector<double>), std::vector<double> c, std::vector<double> x) {
 	/*
 	Not completely sure as the literature seldom discusses the third order taylor approximation,
 	in particular for multivariate cases, but ostensibly, the matrix/tensor/vector multiplies
@@ -252,7 +252,7 @@ double NumericalAnalysis::cubicApproximation(double (*function)(std::vector<doub
 	return quadraticApproximation(function, c, x) + (1 / 6) * resultScalar;
 }
-double NumericalAnalysis::laplacian(double (*function)(std::vector<double>), std::vector<double> x) {
+double MLPPNumericalAnalysis::laplacian(double (*function)(std::vector<double>), std::vector<double> x) {
 	MLPPLinAlg alg;
 	std::vector<std::vector<double>> hessian_matrix = hessian(function, x);
 	double laplacian = 0;
@@ -262,7 +262,7 @@ double NumericalAnalysis::laplacian(double (*function)(std::vector<double>), std
 	return laplacian;
 }
-std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector<double>), std::vector<double> x) {
+std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(double (*function)(std::vector<double>), std::vector<double> x) {
 	MLPPLinAlg alg;
 	std::vector<std::vector<double>> hessianMatrix = hessian(function, x);
 	/*
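The NumericalAnalysis hunks follow the same pattern: every method keeps its signature and only the class name gains the MLPP prefix. A small sketch of the renamed class in use; the include path and the sample function f are illustrative assumptions, while numDiff and newtonRaphsonMethod are used with the signatures shown in the hunks above.

```cpp
// Sketch only: the include path and f() are assumptions, not shown in this commit.
#include "numerical_analysis.h"

#include <iostream>

// Simple test function with a root at sqrt(2).
static double f(double x) {
	return x * x - 2.0;
}

int main() {
	// Formerly `NumericalAnalysis numAn;`.
	MLPPNumericalAnalysis numAn;

	std::cout << "f'(1) ~ " << numAn.numDiff(f, 1.0) << std::endl;
	std::cout << "root ~ " << numAn.newtonRaphsonMethod(f, 1.0, 100) << std::endl;
	return 0;
}
```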

View File

@@ -11,7 +11,7 @@
 #include <vector>
-class NumericalAnalysis {
+class MLPPNumericalAnalysis {
 public:
 	/* A numerical method for derivatives is used. This may be subject to change,
 	as an analytical method for calculating derivatives will most likely be used in

View File

@@ -9,11 +9,11 @@
 #include <iostream>
-OutlierFinder::OutlierFinder(int threshold) :
+MLPPOutlierFinder::MLPPOutlierFinder(int threshold) :
 		threshold(threshold) {
 }
-std::vector<std::vector<double>> OutlierFinder::modelSetTest(std::vector<std::vector<double>> inputSet) {
+std::vector<std::vector<double>> MLPPOutlierFinder::modelSetTest(std::vector<std::vector<double>> inputSet) {
 	Stat stat;
 	std::vector<std::vector<double>> outliers;
 	outliers.resize(inputSet.size());
@@ -28,7 +28,7 @@ std::vector<std::vector<double>> OutlierFinder::modelSetTest(std::vector<std::ve
 	return outliers;
 }
-std::vector<double> OutlierFinder::modelTest(std::vector<double> inputSet) {
+std::vector<double> MLPPOutlierFinder::modelTest(std::vector<double> inputSet) {
 	Stat stat;
 	std::vector<double> outliers;
 	for (int i = 0; i < inputSet.size(); i++) {

View File

@@ -11,10 +11,10 @@
 #include <vector>
-class OutlierFinder {
+class MLPPOutlierFinder {
 public:
 	// Cnstr
-	OutlierFinder(int threshold);
+	MLPPOutlierFinder(int threshold);
 	std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> inputSet);
 	std::vector<double> modelTest(std::vector<double> inputSet);

@ -12,7 +12,7 @@
#include <random> #include <random>
OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) : MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(n_hidden, weightInit); weights = Utilities::weightInitialization(n_hidden, weightInit);
bias = Utilities::biasInitialization(); bias = Utilities::biasInitialization();
@ -113,14 +113,14 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost,
cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
} }
void OutputLayer::forwardPass() { void MLPPOutputLayer::forwardPass() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
a = (avn.*activation_map[activation])(z, 0); a = (avn.*activation_map[activation])(z, 0);
} }
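In equation form, the forward pass above is simply

z = Xw + b, \qquad a = \sigma(z),

with \sigma being whichever activation activation_map selects.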
void OutputLayer::Test(std::vector<double> x) { void MLPPOutputLayer::Test(std::vector<double> x) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
z_test = alg.dot(weights, x) + bias; z_test = alg.dot(weights, x) + bias;

@ -16,9 +16,9 @@
#include <vector> #include <vector>
class OutputLayer { class MLPPOutputLayer {
public: public:
OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha); MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
int n_hidden; int n_hidden;
std::string activation; std::string activation;

@ -13,11 +13,11 @@
PCA::PCA(std::vector<std::vector<double>> inputSet, int k) : MLPPPCA::MLPPPCA(std::vector<std::vector<double>> inputSet, int k) :
inputSet(inputSet), k(k) { inputSet(inputSet), k(k) {
} }
std::vector<std::vector<double>> PCA::principalComponents() { std::vector<std::vector<double>> MLPPPCA::principalComponents() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPData data; MLPPData data;
@ -33,7 +33,7 @@ std::vector<std::vector<double>> PCA::principalComponents() {
return Z; return Z;
} }
// Simply tells us the percentage of variance maintained. // Simply tells us the percentage of variance maintained.
double PCA::score() { double MLPPPCA::score() {
MLPPLinAlg alg; MLPPLinAlg alg;
std::vector<std::vector<double>> X_approx = alg.matmult(U_reduce, Z); std::vector<std::vector<double>> X_approx = alg.matmult(U_reduce, Z);
double num, den = 0; double num, den = 0;
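The score presumably compares the rank-k reconstruction X_approx = U_reduce Z against the original data in the usual retained-variance sense,

\text{score} = 1 - \frac{\sum_i \lVert x^{(i)} - x^{(i)}_{\text{approx}} \rVert^{2}}{\sum_i \lVert x^{(i)} \rVert^{2}},

which is the "percentage of variance maintained" the comment refers to. Incidentally, `double num, den = 0;` initializes only den; num is left uninitialized, which is worth checking if it is accumulated with += further down.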

@ -11,9 +11,9 @@
#include <vector> #include <vector>
class PCA { class MLPPPCA {
public: public:
PCA(std::vector<std::vector<double>> inputSet, int k); MLPPPCA(std::vector<std::vector<double>> inputSet, int k);
std::vector<std::vector<double>> principalComponents(); std::vector<std::vector<double>> principalComponents();
double score(); double score();

@ -15,25 +15,25 @@
#include <random> #include <random>
ProbitReg::ProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) : MLPPProbitReg::MLPPProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n); y_hat.resize(n);
weights = Utilities::weightInitialization(k); weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization(); bias = Utilities::biasInitialization();
} }
std::vector<double> ProbitReg::modelSetTest(std::vector<std::vector<double>> X) { std::vector<double> MLPPProbitReg::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X); return Evaluate(X);
} }
double ProbitReg::modelTest(std::vector<double> x) { double MLPPProbitReg::modelTest(std::vector<double> x) {
return Evaluate(x); return Evaluate(x);
} }
void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { void MLPPProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -63,10 +63,10 @@ void ProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
} }
} }
void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { void MLPPProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -96,11 +96,11 @@ void ProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
} }
} }
void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
// NOTE: ∂y_hat/∂z is sparse // NOTE: ∂y_hat/∂z is sparse
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -138,10 +138,10 @@ void ProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass(); forwardPass();
} }
void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -197,46 +197,46 @@ void ProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, b
forwardPass(); forwardPass();
} }
double ProbitReg::score() { double MLPPProbitReg::score() {
Utilities util; Utilities util;
return util.performance(y_hat, outputSet); return util.performance(y_hat, outputSet);
} }
void ProbitReg::save(std::string fileName) { void MLPPProbitReg::save(std::string fileName) {
Utilities util; Utilities util;
util.saveParameters(fileName, weights, bias); util.saveParameters(fileName, weights, bias);
} }
double ProbitReg::Cost(std::vector<double> y_hat, std::vector<double> y) { double MLPPProbitReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
Reg regularization; MLPPReg regularization;
class MLPPCost cost; class MLPPCost cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }
std::vector<double> ProbitReg::Evaluate(std::vector<std::vector<double>> X) { std::vector<double> MLPPProbitReg::Evaluate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
} }
std::vector<double> ProbitReg::propagate(std::vector<std::vector<double>> X) { std::vector<double> MLPPProbitReg::propagate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
} }
double ProbitReg::Evaluate(std::vector<double> x) { double MLPPProbitReg::Evaluate(std::vector<double> x) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
return avn.gaussianCDF(alg.dot(weights, x) + bias); return avn.gaussianCDF(alg.dot(weights, x) + bias);
} }
double ProbitReg::propagate(std::vector<double> x) { double MLPPProbitReg::propagate(std::vector<double> x) {
MLPPLinAlg alg; MLPPLinAlg alg;
return alg.dot(weights, x) + bias; return alg.dot(weights, x) + bias;
} }
// gaussianCDF ( wTx + b ) // gaussianCDF ( wTx + b )
void ProbitReg::forwardPass() { void MLPPProbitReg::forwardPass() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
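The model being renamed here is standard probit regression: the forward pass applies the standard-normal CDF to an affine score,

\hat{y} = \Phi(w^{T}x + b), \qquad \Phi(z) = \tfrac{1}{2}\left(1 + \operatorname{erf}\!\left(\tfrac{z}{\sqrt{2}}\right)\right),

which is exactly the gaussianCDF( wTx + b ) noted in the comment above.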

@ -13,9 +13,9 @@
class ProbitReg { class MLPPProbitReg {
public: public:
ProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); MLPPProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
std::vector<double> modelSetTest(std::vector<std::vector<double>> X); std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x); double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch = 0, bool UI = 1);

@ -12,7 +12,7 @@
double Reg::regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) { double MLPPReg::regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) {
if (reg == "Ridge") { if (reg == "Ridge") {
double reg = 0; double reg = 0;
for (int i = 0; i < weights.size(); i++) { for (int i = 0; i < weights.size(); i++) {
@ -36,7 +36,7 @@ double Reg::regTerm(std::vector<double> weights, double lambda, double alpha, st
return 0; return 0;
} }
double Reg::regTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) { double MLPPReg::regTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) {
if (reg == "Ridge") { if (reg == "Ridge") {
double reg = 0; double reg = 0;
for (int i = 0; i < weights.size(); i++) { for (int i = 0; i < weights.size(); i++) {
@ -66,7 +66,7 @@ double Reg::regTerm(std::vector<std::vector<double>> weights, double lambda, dou
return 0; return 0;
} }
std::vector<double> Reg::regWeights(std::vector<double> weights, double lambda, double alpha, std::string reg) { std::vector<double> MLPPReg::regWeights(std::vector<double> weights, double lambda, double alpha, std::string reg) {
MLPPLinAlg alg; MLPPLinAlg alg;
if (reg == "WeightClipping") { if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg); return regDerivTerm(weights, lambda, alpha, reg);
@ -78,7 +78,7 @@ std::vector<double> Reg::regWeights(std::vector<double> weights, double lambda,
// return weights; // return weights;
} }
std::vector<std::vector<double>> Reg::regWeights(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) { std::vector<std::vector<double>> MLPPReg::regWeights(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) {
MLPPLinAlg alg; MLPPLinAlg alg;
if (reg == "WeightClipping") { if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg); return regDerivTerm(weights, lambda, alpha, reg);
@ -92,7 +92,7 @@ std::vector<std::vector<double>> Reg::regWeights(std::vector<std::vector<double>
// return weights; // return weights;
} }
std::vector<double> Reg::regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) { std::vector<double> MLPPReg::regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) {
std::vector<double> regDeriv; std::vector<double> regDeriv;
regDeriv.resize(weights.size()); regDeriv.resize(weights.size());
@ -102,7 +102,7 @@ std::vector<double> Reg::regDerivTerm(std::vector<double> weights, double lambda
return regDeriv; return regDeriv;
} }
std::vector<std::vector<double>> Reg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) { std::vector<std::vector<double>> MLPPReg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg) {
std::vector<std::vector<double>> regDeriv; std::vector<std::vector<double>> regDeriv;
regDeriv.resize(weights.size()); regDeriv.resize(weights.size());
for (int i = 0; i < regDeriv.size(); i++) { for (int i = 0; i < regDeriv.size(); i++) {
@ -117,7 +117,7 @@ std::vector<std::vector<double>> Reg::regDerivTerm(std::vector<std::vector<doubl
return regDeriv; return regDeriv;
} }
double Reg::regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg, int j) { double MLPPReg::regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg, int j) {
MLPPActivation act; MLPPActivation act;
if (reg == "Ridge") { if (reg == "Ridge") {
return lambda * weights[j]; return lambda * weights[j];
@ -140,7 +140,7 @@ double Reg::regDerivTerm(std::vector<double> weights, double lambda, double alph
} }
} }
double Reg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg, int i, int j) { double MLPPReg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg, int i, int j) {
MLPPActivation act; MLPPActivation act;
if (reg == "Ridge") { if (reg == "Ridge") {
return lambda * weights[i][j]; return lambda * weights[i][j];
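For the regularization modes these overloads handle, the usual penalties and weight-wise derivatives are as follows (a summary consistent with the Ridge derivative lambda * weights[i][j] visible above; the library's exact ElasticNet weighting may differ):

\text{Ridge: } R(w) = \frac{\lambda}{2}\sum_j w_j^{2}, \qquad \partial R / \partial w_j = \lambda w_j
\text{Lasso: } R(w) = \lambda \sum_j |w_j|, \qquad \partial R / \partial w_j = \lambda\,\operatorname{sgn}(w_j)
\text{ElasticNet: } R(w) = \lambda \sum_j \left( \alpha\,|w_j| + \tfrac{1-\alpha}{2}\, w_j^{2} \right)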

@ -13,7 +13,7 @@
#include <string> #include <string>
class Reg { class MLPPReg {
public: public:
double regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg); double regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg);
double regTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg); double regTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg);

@ -16,7 +16,7 @@
#include <random> #include <random>
SoftmaxNet::SoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg, double lambda, double alpha) : MLPPSoftmaxNet::MLPPSoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n); y_hat.resize(n);
@ -26,18 +26,18 @@ SoftmaxNet::SoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<st
bias2 = Utilities::biasInitialization(n_class); bias2 = Utilities::biasInitialization(n_class);
} }
std::vector<double> SoftmaxNet::modelTest(std::vector<double> x) { std::vector<double> MLPPSoftmaxNet::modelTest(std::vector<double> x) {
return Evaluate(x); return Evaluate(x);
} }
std::vector<std::vector<double>> SoftmaxNet::modelSetTest(std::vector<std::vector<double>> X) { std::vector<std::vector<double>> MLPPSoftmaxNet::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X); return Evaluate(X);
} }
void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -90,10 +90,10 @@ void SoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool UI) {
} }
} }
void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -144,10 +144,10 @@ void SoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass(); forwardPass();
} }
void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -226,12 +226,12 @@ void SoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
forwardPass(); forwardPass();
} }
double SoftmaxNet::score() { double MLPPSoftmaxNet::score() {
Utilities util; Utilities util;
return util.performance(y_hat, outputSet); return util.performance(y_hat, outputSet);
} }
void SoftmaxNet::save(std::string fileName) { void MLPPSoftmaxNet::save(std::string fileName) {
Utilities util; Utilities util;
util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2); util.saveParameters(fileName, weights2, bias2, 1, 2);
@ -239,18 +239,18 @@ void SoftmaxNet::save(std::string fileName) {
MLPPLinAlg alg; MLPPLinAlg alg;
} }
std::vector<std::vector<double>> SoftmaxNet::getEmbeddings() { std::vector<std::vector<double>> MLPPSoftmaxNet::getEmbeddings() {
return weights1; return weights1;
} }
double SoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) { double MLPPSoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
Reg regularization; MLPPReg regularization;
MLPPData data; MLPPData data;
class MLPPCost cost; class MLPPCost cost;
return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg); return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
} }
std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<double>> X) { std::vector<std::vector<double>> MLPPSoftmaxNet::Evaluate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@ -258,7 +258,7 @@ std::vector<std::vector<double>> SoftmaxNet::Evaluate(std::vector<std::vector<do
return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2)); return avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
} }
std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> SoftmaxNet::propagate(std::vector<std::vector<double>> X) { std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPSoftmaxNet::propagate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector<std::vector<double>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
@ -266,7 +266,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> S
return { z2, a2 }; return { z2, a2 };
} }
std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) { std::vector<double> MLPPSoftmaxNet::Evaluate(std::vector<double> x) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@ -274,7 +274,7 @@ std::vector<double> SoftmaxNet::Evaluate(std::vector<double> x) {
return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2)); return avn.adjSoftmax(alg.addition(alg.mat_vec_mult(alg.transpose(weights2), a2), bias2));
} }
std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::vector<double> x) { std::tuple<std::vector<double>, std::vector<double>> MLPPSoftmaxNet::propagate(std::vector<double> x) {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector<double> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
@ -282,7 +282,7 @@ std::tuple<std::vector<double>, std::vector<double>> SoftmaxNet::propagate(std::
return { z2, a2 }; return { z2, a2 };
} }
void SoftmaxNet::forwardPass() { void MLPPSoftmaxNet::forwardPass() {
MLPPLinAlg alg; MLPPLinAlg alg;
MLPPActivation avn; MLPPActivation avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
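Taken together, these hunks describe a single-hidden-layer softmax classifier: the forward pass is

a_2 = \sigma(X W_1 + b_1), \qquad \hat{Y} = \operatorname{softmax}(a_2 W_2 + b_2),

and the cost (per the Cost method above) is \operatorname{CrossEntropy}(\hat{Y}, Y) plus the MLPPReg terms for W_1 and W_2.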

@ -13,9 +13,9 @@
class SoftmaxNet { class MLPPSoftmaxNet {
public: public:
SoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); MLPPSoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
std::vector<double> modelTest(std::vector<double> x); std::vector<double> modelTest(std::vector<double> x);
std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X); std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

@ -32,7 +32,7 @@ std::vector<std::vector<double>> SoftmaxReg::modelSetTest(std::vector<std::vecto
void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -71,7 +71,7 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) { void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -114,7 +114,7 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -164,7 +164,7 @@ void SoftmaxReg::save(std::string fileName) {
} }
double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) { double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
Reg regularization; MLPPReg regularization;
class MLPPCost cost; class MLPPCost cost;
return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }

@ -34,7 +34,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCost cost;
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -67,7 +67,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCost cost;
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -110,7 +110,7 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
class MLPPCost cost; class MLPPCost cost;
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;

@ -33,7 +33,7 @@ double TanhReg::modelTest(std::vector<double> x) {
void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -66,7 +66,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -106,7 +106,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
double cost_prev = 0; double cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -157,7 +157,7 @@ void TanhReg::save(std::string fileName) {
} }
double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) { double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
Reg regularization; MLPPReg regularization;
class MLPPCost cost; class MLPPCost cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }

@ -119,9 +119,9 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit
void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
MLPPLinAlg alg; MLPPLinAlg alg;
if (!network.empty()) { if (!network.empty()) {
outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01); outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
} else { // Should never happen. } else { // Should never happen.
outputLayer = new OutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01); outputLayer = new MLPPOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
} }
} }
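The "WeightClipping" regularization with bounds -0.01 and 0.01 corresponds to the weight-clipping step of the original WGAN critic. A minimal sketch of that step, assuming clipping is applied element-wise after each update (helper and variable names are illustrative, not the library's API):

#include <algorithm>
#include <vector>

// WGAN critic weight clipping: clamp every weight into [lower, upper] after each update.
void clipWeights(std::vector<double> &weights, double lower, double upper) {
	for (double &w : weights) {
		w = std::clamp(w, lower, upper); // requires C++17
	}
}

// e.g. clipWeights(criticWeights, -0.01, 0.01);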
@ -155,7 +155,7 @@ std::vector<double> WGAN::modelSetTestDiscriminator(std::vector<std::vector<doub
} }
double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) { double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
Reg regularization; MLPPReg regularization;
class MLPPCost cost; class MLPPCost cost;
double totalRegTerm = 0; double totalRegTerm = 0;
@ -220,7 +220,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> W
class MLPPCost cost; class MLPPCost cost;
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@ -256,7 +256,7 @@ std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(st
class MLPPCost cost; class MLPPCost cost;
MLPPActivation avn; MLPPActivation avn;
MLPPLinAlg alg; MLPPLinAlg alg;
Reg regularization; MLPPReg regularization;
std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

@ -47,7 +47,7 @@ private:
std::vector<double> y_hat; std::vector<double> y_hat;
std::vector<MLPPHiddenLayer> network; std::vector<MLPPHiddenLayer> network;
OutputLayer *outputLayer; MLPPOutputLayer *outputLayer;
int n; int n;
int k; int k;