Prefixed the remaining classes with MLPP.

Relintai 2023-01-25 01:09:37 +01:00
parent 43e1b8d1fc
commit 4f6b4d46de
41 changed files with 334 additions and 334 deletions
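The change below is mechanical: in the hunks shown here, Utilities, Stat, SoftmaxReg, SVC and TanhReg become MLPPUtilities, MLPPStat, MLPPSoftmaxReg, MLPPSVC and MLPPTanhReg, at each class definition and at every call site, with no behavioral change. A minimal before/after sketch of the call-site pattern, paraphrased from the diff rather than copied from any single hunk (surrounding declarations omitted):

// before this commit
// Utilities util;
// weights = Utilities::weightInitialization(k);
// bias = Utilities::biasInitialization();

// after this commit: same members, prefixed class names
MLPPUtilities util;
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
double acc = util.performance(y_hat, outputSet);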

View File

@ -137,7 +137,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate);
for (int i = 0; i < n_mini_batch; i++) {
@ -175,7 +175,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size,
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> v_hidden;
@ -232,7 +232,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size,
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> v_hidden;
@ -288,7 +288,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size,
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> v_hidden;
@ -344,7 +344,7 @@ void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, dou
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> m_hidden;
@ -411,7 +411,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> m_hidden;
@ -476,7 +476,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> m_hidden;
@ -546,7 +546,7 @@ void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size,
int n_mini_batch = n / mini_batch_size;
// always evaluate the result
// always do forward pass only ONCE at end.
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Initializing necessary components for Adam.
std::vector<std::vector<std::vector<double>>> m_hidden;
@ -606,13 +606,13 @@ void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size,
}
double MLPPANN::score() {
Utilities util;
MLPPUtilities util;
forwardPass();
return util.performance(y_hat, outputSet);
}
void MLPPANN::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
if (!network.empty()) {
util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
for (int i = 1; i < network.size(); i++) {
@ -750,13 +750,13 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> M
}
void MLPPANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
Utilities::UI(outputLayer->weights, outputLayer->bias);
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
if (!network.empty()) {
for (int i = network.size() - 1; i >= 0; i--) {
std::cout << "Layer " << i + 1 << ": " << std::endl;
Utilities::UI(network[i].weights, network[i].bias);
MLPPUtilities::UI(network[i].weights, network[i].bias);
}
}
}
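Every optimizer hunk above changes the same statement: the static MLPPUtilities::createMiniBatches call that splits the training set before the epoch loop. A condensed sketch of how that call sits in these training loops, assembled from the context lines in this file (gradient and UI steps elided):

int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
    for (int i = 0; i < n_mini_batch; i++) {
        // forward pass on batch i, gradient step, optional UI reporting
    }
    epoch++;
    if (epoch > max_epoch) {
        break;
    }
}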

View File

@ -18,10 +18,10 @@ MLPPAutoEncoder::MLPPAutoEncoder(std::vector<std::vector<double>> inputSet, int
MLPPActivation avn;
y_hat.resize(inputSet.size());
weights1 = Utilities::weightInitialization(k, n_hidden);
weights2 = Utilities::weightInitialization(n_hidden, k);
bias1 = Utilities::biasInitialization(n_hidden);
bias2 = Utilities::biasInitialization(k);
weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
weights2 = MLPPUtilities::weightInitialization(n_hidden, k);
bias1 = MLPPUtilities::biasInitialization(n_hidden);
bias2 = MLPPUtilities::biasInitialization(k);
}
std::vector<std::vector<double>> MLPPAutoEncoder::modelSetTest(std::vector<std::vector<double>> X) {
@ -71,11 +71,11 @@ void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputSet));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -121,11 +121,11 @@ void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate(inputSet[outputIndex]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { inputSet[outputIndex] }));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { inputSet[outputIndex] }));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -144,7 +144,7 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
std::vector<std::vector<std::vector<double>>> inputMiniBatches = Utilities::createMiniBatches(inputSet, n_mini_batch);
std::vector<std::vector<std::vector<double>>> inputMiniBatches = MLPPUtilities::createMiniBatches(inputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -181,11 +181,11 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputMiniBatches[i]));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputMiniBatches[i]));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
}
epoch++;
@ -197,12 +197,12 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s
}
double MLPPAutoEncoder::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, inputSet);
}
void MLPPAutoEncoder::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2);
}
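The instance-level MLPPUtilities members are renamed the same way; a short sketch of the score/save pattern used by MLPPAutoEncoder above and by most models below, with the file name illustrative and the trailing saveParameters arguments kept as they appear in the diff:

MLPPUtilities util;
double acc = util.performance(y_hat, inputSet);   // value returned by score()
util.saveParameters("autoencoder.bin", weights1, bias1, 0, 1);
util.saveParameters("autoencoder.bin", weights2, bias2, 1, 2);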

View File

@ -69,7 +69,7 @@ double MLPPBernoulliNB::modelTest(std::vector<double> x) {
}
double MLPPBernoulliNB::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}

View File

@ -17,8 +17,8 @@
MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPCLogLogReg::modelSetTest(std::vector<std::vector<double>> X) {
@ -52,8 +52,8 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -84,8 +84,8 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -124,8 +124,8 @@ void MLPPCLogLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -145,7 +145,7 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -167,8 +167,8 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -180,7 +180,7 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si
}
double MLPPCLogLogReg::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}

View File

@ -164,7 +164,7 @@ std::vector<std::vector<double>> MLPPConvolutions::pool(std::vector<std::vector<
}
}
if (type == "Average") {
Stat stat;
MLPPStat stat;
pooledMap[i][j] = stat.mean(poolingInput);
} else if (type == "Min") {
pooledMap[i][j] = alg.min(poolingInput);
@ -187,7 +187,7 @@ std::vector<std::vector<std::vector<double>>> MLPPConvolutions::pool(std::vector
double MLPPConvolutions::globalPool(std::vector<std::vector<double>> input, std::string type) {
MLPPLinAlg alg;
if (type == "Average") {
Stat stat;
MLPPStat stat;
return stat.mean(alg.flatten(input));
} else if (type == "Min") {
return alg.min(alg.flatten(input));

View File

@ -699,7 +699,7 @@ std::vector<std::vector<double>> MLPPData::featureScaling(std::vector<std::vecto
std::vector<std::vector<double>> MLPPData::meanNormalization(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
Stat stat;
MLPPStat stat;
// (X_j - mu_j) / std_j, for every j
X = meanCentering(X);
@ -711,7 +711,7 @@ std::vector<std::vector<double>> MLPPData::meanNormalization(std::vector<std::ve
std::vector<std::vector<double>> MLPPData::meanCentering(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
Stat stat;
MLPPStat stat;
for (int i = 0; i < X.size(); i++) {
double mean_i = stat.mean(X[i]);
for (int j = 0; j < X[i].size(); j++) {

View File

@ -18,8 +18,8 @@
MLPPDualSVC::MLPPDualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) {
y_hat.resize(n);
bias = Utilities::biasInitialization();
alpha = Utilities::weightInitialization(n); // One alpha for all training examples, as per the lagrangian multipliers.
bias = MLPPUtilities::biasInitialization();
alpha = MLPPUtilities::weightInitialization(n); // One alpha for all training examples, as per the lagrangian multipliers.
K = kernelFunction(inputSet, inputSet, kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated.
}
@ -67,8 +67,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(alpha, inputSet, outputSet));
Utilities::UI(alpha, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha, inputSet, outputSet));
MLPPUtilities::UI(alpha, bias);
std::cout << score() << std::endl; // TO DO: DELETE THIS.
}
epoch++;
@ -102,8 +102,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// y_hat = Evaluate({inputSet[outputIndex]});
// if(UI) {
// Utilities::CostInfo(epoch, cost_prev, Cost(alpha));
// Utilities::UI(weights, bias);
// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha));
// MLPPUtilities::UI(weights, bias);
// }
// epoch++;
@ -122,7 +122,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// // Creating the mini-batches
// int n_mini_batch = n/mini_batch_size;
// auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// while(true){
// for(int i = 0; i < n_mini_batch; i++){
@ -142,8 +142,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// y_hat = Evaluate(inputMiniBatches[i]);
// if(UI) {
// Utilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
// Utilities::UI(weights, bias);
// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
// MLPPUtilities::UI(weights, bias);
// }
// }
// epoch++;
@ -153,12 +153,12 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI)
// }
double MLPPDualSVC::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPDualSVC::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, alpha, bias);
}

View File

@ -18,9 +18,9 @@
MLPPExpReg::MLPPExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
initial = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
initial = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPExpReg::modelSetTest(std::vector<std::vector<double>> X) {
@ -77,8 +77,8 @@ void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -122,8 +122,8 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -142,7 +142,7 @@ void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -181,8 +181,8 @@ void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -194,12 +194,12 @@ void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
}
double MLPPExpReg::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPExpReg::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights, initial, bias);
}

View File

@ -78,13 +78,13 @@ void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
double MLPPGAN::score() {
MLPPLinAlg alg;
Utilities util;
MLPPUtilities util;
forwardPass();
return util.performance(y_hat, alg.onevec(n));
}
void MLPPGAN::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
if (!network.empty()) {
util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
for (int i = 1; i < network.size(); i++) {
@ -273,13 +273,13 @@ std::vector<std::vector<std::vector<double>>> MLPPGAN::computeGeneratorGradients
}
void MLPPGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
Utilities::UI(outputLayer->weights, outputLayer->bias);
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
if (!network.empty()) {
for (int i = network.size() - 1; i >= 0; i--) {
std::cout << "Layer " << i + 1 << ": " << std::endl;
Utilities::UI(network[i].weights, network[i].bias);
MLPPUtilities::UI(network[i].weights, network[i].bias);
}
}
}
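Note the split the hunks above make visible: CostInfo and UI are invoked as statics on MLPPUtilities, while performance and saveParameters go through an instance. A sketch of the static reporting form (epoch and cost values are placeholders):

MLPPUtilities::CostInfo(epoch, cost_prev, current_cost);      // per-epoch cost report
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);   // parameter readout for one layer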

View File

@ -22,7 +22,7 @@ void MLPPGaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
}
bool MLPPGaussMarkovChecker::arithmeticMean(std::vector<double> eps) {
Stat stat;
MLPPStat stat;
if (stat.mean(eps) == 0) {
return 1;
} else {
@ -31,7 +31,7 @@ bool MLPPGaussMarkovChecker::arithmeticMean(std::vector<double> eps) {
}
bool MLPPGaussMarkovChecker::homoscedasticity(std::vector<double> eps) {
Stat stat;
MLPPStat stat;
double currentVar = (eps[0] - stat.mean(eps)) * (eps[0] - stat.mean(eps)) / eps.size();
for (int i = 0; i < eps.size(); i++) {
if (currentVar != (eps[i] - stat.mean(eps)) * (eps[i] - stat.mean(eps)) / eps.size()) {
@ -42,7 +42,7 @@ bool MLPPGaussMarkovChecker::homoscedasticity(std::vector<double> eps) {
}
bool MLPPGaussMarkovChecker::exogeneity(std::vector<double> eps) {
Stat stat;
MLPPStat stat;
for (int i = 0; i < eps.size(); i++) {
for (int j = 0; j < eps.size(); j++) {
if (i != j) {

View File

@ -30,7 +30,7 @@ std::vector<double> MLPPGaussianNB::modelSetTest(std::vector<std::vector<double>
}
double MLPPGaussianNB::modelTest(std::vector<double> x) {
Stat stat;
MLPPStat stat;
MLPPLinAlg alg;
double score[class_num];
@ -43,12 +43,12 @@ double MLPPGaussianNB::modelTest(std::vector<double> x) {
}
double MLPPGaussianNB::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPGaussianNB::Evaluate() {
Stat stat;
MLPPStat stat;
MLPPLinAlg alg;
// Computing mu_k_y and sigma_k_y

View File

@ -15,8 +15,8 @@
MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
bias = Utilities::biasInitialization(n_hidden);
weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit);
bias = MLPPUtilities::biasInitialization(n_hidden);
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;

View File

@ -70,7 +70,7 @@ void MLPPKMeans::train(int epoch_num, bool UI) {
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost());
MLPPUtilities::CostInfo(epoch, cost_prev, Cost());
}
epoch++;

View File

@ -30,7 +30,7 @@ int MLPPKNN::modelTest(std::vector<double> x) {
}
double MLPPKNN::score() {
Utilities util;
MLPPUtilities util;
return util.performance(modelSetTest(inputSet), outputSet);
}

View File

@ -507,7 +507,7 @@ std::vector<std::vector<double>> MLPPLinAlg::identity(double d) {
}
std::vector<std::vector<double>> MLPPLinAlg::cov(std::vector<std::vector<double>> A) {
Stat stat;
MLPPStat stat;
std::vector<std::vector<double>> covMat;
covMat.resize(A.size());
for (int i = 0; i < covMat.size(); i++) {

View File

@ -21,8 +21,8 @@ MLPPLinReg::MLPPLinReg(std::vector<std::vector<double>> inputSet, std::vector<do
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPLinReg::modelSetTest(std::vector<std::vector<double>> X) {
@ -55,8 +55,8 @@ void MLPPLinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
if (epoch > max_epoch) {
@ -86,8 +86,8 @@ void MLPPLinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
if (epoch > max_epoch) {
@ -123,8 +123,8 @@ void MLPPLinReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -143,7 +143,7 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -161,8 +161,8 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -175,7 +175,7 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
void MLPPLinReg::normalEquation() {
MLPPLinAlg alg;
Stat stat;
MLPPStat stat;
std::vector<double> x_means;
std::vector<std::vector<double>> inputSetT = alg.transpose(inputSet);
@ -208,12 +208,12 @@ void MLPPLinReg::normalEquation() {
}
double MLPPLinReg::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPLinReg::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}

View File

@ -18,8 +18,8 @@
MLPPLogReg::MLPPLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPLogReg::modelSetTest(std::vector<std::vector<double>> X) {
@ -51,8 +51,8 @@ void MLPPLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -83,8 +83,8 @@ void MLPPLogReg::MLE(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
if (epoch > max_epoch) {
@ -120,8 +120,8 @@ void MLPPLogReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -140,7 +140,7 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -158,8 +158,8 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -171,12 +171,12 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
}
double MLPPLogReg::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPLogReg::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}

View File

@ -101,14 +101,14 @@ void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
Utilities::UI(outputLayer->weights, outputLayer->bias);
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
if (!network.empty()) {
std::cout << "Layer " << network.size() << ": " << std::endl;
for (int i = network.size() - 1; i >= 0; i--) {
std::cout << "Layer " << i + 1 << ": " << std::endl;
Utilities::UI(network[i].weights, network[i].bias);
MLPPUtilities::UI(network[i].weights, network[i].bias);
}
}
}
@ -121,13 +121,13 @@ void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
double MLPPMANN::score() {
Utilities util;
MLPPUtilities util;
forwardPass();
return util.performance(y_hat, outputSet);
}
void MLPPMANN::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
if (!network.empty()) {
util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
for (int i = 1; i < network.size(); i++) {

View File

@ -21,10 +21,10 @@ MLPPMLP::MLPPMLP(std::vector<std::vector<double>> inputSet, std::vector<double>
MLPPActivation avn;
y_hat.resize(n);
weights1 = Utilities::weightInitialization(k, n_hidden);
weights2 = Utilities::weightInitialization(n_hidden);
bias1 = Utilities::biasInitialization(n_hidden);
bias2 = Utilities::biasInitialization();
weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
weights2 = MLPPUtilities::weightInitialization(n_hidden);
bias1 = MLPPUtilities::biasInitialization(n_hidden);
bias2 = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPMLP::modelSetTest(std::vector<std::vector<double>> X) {
@ -80,11 +80,11 @@ void MLPPMLP::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -133,11 +133,11 @@ void MLPPMLP::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate(inputSet[outputIndex]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -157,7 +157,7 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -199,11 +199,11 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
}
epoch++;
@ -215,12 +215,12 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
}
double MLPPMLP::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPMLP::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2);
}

View File

@ -14,8 +14,8 @@
MLPPMultiOutputLayer::MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
bias = Utilities::biasInitialization(n_output);
weights = MLPPUtilities::weightInitialization(n_hidden, n_output, weightInit);
bias = MLPPUtilities::biasInitialization(n_output);
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;

View File

@ -49,7 +49,7 @@ double MLPPMultinomialNB::modelTest(std::vector<double> x) {
}
double MLPPMultinomialNB::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}

View File

@ -14,7 +14,7 @@ MLPPOutlierFinder::MLPPOutlierFinder(int threshold) :
}
std::vector<std::vector<double>> MLPPOutlierFinder::modelSetTest(std::vector<std::vector<double>> inputSet) {
Stat stat;
MLPPStat stat;
std::vector<std::vector<double>> outliers;
outliers.resize(inputSet.size());
for (int i = 0; i < inputSet.size(); i++) {
@ -29,7 +29,7 @@ std::vector<std::vector<double>> MLPPOutlierFinder::modelSetTest(std::vector<std
}
std::vector<double> MLPPOutlierFinder::modelTest(std::vector<double> inputSet) {
Stat stat;
MLPPStat stat;
std::vector<double> outliers;
for (int i = 0; i < inputSet.size(); i++) {
double z = (inputSet[i] - stat.mean(inputSet)) / stat.standardDeviation(inputSet);

View File

@ -14,8 +14,8 @@
MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(n_hidden, weightInit);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(n_hidden, weightInit);
bias = MLPPUtilities::biasInitialization();
activation_map["Linear"] = &MLPPActivation::linear;
activationTest_map["Linear"] = &MLPPActivation::linear;

View File

@ -18,8 +18,8 @@
MLPPProbitReg::MLPPProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> MLPPProbitReg::modelSetTest(std::vector<std::vector<double>> X) {
@ -52,8 +52,8 @@ void MLPPProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -85,8 +85,8 @@ void MLPPProbitReg::MLE(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -126,8 +126,8 @@ void MLPPProbitReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -147,7 +147,7 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Creating the mini-batches
for (int i = 0; i < n_mini_batch; i++) {
@ -185,8 +185,8 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -198,12 +198,12 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz
}
double MLPPProbitReg::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPProbitReg::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}

View File

@ -20,10 +20,10 @@ MLPPSoftmaxNet::MLPPSoftmaxNet(std::vector<std::vector<double>> inputSet, std::v
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights1 = Utilities::weightInitialization(k, n_hidden);
weights2 = Utilities::weightInitialization(n_hidden, n_class);
bias1 = Utilities::biasInitialization(n_hidden);
bias2 = Utilities::biasInitialization(n_class);
weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
weights2 = MLPPUtilities::weightInitialization(n_hidden, n_class);
bias1 = MLPPUtilities::biasInitialization(n_hidden);
bias2 = MLPPUtilities::biasInitialization(n_class);
}
std::vector<double> MLPPSoftmaxNet::modelTest(std::vector<double> x) {
@ -76,11 +76,11 @@ void MLPPSoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool U
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -129,11 +129,11 @@ void MLPPSoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate(inputSet[outputIndex]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
epoch++;
@ -153,7 +153,7 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
// Creating the mini-batches
for (int i = 0; i < n_mini_batch; i++) {
@ -211,11 +211,11 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
std::cout << "Layer 1:" << std::endl;
Utilities::UI(weights1, bias1);
MLPPUtilities::UI(weights1, bias1);
std::cout << "Layer 2:" << std::endl;
Utilities::UI(weights2, bias2);
MLPPUtilities::UI(weights2, bias2);
}
}
epoch++;
@ -227,12 +227,12 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si
}
double MLPPSoftmaxNet::score() {
Utilities util;
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void MLPPSoftmaxNet::save(std::string fileName) {
Utilities util;
MLPPUtilities util;
util.saveParameters(fileName, weights1, bias1, 0, 1);
util.saveParameters(fileName, weights2, bias2, 1, 2);

View File

@ -15,22 +15,22 @@
#include <random>
SoftmaxReg::SoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg, double lambda, double alpha) :
MLPPSoftmaxReg::MLPPSoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k, n_class);
bias = Utilities::biasInitialization(n_class);
weights = MLPPUtilities::weightInitialization(k, n_class);
bias = MLPPUtilities::biasInitialization(n_class);
}
std::vector<double> SoftmaxReg::modelTest(std::vector<double> x) {
std::vector<double> MLPPSoftmaxReg::modelTest(std::vector<double> x) {
return Evaluate(x);
}
std::vector<std::vector<double>> SoftmaxReg::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<std::vector<double>> MLPPSoftmaxReg::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X);
}
void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPSoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg;
MLPPReg regularization;
double cost_prev = 0;
@ -58,8 +58,8 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -69,7 +69,7 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPSoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg;
MLPPReg regularization;
double cost_prev = 0;
@ -100,8 +100,8 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -112,7 +112,7 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPSoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPLinAlg alg;
MLPPReg regularization;
double cost_prev = 0;
@ -120,7 +120,7 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -141,8 +141,8 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@ -153,29 +153,29 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size,
forwardPass();
}
double SoftmaxReg::score() {
Utilities util;
double MLPPSoftmaxReg::score() {
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void SoftmaxReg::save(std::string fileName) {
Utilities util;
void MLPPSoftmaxReg::save(std::string fileName) {
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}
double SoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
double MLPPSoftmaxReg::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
MLPPReg regularization;
class MLPPCost cost;
return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}
std::vector<double> SoftmaxReg::Evaluate(std::vector<double> x) {
std::vector<double> MLPPSoftmaxReg::Evaluate(std::vector<double> x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x)));
}
std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<double>> X) {
std::vector<std::vector<double>> MLPPSoftmaxReg::Evaluate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
MLPPActivation avn;
@ -183,7 +183,7 @@ std::vector<std::vector<double>> SoftmaxReg::Evaluate(std::vector<std::vector<do
}
// softmax ( wTx + b )
void SoftmaxReg::forwardPass() {
void MLPPSoftmaxReg::forwardPass() {
MLPPLinAlg alg;
MLPPActivation avn;

View File

@ -13,9 +13,9 @@
class SoftmaxReg {
class MLPPSoftmaxReg {
public:
SoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
MLPPSoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
std::vector<double> modelTest(std::vector<double> x);
std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@ -15,15 +15,15 @@
#include <iostream>
double Stat::b0Estimation(const std::vector<double> &x, const std::vector<double> &y) {
double MLPPStat::b0Estimation(const std::vector<double> &x, const std::vector<double> &y) {
return mean(y) - b1Estimation(x, y) * mean(x);
}
double Stat::b1Estimation(const std::vector<double> &x, const std::vector<double> &y) {
double MLPPStat::b1Estimation(const std::vector<double> &x, const std::vector<double> &y) {
return covariance(x, y) / variance(x);
}
double Stat::mean(const std::vector<double> &x) {
double MLPPStat::mean(const std::vector<double> &x) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += x[i];
@ -31,7 +31,7 @@ double Stat::mean(const std::vector<double> &x) {
return sum / x.size();
}
double Stat::median(std::vector<double> x) {
double MLPPStat::median(std::vector<double> x) {
double center = double(x.size()) / double(2);
sort(x.begin(), x.end());
if (x.size() % 2 == 0) {
@ -41,7 +41,7 @@ double Stat::median(std::vector<double> x) {
}
}
std::vector<double> Stat::mode(const std::vector<double> &x) {
std::vector<double> MLPPStat::mode(const std::vector<double> &x) {
MLPPData data;
std::vector<double> x_set = data.vecToSet(x);
std::map<double, int> element_num;
@ -65,16 +65,16 @@ std::vector<double> Stat::mode(const std::vector<double> &x) {
return modes;
}
double Stat::range(const std::vector<double> &x) {
double MLPPStat::range(const std::vector<double> &x) {
MLPPLinAlg alg;
return alg.max(x) - alg.min(x);
}
double Stat::midrange(const std::vector<double> &x) {
double MLPPStat::midrange(const std::vector<double> &x) {
return range(x) / 2;
}
double Stat::absAvgDeviation(const std::vector<double> &x) {
double MLPPStat::absAvgDeviation(const std::vector<double> &x) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += std::abs(x[i] - mean(x));
@ -82,11 +82,11 @@ double Stat::absAvgDeviation(const std::vector<double> &x) {
return sum / x.size();
}
double Stat::standardDeviation(const std::vector<double> &x) {
double MLPPStat::standardDeviation(const std::vector<double> &x) {
return std::sqrt(variance(x));
}
double Stat::variance(const std::vector<double> &x) {
double MLPPStat::variance(const std::vector<double> &x) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += (x[i] - mean(x)) * (x[i] - mean(x));
@ -94,7 +94,7 @@ double Stat::variance(const std::vector<double> &x) {
return sum / (x.size() - 1);
}
double Stat::covariance(const std::vector<double> &x, const std::vector<double> &y) {
double MLPPStat::covariance(const std::vector<double> &x, const std::vector<double> &y) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += (x[i] - mean(x)) * (y[i] - mean(y));
@ -102,20 +102,20 @@ double Stat::covariance(const std::vector<double> &x, const std::vector<double>
return sum / (x.size() - 1);
}
double Stat::correlation(const std::vector<double> &x, const std::vector<double> &y) {
double MLPPStat::correlation(const std::vector<double> &x, const std::vector<double> &y) {
return covariance(x, y) / (standardDeviation(x) * standardDeviation(y));
}
double Stat::R2(const std::vector<double> &x, const std::vector<double> &y) {
double MLPPStat::R2(const std::vector<double> &x, const std::vector<double> &y) {
return correlation(x, y) * correlation(x, y);
}
double Stat::chebyshevIneq(const double k) {
double MLPPStat::chebyshevIneq(const double k) {
// X may or may not belong to a Gaussian Distribution
return 1 - 1 / (k * k);
}
double Stat::weightedMean(const std::vector<double> &x, const std::vector<double> &weights) {
double MLPPStat::weightedMean(const std::vector<double> &x, const std::vector<double> &weights) {
double sum = 0;
double weights_sum = 0;
for (int i = 0; i < x.size(); i++) {
@ -125,7 +125,7 @@ double Stat::weightedMean(const std::vector<double> &x, const std::vector<double
return sum / weights_sum;
}
double Stat::geometricMean(const std::vector<double> &x) {
double MLPPStat::geometricMean(const std::vector<double> &x) {
double product = 1;
for (int i = 0; i < x.size(); i++) {
product *= x[i];
@ -133,7 +133,7 @@ double Stat::geometricMean(const std::vector<double> &x) {
return std::pow(product, 1.0 / x.size());
}
double Stat::harmonicMean(const std::vector<double> &x) {
double MLPPStat::harmonicMean(const std::vector<double> &x) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += 1 / x[i];
@ -141,7 +141,7 @@ double Stat::harmonicMean(const std::vector<double> &x) {
return x.size() / sum;
}
double Stat::RMS(const std::vector<double> &x) {
double MLPPStat::RMS(const std::vector<double> &x) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += x[i] * x[i];
@ -149,7 +149,7 @@ double Stat::RMS(const std::vector<double> &x) {
return sqrt(sum / x.size());
}
double Stat::powerMean(const std::vector<double> &x, const double p) {
double MLPPStat::powerMean(const std::vector<double> &x, const double p) {
double sum = 0;
for (int i = 0; i < x.size(); i++) {
sum += std::pow(x[i], p);
@ -157,7 +157,7 @@ double Stat::powerMean(const std::vector<double> &x, const double p) {
return std::pow(sum / x.size(), 1 / p);
}
double Stat::lehmerMean(const std::vector<double> &x, const double p) {
double MLPPStat::lehmerMean(const std::vector<double> &x, const double p) {
double num = 0;
double den = 0;
for (int i = 0; i < x.size(); i++) {
@ -167,7 +167,7 @@ double Stat::lehmerMean(const std::vector<double> &x, const double p) {
return num / den;
}
double Stat::weightedLehmerMean(const std::vector<double> &x, const std::vector<double> &weights, const double p) {
double MLPPStat::weightedLehmerMean(const std::vector<double> &x, const std::vector<double> &weights, const double p) {
double num = 0;
double den = 0;
for (int i = 0; i < x.size(); i++) {
@ -177,38 +177,38 @@ double Stat::weightedLehmerMean(const std::vector<double> &x, const std::vector<
return num / den;
}
double Stat::heronianMean(const double A, const double B) {
double MLPPStat::heronianMean(const double A, const double B) {
return (A + sqrt(A * B) + B) / 3;
}
double Stat::contraHarmonicMean(const std::vector<double> &x) {
double MLPPStat::contraHarmonicMean(const std::vector<double> &x) {
return lehmerMean(x, 2);
}
double Stat::heinzMean(const double A, const double B, const double x) {
double MLPPStat::heinzMean(const double A, const double B, const double x) {
return (std::pow(A, x) * std::pow(B, 1 - x) + std::pow(A, 1 - x) * std::pow(B, x)) / 2;
}
double Stat::neumanSandorMean(const double a, const double b) {
double MLPPStat::neumanSandorMean(const double a, const double b) {
MLPPActivation avn;
return (a - b) / 2 * avn.arsinh((a - b) / (a + b));
}
double Stat::stolarskyMean(const double x, const double y, const double p) {
double MLPPStat::stolarskyMean(const double x, const double y, const double p) {
if (x == y) {
return x;
}
return std::pow((std::pow(x, p) - std::pow(y, p)) / (p * (x - y)), 1 / (p - 1));
}
double Stat::identricMean(const double x, const double y) {
double MLPPStat::identricMean(const double x, const double y) {
if (x == y) {
return x;
}
return (1 / M_E) * std::pow(std::pow(x, x) / std::pow(y, y), 1 / (x - y));
}
double Stat::logMean(const double x, const double y) {
double MLPPStat::logMean(const double x, const double y) {
if (x == y) {
return x;
}
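The stat.cpp hunks above are all member-function renames of the same class. A brief usage sketch of the renamed class, restricted to members whose definitions appear in those hunks (data values are illustrative; the MLPPStat header include is omitted):

#include <vector>

MLPPStat stat;
std::vector<double> x = { 1.0, 2.0, 3.0, 4.0 };
double mu  = stat.mean(x);                // 2.5
double var = stat.variance(x);            // sample variance, normalized by n - 1
double sd  = stat.standardDeviation(x);   // sqrt(variance)
double rho = stat.correlation(x, x);      // covariance / (sd * sd), here 1.0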

View File

@ -11,7 +11,7 @@
#include <vector>
class Stat {
class MLPPStat {
public:
// These functions are for univariate lin reg module- not for users.
double b0Estimation(const std::vector<double> &x, const std::vector<double> &y);

View File

@ -15,22 +15,22 @@
#include <random>
SVC::SVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C) :
MLPPSVC::MLPPSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> SVC::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> MLPPSVC::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X);
}
double SVC::modelTest(std::vector<double> x) {
double MLPPSVC::modelTest(std::vector<double> x) {
return Evaluate(x);
}
void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
MLPPLinAlg alg;
@ -52,8 +52,8 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet, weights, C));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet, weights, C));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -63,7 +63,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPSVC::SGD(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
MLPPLinAlg alg;
@ -94,8 +94,8 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ z }, { outputSet[outputIndex] }, weights, C));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ z }, { outputSet[outputIndex] }, weights, C));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@ -106,7 +106,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class MLPPCost cost;
MLPPActivation avn;
MLPPLinAlg alg;
@ -116,7 +116,7 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@@ -136,8 +136,8 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@@ -148,47 +148,47 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI
forwardPass();
}
double SVC::score() {
Utilities util;
double MLPPSVC::score() {
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void SVC::save(std::string fileName) {
Utilities util;
void MLPPSVC::save(std::string fileName) {
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}
double SVC::Cost(std::vector<double> z, std::vector<double> y, std::vector<double> weights, double C) {
double MLPPSVC::Cost(std::vector<double> z, std::vector<double> y, std::vector<double> weights, double C) {
class MLPPCost cost;
return cost.HingeLoss(z, y, weights, C);
}
std::vector<double> SVC::Evaluate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPSVC::Evaluate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> SVC::propagate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPSVC::propagate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
MLPPActivation avn;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double SVC::Evaluate(std::vector<double> x) {
double MLPPSVC::Evaluate(std::vector<double> x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign(alg.dot(weights, x) + bias);
}
double SVC::propagate(std::vector<double> x) {
double MLPPSVC::propagate(std::vector<double> x) {
MLPPLinAlg alg;
MLPPActivation avn;
return alg.dot(weights, x) + bias;
}
// sign ( wTx + b )
void SVC::forwardPass() {
void MLPPSVC::forwardPass() {
MLPPLinAlg alg;
MLPPActivation avn;

View File

@@ -16,9 +16,9 @@
class SVC {
class MLPPSVC {
public:
SVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C);
MLPPSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C);
std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
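A minimal usage sketch of the renamed class, using only the API visible in this diff; the include path is an assumption (it is not shown here), the data is illustrative, and the {-1, 1} labels are inferred from the hinge-loss cost and the sign() evaluation above:

    #include "svc.h"   // assumed path; only the class rename is part of this diff
    #include <iostream>
    #include <vector>

    int main() {
        // Tiny toy problem: two features, hinge-style labels in {-1, 1}.
        std::vector<std::vector<double>> X = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } };
        std::vector<double> y = { -1, -1, 1, 1 };

        MLPPSVC model(X, y, 1.0);                 // C is forwarded to the hinge-loss cost
        model.gradientDescent(0.01, 1000, false); // learning_rate, max_epoch, UI off
        std::cout << "Training accuracy: " << model.score() << std::endl;
        return 0;
    }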

View File

@@ -15,22 +15,22 @@
#include <random>
TanhReg::TanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
MLPPTanhReg::MLPPTanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
weights = Utilities::weightInitialization(k);
bias = Utilities::biasInitialization();
weights = MLPPUtilities::weightInitialization(k);
bias = MLPPUtilities::biasInitialization();
}
std::vector<double> TanhReg::modelSetTest(std::vector<std::vector<double>> X) {
std::vector<double> MLPPTanhReg::modelSetTest(std::vector<std::vector<double>> X) {
return Evaluate(X);
}
double TanhReg::modelTest(std::vector<double> x) {
double MLPPTanhReg::modelTest(std::vector<double> x) {
return Evaluate(x);
}
void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPTanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
@@ -53,8 +53,8 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
// UI PORTION
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@@ -64,7 +64,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
void MLPPTanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg;
MLPPReg regularization;
double cost_prev = 0;
@@ -91,8 +91,8 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
y_hat = Evaluate({ inputSet[outputIndex] });
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
MLPPUtilities::UI(weights, bias);
}
epoch++;
@@ -103,7 +103,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) {
forwardPass();
}
void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
void MLPPTanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivation avn;
MLPPLinAlg alg;
MLPPReg regularization;
@@ -112,7 +112,7 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
// Creating the mini-batches
int n_mini_batch = n / mini_batch_size;
auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@@ -134,8 +134,8 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
y_hat = Evaluate(inputMiniBatches[i]);
if (UI) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
Utilities::UI(weights, bias);
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
MLPPUtilities::UI(weights, bias);
}
}
epoch++;
@@ -146,46 +146,46 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo
forwardPass();
}
double TanhReg::score() {
Utilities util;
double MLPPTanhReg::score() {
MLPPUtilities util;
return util.performance(y_hat, outputSet);
}
void TanhReg::save(std::string fileName) {
Utilities util;
void MLPPTanhReg::save(std::string fileName) {
MLPPUtilities util;
util.saveParameters(fileName, weights, bias);
}
double TanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
double MLPPTanhReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
MLPPReg regularization;
class MLPPCost cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}
std::vector<double> TanhReg::Evaluate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPTanhReg::Evaluate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<double> TanhReg::propagate(std::vector<std::vector<double>> X) {
std::vector<double> MLPPTanhReg::propagate(std::vector<std::vector<double>> X) {
MLPPLinAlg alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
double TanhReg::Evaluate(std::vector<double> x) {
double MLPPTanhReg::Evaluate(std::vector<double> x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh(alg.dot(weights, x) + bias);
}
double TanhReg::propagate(std::vector<double> x) {
double MLPPTanhReg::propagate(std::vector<double> x) {
MLPPLinAlg alg;
return alg.dot(weights, x) + bias;
}
// Tanh ( wTx + b )
void TanhReg::forwardPass() {
void MLPPTanhReg::forwardPass() {
MLPPLinAlg alg;
MLPPActivation avn;
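Only the class prefix changes in this file, so as a reading aid: the model is y_hat = tanh(w^T x + b) fitted against MSE plus the chosen regularizer (see Cost above). The gradients the descent loops accumulate should therefore reduce, up to the constant factor fixed by the MSE convention and the regularization term, to the standard chain-rule expressions

    \frac{\partial \mathrm{MSE}}{\partial w} = \frac{1}{n} X^\top \big[ (\hat{y} - y) \odot (1 - \hat{y}^{2}) \big], \qquad
    \frac{\partial \mathrm{MSE}}{\partial b} = \frac{1}{n} \sum_i (\hat{y}_i - y_i)(1 - \hat{y}_i^{2})

using tanh'(z) = 1 - tanh^2(z); this is quoted from the textbook derivation, not re-derived from the implementation.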

View File

@@ -13,9 +13,9 @@
class TanhReg {
class MLPPTanhReg {
public:
TanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
MLPPTanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
double modelTest(std::vector<double> x);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

View File

@@ -14,7 +14,7 @@
// DCT ii.
// https://www.mathworks.com/help/images/discrete-cosine-transform.html
std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector<std::vector<double>> A) {
std::vector<std::vector<double>> MLPPTransforms::discreteCosineTransform(std::vector<std::vector<double>> A) {
MLPPLinAlg alg;
A = alg.scalarAdd(-128, A); // Center around 0.
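The linked MathWorks page defines the 2-D DCT-II that discreteCosineTransform computes; the rest of the body is outside this hunk, so this is the reference definition rather than a reading of the code. For an M x N block A (after the -128 centering, the usual JPEG level shift for 8-bit pixels):

    B_{pq} = \alpha_p \alpha_q \sum_{m=0}^{M-1} \sum_{n=0}^{N-1} A_{mn} \cos\frac{\pi (2m+1) p}{2M} \cos\frac{\pi (2n+1) q}{2N},
    \qquad \alpha_p = \begin{cases} 1/\sqrt{M}, & p = 0 \\ \sqrt{2/M}, & 1 \le p \le M-1 \end{cases}

with \alpha_q defined analogously from N.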

View File

@@ -11,7 +11,7 @@
#include <vector>
class Transforms {
class MLPPTransforms {
public:
std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);
};

View File

@@ -16,19 +16,19 @@
// ŷ = b0 + b1x1
UniLinReg::UniLinReg(std::vector<double> x, std::vector<double> y) :
MLPPUniLinReg::MLPPUniLinReg(std::vector<double> x, std::vector<double> y) :
inputSet(x), outputSet(y) {
Stat estimator;
MLPPStat estimator;
b1 = estimator.b1Estimation(inputSet, outputSet);
b0 = estimator.b0Estimation(inputSet, outputSet);
}
std::vector<double> UniLinReg::modelSetTest(std::vector<double> x) {
std::vector<double> MLPPUniLinReg::modelSetTest(std::vector<double> x) {
MLPPLinAlg alg;
return alg.scalarAdd(b0, alg.scalarMultiply(b1, x));
}
double UniLinReg::modelTest(double input) {
double MLPPUniLinReg::modelTest(double input) {
return b0 + b1 * input;
}
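The two estimator calls in the constructor are ordinary least squares for y_hat = b_0 + b_1 x_1; assuming b1Estimation and b0Estimation implement the textbook closed form (their bodies are not part of this diff):

    b_1 = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sum_i (x_i - \bar{x})^2}, \qquad b_0 = \bar{y} - b_1 \bar{x}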

View File

@@ -11,9 +11,9 @@
#include <vector>
class UniLinReg {
class MLPPUniLinReg {
public:
UniLinReg(std::vector<double> x, std::vector<double> y);
MLPPUniLinReg(std::vector<double> x, std::vector<double> y);
std::vector<double> modelSetTest(std::vector<double> x);
double modelTest(double x);

View File

@@ -12,7 +12,7 @@
std::vector<double> Utilities::weightInitialization(int n, std::string type) {
std::vector<double> MLPPUtilities::weightInitialization(int n, std::string type) {
std::random_device rd;
std::default_random_engine generator(rd());
@@ -47,7 +47,7 @@ std::vector<double> Utilities::weightInitialization(int n, std::string type) {
return weights;
}
double Utilities::biasInitialization() {
double MLPPUtilities::biasInitialization() {
std::random_device rd;
std::default_random_engine generator(rd());
std::uniform_real_distribution<double> distribution(0, 1);
@@ -55,7 +55,7 @@ double Utilities::biasInitialization() {
return distribution(generator);
}
std::vector<std::vector<double>> Utilities::weightInitialization(int n, int m, std::string type) {
std::vector<std::vector<double>> MLPPUtilities::weightInitialization(int n, int m, std::string type) {
std::random_device rd;
std::default_random_engine generator(rd());
@@ -94,7 +94,7 @@ std::vector<std::vector<double>> Utilities::weightInitialization(int n, int m, s
return weights;
}
std::vector<double> Utilities::biasInitialization(int n) {
std::vector<double> MLPPUtilities::biasInitialization(int n) {
std::vector<double> bias;
std::random_device rd;
std::default_random_engine generator(rd());
@@ -106,7 +106,7 @@ std::vector<double> Utilities::biasInitialization(int n) {
return bias;
}
double Utilities::performance(std::vector<double> y_hat, std::vector<double> outputSet) {
double MLPPUtilities::performance(std::vector<double> y_hat, std::vector<double> outputSet) {
double correct = 0;
for (int i = 0; i < y_hat.size(); i++) {
if (std::round(y_hat[i]) == outputSet[i]) {
@@ -116,7 +116,7 @@ double Utilities::performance(std::vector<double> y_hat, std::vector<double> out
return correct / y_hat.size();
}
double Utilities::performance(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
double MLPPUtilities::performance(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
double correct = 0;
for (int i = 0; i < y_hat.size(); i++) {
int sub_correct = 0;
@@ -132,7 +132,7 @@ double Utilities::performance(std::vector<std::vector<double>> y_hat, std::vecto
return correct / y_hat.size();
}
void Utilities::saveParameters(std::string fileName, std::vector<double> weights, double bias, bool app, int layer) {
void MLPPUtilities::saveParameters(std::string fileName, std::vector<double> weights, double bias, bool app, int layer) {
std::string layer_info = "";
std::ofstream saveFile;
@@ -160,7 +160,7 @@ void Utilities::saveParameters(std::string fileName, std::vector<double> weights
saveFile.close();
}
void Utilities::saveParameters(std::string fileName, std::vector<double> weights, std::vector<double> initial, double bias, bool app, int layer) {
void MLPPUtilities::saveParameters(std::string fileName, std::vector<double> weights, std::vector<double> initial, double bias, bool app, int layer) {
std::string layer_info = "";
std::ofstream saveFile;
@@ -194,7 +194,7 @@ void Utilities::saveParameters(std::string fileName, std::vector<double> weights
saveFile.close();
}
void Utilities::saveParameters(std::string fileName, std::vector<std::vector<double>> weights, std::vector<double> bias, bool app, int layer) {
void MLPPUtilities::saveParameters(std::string fileName, std::vector<std::vector<double>> weights, std::vector<double> bias, bool app, int layer) {
std::string layer_info = "";
std::ofstream saveFile;
@@ -226,7 +226,7 @@ void Utilities::saveParameters(std::string fileName, std::vector<std::vector<dou
saveFile.close();
}
void Utilities::UI(std::vector<double> weights, double bias) {
void MLPPUtilities::UI(std::vector<double> weights, double bias) {
std::cout << "Values of the weight(s):" << std::endl;
for (int i = 0; i < weights.size(); i++) {
std::cout << weights[i] << std::endl;
@@ -235,7 +235,7 @@ void Utilities::UI(std::vector<double> weights, double bias) {
std::cout << bias << std::endl;
}
void Utilities::UI(std::vector<std::vector<double>> weights, std::vector<double> bias) {
void MLPPUtilities::UI(std::vector<std::vector<double>> weights, std::vector<double> bias) {
std::cout << "Values of the weight(s):" << std::endl;
for (int i = 0; i < weights.size(); i++) {
for (int j = 0; j < weights[i].size(); j++) {
@@ -248,7 +248,7 @@ void Utilities::UI(std::vector<std::vector<double>> weights, std::vector<double>
}
}
void Utilities::UI(std::vector<double> weights, std::vector<double> initial, double bias) {
void MLPPUtilities::UI(std::vector<double> weights, std::vector<double> initial, double bias) {
std::cout << "Values of the weight(s):" << std::endl;
for (int i = 0; i < weights.size(); i++) {
std::cout << weights[i] << std::endl;
@@ -261,7 +261,7 @@ void Utilities::UI(std::vector<double> weights, std::vector<double> initial, dou
std::cout << bias << std::endl;
}
void Utilities::CostInfo(int epoch, double cost_prev, double Cost) {
void MLPPUtilities::CostInfo(int epoch, double cost_prev, double Cost) {
std::cout << "-----------------------------------" << std::endl;
std::cout << "This is epoch: " << epoch << std::endl;
std::cout << "The cost function has been minimized by " << cost_prev - Cost << std::endl;
@@ -269,7 +269,7 @@ void Utilities::CostInfo(int epoch, double cost_prev, double Cost) {
std::cout << Cost << std::endl;
}
std::vector<std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch) {
std::vector<std::vector<std::vector<double>>> MLPPUtilities::createMiniBatches(std::vector<std::vector<double>> inputSet, int n_mini_batch) {
int n = inputSet.size();
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
@@ -291,7 +291,7 @@ std::vector<std::vector<std::vector<double>>> Utilities::createMiniBatches(std::
return inputMiniBatches;
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch) {
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<double>>> MLPPUtilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_mini_batch) {
int n = inputSet.size();
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
@@ -317,7 +317,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vecto
return { inputMiniBatches, outputMiniBatches };
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> Utilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch) {
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vector<std::vector<double>>>> MLPPUtilities::createMiniBatches(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_mini_batch) {
int n = inputSet.size();
std::vector<std::vector<std::vector<double>>> inputMiniBatches;
@@ -343,7 +343,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<std::vecto
return { inputMiniBatches, outputMiniBatches };
}
std::tuple<double, double, double, double> Utilities::TF_PN(std::vector<double> y_hat, std::vector<double> y) {
std::tuple<double, double, double, double> MLPPUtilities::TF_PN(std::vector<double> y_hat, std::vector<double> y) {
double TP, FP, TN, FN = 0;
for (int i = 0; i < y_hat.size(); i++) {
if (y_hat[i] == y[i]) {
@@ -363,20 +363,20 @@ std::tuple<double, double, double, double> Utilities::TF_PN(std::vector<double>
return { TP, FP, TN, FN };
}
double Utilities::recall(std::vector<double> y_hat, std::vector<double> y) {
double MLPPUtilities::recall(std::vector<double> y_hat, std::vector<double> y) {
auto [TP, FP, TN, FN] = TF_PN(y_hat, y);
return TP / (TP + FN);
}
double Utilities::precision(std::vector<double> y_hat, std::vector<double> y) {
double MLPPUtilities::precision(std::vector<double> y_hat, std::vector<double> y) {
auto [TP, FP, TN, FN] = TF_PN(y_hat, y);
return TP / (TP + FP);
}
double Utilities::accuracy(std::vector<double> y_hat, std::vector<double> y) {
double MLPPUtilities::accuracy(std::vector<double> y_hat, std::vector<double> y) {
auto [TP, FP, TN, FN] = TF_PN(y_hat, y);
return (TP + TN) / (TP + FP + FN + TN);
}
double Utilities::f1_score(std::vector<double> y_hat, std::vector<double> y) {
double MLPPUtilities::f1_score(std::vector<double> y_hat, std::vector<double> y) {
return 2 * precision(y_hat, y) * recall(y_hat, y) / (precision(y_hat, y) + recall(y_hat, y));
}
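A quick hand-worked check of the four metrics above: with y_hat = {1, 1, 0, 0, 1} and y = {1, 0, 0, 0, 1}, TF_PN counts TP = 2, FP = 1, TN = 2, FN = 0, so recall = 2 / (2 + 0) = 1, precision = 2 / (2 + 1) = 2/3, accuracy = (2 + 2) / 5 = 0.8, and f1_score = 2 * (2/3) * 1 / (2/3 + 1) = 0.8. (The arithmetic assumes all four counters start at zero; note that the first line of TF_PN only initializes FN explicitly.)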

View File

@@ -13,7 +13,7 @@
#include <vector>
class Utilities {
class MLPPUtilities {
public:
// Weight Init
static std::vector<double> weightInitialization(int n, std::string type = "Default");

View File

@@ -15,20 +15,20 @@
#include <iostream>
WGAN::WGAN(double k, std::vector<std::vector<double>> outputSet) :
MLPPWGAN::MLPPWGAN(double k, std::vector<std::vector<double>> outputSet) :
outputSet(outputSet), n(outputSet.size()), k(k) {
}
WGAN::~WGAN() {
MLPPWGAN::~MLPPWGAN() {
delete outputLayer;
}
std::vector<std::vector<double>> WGAN::generateExample(int n) {
std::vector<std::vector<double>> MLPPWGAN::generateExample(int n) {
MLPPLinAlg alg;
return modelSetTestGenerator(alg.gaussianNoise(n, k));
}
void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
void MLPPWGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
class MLPPCost cost;
MLPPLinAlg alg;
double cost_prev = 0;
@@ -50,7 +50,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
for (int i = 0; i < CRITIC_INTERATIONS; i++) {
generatorInputSet = alg.gaussianNoise(n, k);
discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
discriminatorInputSet.insert(discriminatorInputSet.end(), WGAN::outputSet.begin(), WGAN::outputSet.end()); // Fake + real inputs.
discriminatorInputSet.insert(discriminatorInputSet.end(), MLPPWGAN::outputSet.begin(), MLPPWGAN::outputSet.end()); // Fake + real inputs.
y_hat = modelSetTestDiscriminator(discriminatorInputSet);
outputSet = alg.scalarMultiply(-1, alg.onevec(n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
@@ -75,7 +75,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
forwardPass();
if (UI) {
WGAN::UI(epoch, cost_prev, WGAN::y_hat, alg.onevec(n));
MLPPWGAN::UI(epoch, cost_prev, MLPPWGAN::y_hat, alg.onevec(n));
}
epoch++;
@ -85,15 +85,15 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
}
}
double WGAN::score() {
double MLPPWGAN::score() {
MLPPLinAlg alg;
Utilities util;
MLPPUtilities util;
forwardPass();
return util.performance(y_hat, alg.onevec(n));
}
void WGAN::save(std::string fileName) {
Utilities util;
void MLPPWGAN::save(std::string fileName) {
MLPPUtilities util;
if (!network.empty()) {
util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
for (int i = 1; i < network.size(); i++) {
@@ -105,7 +105,7 @@ void WGAN::save(std::string fileName) {
}
}
void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
MLPPLinAlg alg;
if (network.empty()) {
network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
@@ -116,7 +116,7 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit
}
}
void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
void MLPPWGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
MLPPLinAlg alg;
if (!network.empty()) {
outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
@@ -125,7 +125,7 @@ void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda
}
}
std::vector<std::vector<double>> WGAN::modelSetTestGenerator(std::vector<std::vector<double>> X) {
std::vector<std::vector<double>> MLPPWGAN::modelSetTestGenerator(std::vector<std::vector<double>> X) {
if (!network.empty()) {
network[0].input = X;
network[0].forwardPass();
@@ -138,7 +138,7 @@ std::vector<std::vector<double>> WGAN::modelSetTestGenerator(std::vector<std::ve
return network[network.size() / 2].a;
}
std::vector<double> WGAN::modelSetTestDiscriminator(std::vector<std::vector<double>> X) {
std::vector<double> MLPPWGAN::modelSetTestDiscriminator(std::vector<std::vector<double>> X) {
if (!network.empty()) {
for (int i = network.size() / 2 + 1; i < network.size(); i++) {
if (i == network.size() / 2 + 1) {
@@ -154,7 +154,7 @@ std::vector<double> WGAN::modelSetTestDiscriminator(std::vector<std::vector<doub
return outputLayer->a;
}
double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
double MLPPWGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
MLPPReg regularization;
class MLPPCost cost;
double totalRegTerm = 0;
@@ -168,7 +168,7 @@ double WGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
}
void WGAN::forwardPass() {
void MLPPWGAN::forwardPass() {
MLPPLinAlg alg;
if (!network.empty()) {
network[0].input = alg.gaussianNoise(n, k);
@@ -186,7 +186,7 @@ void WGAN::forwardPass() {
y_hat = outputLayer->a;
}
void WGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
void MLPPWGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
MLPPLinAlg alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
@@ -203,7 +203,7 @@ void WGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<dou
}
}
void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
void MLPPWGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
MLPPLinAlg alg;
if (!network.empty()) {
@@ -216,7 +216,7 @@ void WGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>
}
}
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> WGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPWGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
MLPPLinAlg alg;
@@ -252,7 +252,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> W
return { cumulativeHiddenLayerWGrad, outputWGrad };
}
std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
std::vector<std::vector<std::vector<double>>> MLPPWGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
class MLPPCost cost;
MLPPActivation avn;
MLPPLinAlg alg;
@@ -281,14 +281,14 @@ std::vector<std::vector<std::vector<double>>> WGAN::computeGeneratorGradients(st
return cumulativeHiddenLayerWGrad;
}
void WGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
void MLPPWGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
Utilities::UI(outputLayer->weights, outputLayer->bias);
MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
if (!network.empty()) {
for (int i = network.size() - 1; i >= 0; i--) {
std::cout << "Layer " << i + 1 << ": " << std::endl;
Utilities::UI(network[i].weights, network[i].bias);
MLPPUtilities::UI(network[i].weights, network[i].bias);
}
}
}
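For context on the constants visible in this file (the "WassersteinLoss" output layer, the "WeightClipping" regularizer with bounds -0.01 and 0.01, and the relabelling of y_i from {1, 0} to {1, -1}): this is the weight-clipped WGAN objective of Arjovsky et al.,

    \min_G \; \max_{D,\; w_D \in [-0.01,\, 0.01]} \; \mathbb{E}_{x \sim p_\mathrm{data}}[D(x)] - \mathbb{E}_{z}[D(G(z))]

where the critic D is updated CRITIC_INTERATIONS times per generator step (as in the loop above) and the +1 / -1 labels decide which side of the difference a sample contributes to. This is the reference formulation quoted as a reading aid, not a restatement of every implementation detail.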

View File

@@ -17,10 +17,10 @@
class WGAN {
class MLPPWGAN {
public:
WGAN(double k, std::vector<std::vector<double>> outputSet);
~WGAN();
MLPPWGAN(double k, std::vector<std::vector<double>> outputSet);
~MLPPWGAN();
std::vector<std::vector<double>> generateExample(int n);
void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
double score();