diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index e2b8774..051e03e 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -137,7 +137,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { learning_rate = applyLearningRateScheduler(initial_learning_rate, decayConstant, epoch, dropRate); for (int i = 0; i < n_mini_batch; i++) { @@ -175,7 +175,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> v_hidden; @@ -232,7 +232,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> v_hidden; @@ -288,7 +288,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> v_hidden; @@ -344,7 +344,7 @@ void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, dou int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> m_hidden; @@ -411,7 +411,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> m_hidden; @@ -476,7 +476,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. 
- auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> m_hidden; @@ -546,7 +546,7 @@ void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, int n_mini_batch = n / mini_batch_size; // always evaluate the result // always do forward pass only ONCE at end. - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Initializing necessary components for Adam. std::vector>> m_hidden; @@ -606,13 +606,13 @@ void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, } double MLPPANN::score() { - Utilities util; + MLPPUtilities util; forwardPass(); return util.performance(y_hat, outputSet); } void MLPPANN::save(std::string fileName) { - Utilities util; + MLPPUtilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); for (int i = 1; i < network.size(); i++) { @@ -750,13 +750,13 @@ std::tuple>>, std::vector> M } void MLPPANN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer " << network.size() + 1 << ": " << std::endl; - Utilities::UI(outputLayer->weights, outputLayer->bias); + MLPPUtilities::UI(outputLayer->weights, outputLayer->bias); if (!network.empty()) { for (int i = network.size() - 1; i >= 0; i--) { std::cout << "Layer " << i + 1 << ": " << std::endl; - Utilities::UI(network[i].weights, network[i].bias); + MLPPUtilities::UI(network[i].weights, network[i].bias); } } } diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp index 279a81d..dd2bc6b 100644 --- a/mlpp/auto_encoder/auto_encoder.cpp +++ b/mlpp/auto_encoder/auto_encoder.cpp @@ -18,10 +18,10 @@ MLPPAutoEncoder::MLPPAutoEncoder(std::vector> inputSet, int MLPPActivation avn; y_hat.resize(inputSet.size()); - weights1 = Utilities::weightInitialization(k, n_hidden); - weights2 = Utilities::weightInitialization(n_hidden, k); - bias1 = Utilities::biasInitialization(n_hidden); - bias2 = Utilities::biasInitialization(k); + weights1 = MLPPUtilities::weightInitialization(k, n_hidden); + weights2 = MLPPUtilities::weightInitialization(n_hidden, k); + bias1 = MLPPUtilities::biasInitialization(n_hidden); + bias2 = MLPPUtilities::biasInitialization(k); } std::vector> MLPPAutoEncoder::modelSetTest(std::vector> X) { @@ -71,11 +71,11 @@ void MLPPAutoEncoder::gradientDescent(double learning_rate, int max_epoch, bool // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputSet)); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -121,11 +121,11 @@ void MLPPAutoEncoder::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate(inputSet[outputIndex]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { inputSet[outputIndex] })); + 
MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { inputSet[outputIndex] })); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -144,7 +144,7 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - std::vector>> inputMiniBatches = Utilities::createMiniBatches(inputSet, n_mini_batch); + std::vector>> inputMiniBatches = MLPPUtilities::createMiniBatches(inputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -181,11 +181,11 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputMiniBatches[i])); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, inputMiniBatches[i])); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } } epoch++; @@ -197,12 +197,12 @@ void MLPPAutoEncoder::MBGD(double learning_rate, int max_epoch, int mini_batch_s } double MLPPAutoEncoder::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, inputSet); } void MLPPAutoEncoder::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights2, bias2, 1, 2); } diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp index e813def..457f39a 100644 --- a/mlpp/bernoulli_nb/bernoulli_nb.cpp +++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp @@ -69,7 +69,7 @@ double MLPPBernoulliNB::modelTest(std::vector x) { } double MLPPBernoulliNB::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index c02104c..92dd825 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -17,8 +17,8 @@ MLPPCLogLogReg::MLPPCLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } std::vector MLPPCLogLogReg::modelSetTest(std::vector> X) { @@ -52,8 +52,8 @@ void MLPPCLogLogReg::gradientDescent(double learning_rate, int max_epoch, bool U forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -84,8 +84,8 @@ void MLPPCLogLogReg::MLE(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -124,8 +124,8 @@ void MLPPCLogLogReg::SGD(double learning_rate, int 
max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -145,7 +145,7 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -167,8 +167,8 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -180,7 +180,7 @@ void MLPPCLogLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_si } double MLPPCLogLogReg::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } diff --git a/mlpp/convolutions/convolutions.cpp b/mlpp/convolutions/convolutions.cpp index b9b934f..939eb79 100644 --- a/mlpp/convolutions/convolutions.cpp +++ b/mlpp/convolutions/convolutions.cpp @@ -164,7 +164,7 @@ std::vector> MLPPConvolutions::pool(std::vector>> MLPPConvolutions::pool(std::vector double MLPPConvolutions::globalPool(std::vector> input, std::string type) { MLPPLinAlg alg; if (type == "Average") { - Stat stat; + MLPPStat stat; return stat.mean(alg.flatten(input)); } else if (type == "Min") { return alg.min(alg.flatten(input)); diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp index a3a9baf..b47597e 100644 --- a/mlpp/data/data.cpp +++ b/mlpp/data/data.cpp @@ -699,7 +699,7 @@ std::vector> MLPPData::featureScaling(std::vector> MLPPData::meanNormalization(std::vector> X) { MLPPLinAlg alg; - Stat stat; + MLPPStat stat; // (X_j - mu_j) / std_j, for every j X = meanCentering(X); @@ -711,7 +711,7 @@ std::vector> MLPPData::meanNormalization(std::vector> MLPPData::meanCentering(std::vector> X) { MLPPLinAlg alg; - Stat stat; + MLPPStat stat; for (int i = 0; i < X.size(); i++) { double mean_i = stat.mean(X[i]); for (int j = 0; j < X[i].size(); j++) { diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index 1a2b699..1c9b868 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -18,8 +18,8 @@ MLPPDualSVC::MLPPDualSVC(std::vector> inputSet, std::vector outputSet, double C, std::string kernel) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) { y_hat.resize(n); - bias = Utilities::biasInitialization(); - alpha = Utilities::weightInitialization(n); // One alpha for all training examples, as per the lagrangian multipliers. + bias = MLPPUtilities::biasInitialization(); + alpha = MLPPUtilities::weightInitialization(n); // One alpha for all training examples, as per the lagrangian multipliers. K = kernelFunction(inputSet, inputSet, kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated. 
} @@ -67,8 +67,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(alpha, inputSet, outputSet)); - Utilities::UI(alpha, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha, inputSet, outputSet)); + MLPPUtilities::UI(alpha, bias); std::cout << score() << std::endl; // TO DO: DELETE THIS. } epoch++; @@ -102,8 +102,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // y_hat = Evaluate({inputSet[outputIndex]}); // if(UI) { -// Utilities::CostInfo(epoch, cost_prev, Cost(alpha)); -// Utilities::UI(weights, bias); +// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha)); +// MLPPUtilities::UI(weights, bias); // } // epoch++; @@ -122,7 +122,7 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // // Creating the mini-batches // int n_mini_batch = n/mini_batch_size; -// auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); +// auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // while(true){ // for(int i = 0; i < n_mini_batch; i++){ @@ -142,8 +142,8 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // y_hat = Evaluate(inputMiniBatches[i]); // if(UI) { -// Utilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C)); -// Utilities::UI(weights, bias); +// MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C)); +// MLPPUtilities::UI(weights, bias); // } // } // epoch++; @@ -153,12 +153,12 @@ void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) // } double MLPPDualSVC::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPDualSVC::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, alpha, bias); } diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp index 4e7dcb6..189ecdf 100644 --- a/mlpp/exp_reg/exp_reg.cpp +++ b/mlpp/exp_reg/exp_reg.cpp @@ -18,9 +18,9 @@ MLPPExpReg::MLPPExpReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - initial = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + initial = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } std::vector MLPPExpReg::modelSetTest(std::vector> X) { @@ -77,8 +77,8 @@ void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -122,8 +122,8 @@ void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -142,7 +142,7 @@ 
void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -181,8 +181,8 @@ void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -194,12 +194,12 @@ void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, } double MLPPExpReg::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPExpReg::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights, initial, bias); } diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp index e10e44a..4a535b1 100644 --- a/mlpp/gan/gan.cpp +++ b/mlpp/gan/gan.cpp @@ -78,13 +78,13 @@ void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { double MLPPGAN::score() { MLPPLinAlg alg; - Utilities util; + MLPPUtilities util; forwardPass(); return util.performance(y_hat, alg.onevec(n)); } void MLPPGAN::save(std::string fileName) { - Utilities util; + MLPPUtilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); for (int i = 1; i < network.size(); i++) { @@ -273,13 +273,13 @@ std::vector>> MLPPGAN::computeGeneratorGradients } void MLPPGAN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer " << network.size() + 1 << ": " << std::endl; - Utilities::UI(outputLayer->weights, outputLayer->bias); + MLPPUtilities::UI(outputLayer->weights, outputLayer->bias); if (!network.empty()) { for (int i = network.size() - 1; i >= 0; i--) { std::cout << "Layer " << i + 1 << ": " << std::endl; - Utilities::UI(network[i].weights, network[i].bias); + MLPPUtilities::UI(network[i].weights, network[i].bias); } } } diff --git a/mlpp/gauss_markov_checker/gauss_markov_checker.cpp b/mlpp/gauss_markov_checker/gauss_markov_checker.cpp index 5210039..0e5fd6e 100644 --- a/mlpp/gauss_markov_checker/gauss_markov_checker.cpp +++ b/mlpp/gauss_markov_checker/gauss_markov_checker.cpp @@ -22,7 +22,7 @@ void MLPPGaussMarkovChecker::checkGMConditions(std::vector eps) { } bool MLPPGaussMarkovChecker::arithmeticMean(std::vector eps) { - Stat stat; + MLPPStat stat; if (stat.mean(eps) == 0) { return 1; } else { @@ -31,7 +31,7 @@ bool MLPPGaussMarkovChecker::arithmeticMean(std::vector eps) { } bool MLPPGaussMarkovChecker::homoscedasticity(std::vector eps) { - Stat stat; + MLPPStat stat; double currentVar = (eps[0] - stat.mean(eps)) * (eps[0] - stat.mean(eps)) / eps.size(); for (int i = 0; i < eps.size(); i++) { if (currentVar != (eps[i] - stat.mean(eps)) * (eps[i] - stat.mean(eps)) / eps.size()) { @@ -42,7 +42,7 @@ bool MLPPGaussMarkovChecker::homoscedasticity(std::vector eps) { } bool MLPPGaussMarkovChecker::exogeneity(std::vector eps) { - Stat stat; + MLPPStat stat; 
for (int i = 0; i < eps.size(); i++) { for (int j = 0; j < eps.size(); j++) { if (i != j) { diff --git a/mlpp/gaussian_nb/gaussian_nb.cpp b/mlpp/gaussian_nb/gaussian_nb.cpp index bbc3aeb..65444f6 100644 --- a/mlpp/gaussian_nb/gaussian_nb.cpp +++ b/mlpp/gaussian_nb/gaussian_nb.cpp @@ -30,7 +30,7 @@ std::vector MLPPGaussianNB::modelSetTest(std::vector } double MLPPGaussianNB::modelTest(std::vector x) { - Stat stat; + MLPPStat stat; MLPPLinAlg alg; double score[class_num]; @@ -43,12 +43,12 @@ double MLPPGaussianNB::modelTest(std::vector x) { } double MLPPGaussianNB::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPGaussianNB::Evaluate() { - Stat stat; + MLPPStat stat; MLPPLinAlg alg; // Computing mu_k_y and sigma_k_y diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp index 4775529..e806087 100644 --- a/mlpp/hidden_layer/hidden_layer.cpp +++ b/mlpp/hidden_layer/hidden_layer.cpp @@ -15,8 +15,8 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { - weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit); - bias = Utilities::biasInitialization(n_hidden); + weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit); + bias = MLPPUtilities::biasInitialization(n_hidden); activation_map["Linear"] = &MLPPActivation::linear; activationTest_map["Linear"] = &MLPPActivation::linear; diff --git a/mlpp/kmeans/kmeans.cpp b/mlpp/kmeans/kmeans.cpp index 7b3be7b..d3a5106 100644 --- a/mlpp/kmeans/kmeans.cpp +++ b/mlpp/kmeans/kmeans.cpp @@ -70,7 +70,7 @@ void MLPPKMeans::train(int epoch_num, bool UI) { // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost()); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost()); } epoch++; diff --git a/mlpp/knn/knn.cpp b/mlpp/knn/knn.cpp index 33a2e23..d615c33 100644 --- a/mlpp/knn/knn.cpp +++ b/mlpp/knn/knn.cpp @@ -30,7 +30,7 @@ int MLPPKNN::modelTest(std::vector x) { } double MLPPKNN::score() { - Utilities util; + MLPPUtilities util; return util.performance(modelSetTest(inputSet), outputSet); } diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp index 701551e..02700fc 100644 --- a/mlpp/lin_alg/lin_alg.cpp +++ b/mlpp/lin_alg/lin_alg.cpp @@ -507,7 +507,7 @@ std::vector> MLPPLinAlg::identity(double d) { } std::vector> MLPPLinAlg::cov(std::vector> A) { - Stat stat; + MLPPStat stat; std::vector> covMat; covMat.resize(A.size()); for (int i = 0; i < covMat.size(); i++) { diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp index 17ea602..c5d26db 100644 --- a/mlpp/lin_reg/lin_reg.cpp +++ b/mlpp/lin_reg/lin_reg.cpp @@ -21,8 +21,8 @@ MLPPLinReg::MLPPLinReg(std::vector> inputSet, std::vector MLPPLinReg::modelSetTest(std::vector> X) { @@ -55,8 +55,8 @@ void MLPPLinReg::NewtonRaphson(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; if (epoch > max_epoch) { @@ -86,8 +86,8 @@ void MLPPLinReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - 
Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; if (epoch > max_epoch) { @@ -123,8 +123,8 @@ void MLPPLinReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -143,7 +143,7 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -161,8 +161,8 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -175,7 +175,7 @@ void MLPPLinReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, void MLPPLinReg::normalEquation() { MLPPLinAlg alg; - Stat stat; + MLPPStat stat; std::vector x_means; std::vector> inputSetT = alg.transpose(inputSet); @@ -208,12 +208,12 @@ void MLPPLinReg::normalEquation() { } double MLPPLinReg::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPLinReg::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp index 7f9863f..f974d26 100644 --- a/mlpp/log_reg/log_reg.cpp +++ b/mlpp/log_reg/log_reg.cpp @@ -18,8 +18,8 @@ MLPPLogReg::MLPPLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } std::vector MLPPLogReg::modelSetTest(std::vector> X) { @@ -51,8 +51,8 @@ void MLPPLogReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -83,8 +83,8 @@ void MLPPLogReg::MLE(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; if (epoch > max_epoch) { @@ -120,8 +120,8 @@ void MLPPLogReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { 
outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -140,7 +140,7 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -158,8 +158,8 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -171,12 +171,12 @@ void MLPPLogReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, } double MLPPLogReg::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPLogReg::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp index 6e3bbd7..3d89470 100644 --- a/mlpp/mann/mann.cpp +++ b/mlpp/mann/mann.cpp @@ -101,14 +101,14 @@ void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer " << network.size() + 1 << ": " << std::endl; - Utilities::UI(outputLayer->weights, outputLayer->bias); + MLPPUtilities::UI(outputLayer->weights, outputLayer->bias); if (!network.empty()) { std::cout << "Layer " << network.size() << ": " << std::endl; for (int i = network.size() - 1; i >= 0; i--) { std::cout << "Layer " << i + 1 << ": " << std::endl; - Utilities::UI(network[i].weights, network[i].bias); + MLPPUtilities::UI(network[i].weights, network[i].bias); } } } @@ -121,13 +121,13 @@ void MLPPMANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } double MLPPMANN::score() { - Utilities util; + MLPPUtilities util; forwardPass(); return util.performance(y_hat, outputSet); } void MLPPMANN::save(std::string fileName) { - Utilities util; + MLPPUtilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); for (int i = 1; i < network.size(); i++) { diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index 165e12e..5da645d 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -21,10 +21,10 @@ MLPPMLP::MLPPMLP(std::vector> inputSet, std::vector MLPPActivation avn; y_hat.resize(n); - weights1 = Utilities::weightInitialization(k, n_hidden); - weights2 = Utilities::weightInitialization(n_hidden); - bias1 = Utilities::biasInitialization(n_hidden); - bias2 = Utilities::biasInitialization(); + weights1 = MLPPUtilities::weightInitialization(k, n_hidden); + weights2 = MLPPUtilities::weightInitialization(n_hidden); + bias1 = MLPPUtilities::biasInitialization(n_hidden); + bias2 = MLPPUtilities::biasInitialization(); } std::vector MLPPMLP::modelSetTest(std::vector> X) { @@ -80,11 +80,11 @@ void MLPPMLP::gradientDescent(double learning_rate, int max_epoch, bool UI) { // UI 
PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -133,11 +133,11 @@ void MLPPMLP::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate(inputSet[outputIndex]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -157,7 +157,7 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -199,11 +199,11 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } } epoch++; @@ -215,12 +215,12 @@ void MLPPMLP::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo } double MLPPMLP::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPMLP::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights2, bias2, 1, 2); } diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp index f9fdae5..c920d28 100644 --- a/mlpp/multi_output_layer/multi_output_layer.cpp +++ b/mlpp/multi_output_layer/multi_output_layer.cpp @@ -14,8 +14,8 @@ MLPPMultiOutputLayer::MLPPMultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { - weights = Utilities::weightInitialization(n_hidden, n_output, weightInit); - bias = Utilities::biasInitialization(n_output); + weights = MLPPUtilities::weightInitialization(n_hidden, n_output, weightInit); + bias = MLPPUtilities::biasInitialization(n_output); activation_map["Linear"] = &MLPPActivation::linear; activationTest_map["Linear"] = &MLPPActivation::linear; diff --git a/mlpp/multinomial_nb/multinomial_nb.cpp b/mlpp/multinomial_nb/multinomial_nb.cpp index ce17680..6407bc7 100644 --- a/mlpp/multinomial_nb/multinomial_nb.cpp +++ b/mlpp/multinomial_nb/multinomial_nb.cpp @@ -49,7 +49,7 
@@ double MLPPMultinomialNB::modelTest(std::vector x) { } double MLPPMultinomialNB::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } diff --git a/mlpp/outlier_finder/outlier_finder.cpp b/mlpp/outlier_finder/outlier_finder.cpp index 4dd74d9..b3c9151 100644 --- a/mlpp/outlier_finder/outlier_finder.cpp +++ b/mlpp/outlier_finder/outlier_finder.cpp @@ -14,7 +14,7 @@ MLPPOutlierFinder::MLPPOutlierFinder(int threshold) : } std::vector> MLPPOutlierFinder::modelSetTest(std::vector> inputSet) { - Stat stat; + MLPPStat stat; std::vector> outliers; outliers.resize(inputSet.size()); for (int i = 0; i < inputSet.size(); i++) { @@ -29,7 +29,7 @@ std::vector> MLPPOutlierFinder::modelSetTest(std::vector MLPPOutlierFinder::modelTest(std::vector inputSet) { - Stat stat; + MLPPStat stat; std::vector outliers; for (int i = 0; i < inputSet.size(); i++) { double z = (inputSet[i] - stat.mean(inputSet)) / stat.standardDeviation(inputSet); diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp index 749ec44..9de7d05 100644 --- a/mlpp/output_layer/output_layer.cpp +++ b/mlpp/output_layer/output_layer.cpp @@ -14,8 +14,8 @@ MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) : n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) { - weights = Utilities::weightInitialization(n_hidden, weightInit); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(n_hidden, weightInit); + bias = MLPPUtilities::biasInitialization(); activation_map["Linear"] = &MLPPActivation::linear; activationTest_map["Linear"] = &MLPPActivation::linear; diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp index 6158a7d..71b0756 100644 --- a/mlpp/probit_reg/probit_reg.cpp +++ b/mlpp/probit_reg/probit_reg.cpp @@ -18,8 +18,8 @@ MLPPProbitReg::MLPPProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } std::vector MLPPProbitReg::modelSetTest(std::vector> X) { @@ -52,8 +52,8 @@ void MLPPProbitReg::gradientDescent(double learning_rate, int max_epoch, bool UI forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -85,8 +85,8 @@ void MLPPProbitReg::MLE(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -126,8 +126,8 @@ void MLPPProbitReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { 
outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -147,7 +147,7 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Creating the mini-batches for (int i = 0; i < n_mini_batch; i++) { @@ -185,8 +185,8 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -198,12 +198,12 @@ void MLPPProbitReg::MBGD(double learning_rate, int max_epoch, int mini_batch_siz } double MLPPProbitReg::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPProbitReg::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp index 61b3f95..a169a1f 100644 --- a/mlpp/softmax_net/softmax_net.cpp +++ b/mlpp/softmax_net/softmax_net.cpp @@ -20,10 +20,10 @@ MLPPSoftmaxNet::MLPPSoftmaxNet(std::vector> inputSet, std::v inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights1 = Utilities::weightInitialization(k, n_hidden); - weights2 = Utilities::weightInitialization(n_hidden, n_class); - bias1 = Utilities::biasInitialization(n_hidden); - bias2 = Utilities::biasInitialization(n_class); + weights1 = MLPPUtilities::weightInitialization(k, n_hidden); + weights2 = MLPPUtilities::weightInitialization(n_hidden, n_class); + bias1 = MLPPUtilities::biasInitialization(n_hidden); + bias2 = MLPPUtilities::biasInitialization(n_class); } std::vector MLPPSoftmaxNet::modelTest(std::vector x) { @@ -76,11 +76,11 @@ void MLPPSoftmaxNet::gradientDescent(double learning_rate, int max_epoch, bool U // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -129,11 +129,11 @@ void MLPPSoftmaxNet::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate(inputSet[outputIndex]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } epoch++; @@ -153,7 +153,7 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = 
Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); // Creating the mini-batches for (int i = 0; i < n_mini_batch; i++) { @@ -211,11 +211,11 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); std::cout << "Layer 1:" << std::endl; - Utilities::UI(weights1, bias1); + MLPPUtilities::UI(weights1, bias1); std::cout << "Layer 2:" << std::endl; - Utilities::UI(weights2, bias2); + MLPPUtilities::UI(weights2, bias2); } } epoch++; @@ -227,12 +227,12 @@ void MLPPSoftmaxNet::MBGD(double learning_rate, int max_epoch, int mini_batch_si } double MLPPSoftmaxNet::score() { - Utilities util; + MLPPUtilities util; return util.performance(y_hat, outputSet); } void MLPPSoftmaxNet::save(std::string fileName) { - Utilities util; + MLPPUtilities util; util.saveParameters(fileName, weights1, bias1, 0, 1); util.saveParameters(fileName, weights2, bias2, 1, 2); diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp index 39bad0c..340311b 100644 --- a/mlpp/softmax_reg/softmax_reg.cpp +++ b/mlpp/softmax_reg/softmax_reg.cpp @@ -15,22 +15,22 @@ #include -SoftmaxReg::SoftmaxReg(std::vector> inputSet, std::vector> outputSet, std::string reg, double lambda, double alpha) : +MLPPSoftmaxReg::MLPPSoftmaxReg(std::vector> inputSet, std::vector> outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k, n_class); - bias = Utilities::biasInitialization(n_class); + weights = MLPPUtilities::weightInitialization(k, n_class); + bias = MLPPUtilities::biasInitialization(n_class); } -std::vector SoftmaxReg::modelTest(std::vector x) { +std::vector MLPPSoftmaxReg::modelTest(std::vector x) { return Evaluate(x); } -std::vector> SoftmaxReg::modelSetTest(std::vector> X) { +std::vector> MLPPSoftmaxReg::modelSetTest(std::vector> X) { return Evaluate(X); } -void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPSoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; MLPPReg regularization; double cost_prev = 0; @@ -58,8 +58,8 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -69,7 +69,7 @@ void SoftmaxReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPSoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; MLPPReg regularization; double cost_prev = 0; @@ -100,8 +100,8 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { 
outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -112,7 +112,7 @@ void SoftmaxReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPSoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPLinAlg alg; MLPPReg regularization; double cost_prev = 0; @@ -120,7 +120,7 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -141,8 +141,8 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -153,29 +153,29 @@ void SoftmaxReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, forwardPass(); } -double SoftmaxReg::score() { - Utilities util; +double MLPPSoftmaxReg::score() { + MLPPUtilities util; return util.performance(y_hat, outputSet); } -void SoftmaxReg::save(std::string fileName) { - Utilities util; +void MLPPSoftmaxReg::save(std::string fileName) { + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } -double SoftmaxReg::Cost(std::vector> y_hat, std::vector> y) { +double MLPPSoftmaxReg::Cost(std::vector> y_hat, std::vector> y) { MLPPReg regularization; class MLPPCost cost; return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector SoftmaxReg::Evaluate(std::vector x) { +std::vector MLPPSoftmaxReg::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x))); } -std::vector> SoftmaxReg::Evaluate(std::vector> X) { +std::vector> MLPPSoftmaxReg::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; @@ -183,7 +183,7 @@ std::vector> SoftmaxReg::Evaluate(std::vector> inputSet, std::vector> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPSoftmaxReg(std::vector> inputSet, std::vector> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelTest(std::vector x); std::vector> modelSetTest(std::vector> X); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/stat/stat.cpp b/mlpp/stat/stat.cpp index 865bb3e..bbb7fb9 100644 --- a/mlpp/stat/stat.cpp +++ b/mlpp/stat/stat.cpp @@ -15,15 +15,15 @@ #include -double Stat::b0Estimation(const std::vector &x, const std::vector &y) { +double MLPPStat::b0Estimation(const std::vector &x, const std::vector &y) { return mean(y) - b1Estimation(x, y) * mean(x); } -double Stat::b1Estimation(const std::vector &x, const std::vector &y) { +double MLPPStat::b1Estimation(const std::vector &x, const std::vector &y) { return covariance(x, y) / variance(x); } -double Stat::mean(const std::vector &x) { +double MLPPStat::mean(const std::vector &x) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += x[i]; @@ -31,7 +31,7 @@ double 
Stat::mean(const std::vector &x) { return sum / x.size(); } -double Stat::median(std::vector x) { +double MLPPStat::median(std::vector x) { double center = double(x.size()) / double(2); sort(x.begin(), x.end()); if (x.size() % 2 == 0) { @@ -41,7 +41,7 @@ double Stat::median(std::vector x) { } } -std::vector Stat::mode(const std::vector &x) { +std::vector MLPPStat::mode(const std::vector &x) { MLPPData data; std::vector x_set = data.vecToSet(x); std::map element_num; @@ -65,16 +65,16 @@ std::vector Stat::mode(const std::vector &x) { return modes; } -double Stat::range(const std::vector &x) { +double MLPPStat::range(const std::vector &x) { MLPPLinAlg alg; return alg.max(x) - alg.min(x); } -double Stat::midrange(const std::vector &x) { +double MLPPStat::midrange(const std::vector &x) { return range(x) / 2; } -double Stat::absAvgDeviation(const std::vector &x) { +double MLPPStat::absAvgDeviation(const std::vector &x) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += std::abs(x[i] - mean(x)); @@ -82,11 +82,11 @@ double Stat::absAvgDeviation(const std::vector &x) { return sum / x.size(); } -double Stat::standardDeviation(const std::vector &x) { +double MLPPStat::standardDeviation(const std::vector &x) { return std::sqrt(variance(x)); } -double Stat::variance(const std::vector &x) { +double MLPPStat::variance(const std::vector &x) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += (x[i] - mean(x)) * (x[i] - mean(x)); @@ -94,7 +94,7 @@ double Stat::variance(const std::vector &x) { return sum / (x.size() - 1); } -double Stat::covariance(const std::vector &x, const std::vector &y) { +double MLPPStat::covariance(const std::vector &x, const std::vector &y) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += (x[i] - mean(x)) * (y[i] - mean(y)); @@ -102,20 +102,20 @@ double Stat::covariance(const std::vector &x, const std::vector return sum / (x.size() - 1); } -double Stat::correlation(const std::vector &x, const std::vector &y) { +double MLPPStat::correlation(const std::vector &x, const std::vector &y) { return covariance(x, y) / (standardDeviation(x) * standardDeviation(y)); } -double Stat::R2(const std::vector &x, const std::vector &y) { +double MLPPStat::R2(const std::vector &x, const std::vector &y) { return correlation(x, y) * correlation(x, y); } -double Stat::chebyshevIneq(const double k) { +double MLPPStat::chebyshevIneq(const double k) { // X may or may not belong to a Gaussian Distribution return 1 - 1 / (k * k); } -double Stat::weightedMean(const std::vector &x, const std::vector &weights) { +double MLPPStat::weightedMean(const std::vector &x, const std::vector &weights) { double sum = 0; double weights_sum = 0; for (int i = 0; i < x.size(); i++) { @@ -125,7 +125,7 @@ double Stat::weightedMean(const std::vector &x, const std::vector &x) { +double MLPPStat::geometricMean(const std::vector &x) { double product = 1; for (int i = 0; i < x.size(); i++) { product *= x[i]; @@ -133,7 +133,7 @@ double Stat::geometricMean(const std::vector &x) { return std::pow(product, 1.0 / x.size()); } -double Stat::harmonicMean(const std::vector &x) { +double MLPPStat::harmonicMean(const std::vector &x) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += 1 / x[i]; @@ -141,7 +141,7 @@ double Stat::harmonicMean(const std::vector &x) { return x.size() / sum; } -double Stat::RMS(const std::vector &x) { +double MLPPStat::RMS(const std::vector &x) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += x[i] * x[i]; @@ -149,7 +149,7 @@ double Stat::RMS(const std::vector 
&x) { return sqrt(sum / x.size()); } -double Stat::powerMean(const std::vector &x, const double p) { +double MLPPStat::powerMean(const std::vector &x, const double p) { double sum = 0; for (int i = 0; i < x.size(); i++) { sum += std::pow(x[i], p); @@ -157,7 +157,7 @@ double Stat::powerMean(const std::vector &x, const double p) { return std::pow(sum / x.size(), 1 / p); } -double Stat::lehmerMean(const std::vector &x, const double p) { +double MLPPStat::lehmerMean(const std::vector &x, const double p) { double num = 0; double den = 0; for (int i = 0; i < x.size(); i++) { @@ -167,7 +167,7 @@ double Stat::lehmerMean(const std::vector &x, const double p) { return num / den; } -double Stat::weightedLehmerMean(const std::vector &x, const std::vector &weights, const double p) { +double MLPPStat::weightedLehmerMean(const std::vector &x, const std::vector &weights, const double p) { double num = 0; double den = 0; for (int i = 0; i < x.size(); i++) { @@ -177,38 +177,38 @@ double Stat::weightedLehmerMean(const std::vector &x, const std::vector< return num / den; } -double Stat::heronianMean(const double A, const double B) { +double MLPPStat::heronianMean(const double A, const double B) { return (A + sqrt(A * B) + B) / 3; } -double Stat::contraHarmonicMean(const std::vector &x) { +double MLPPStat::contraHarmonicMean(const std::vector &x) { return lehmerMean(x, 2); } -double Stat::heinzMean(const double A, const double B, const double x) { +double MLPPStat::heinzMean(const double A, const double B, const double x) { return (std::pow(A, x) * std::pow(B, 1 - x) + std::pow(A, 1 - x) * std::pow(B, x)) / 2; } -double Stat::neumanSandorMean(const double a, const double b) { +double MLPPStat::neumanSandorMean(const double a, const double b) { MLPPActivation avn; return (a - b) / 2 * avn.arsinh((a - b) / (a + b)); } -double Stat::stolarskyMean(const double x, const double y, const double p) { +double MLPPStat::stolarskyMean(const double x, const double y, const double p) { if (x == y) { return x; } return std::pow((std::pow(x, p) - std::pow(y, p)) / (p * (x - y)), 1 / (p - 1)); } -double Stat::identricMean(const double x, const double y) { +double MLPPStat::identricMean(const double x, const double y) { if (x == y) { return x; } return (1 / M_E) * std::pow(std::pow(x, x) / std::pow(y, y), 1 / (x - y)); } -double Stat::logMean(const double x, const double y) { +double MLPPStat::logMean(const double x, const double y) { if (x == y) { return x; } diff --git a/mlpp/stat/stat.h b/mlpp/stat/stat.h index 884ac59..89c5be3 100644 --- a/mlpp/stat/stat.h +++ b/mlpp/stat/stat.h @@ -11,7 +11,7 @@ #include -class Stat { +class MLPPStat { public: // These functions are for univariate lin reg module- not for users. 
double b0Estimation(const std::vector &x, const std::vector &y); diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp index 9857ffa..0989e66 100644 --- a/mlpp/svc/svc.cpp +++ b/mlpp/svc/svc.cpp @@ -15,22 +15,22 @@ #include -SVC::SVC(std::vector> inputSet, std::vector outputSet, double C) : +MLPPSVC::MLPPSVC(std::vector> inputSet, std::vector outputSet, double C) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } -std::vector SVC::modelSetTest(std::vector> X) { +std::vector MLPPSVC::modelSetTest(std::vector> X) { return Evaluate(X); } -double SVC::modelTest(std::vector x) { +double MLPPSVC::modelTest(std::vector x) { return Evaluate(x); } -void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; @@ -52,8 +52,8 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet, weights, C)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet, weights, C)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -63,7 +63,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void SVC::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPSVC::SGD(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; @@ -94,8 +94,8 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ z }, { outputSet[outputIndex] }, weights, C)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ z }, { outputSet[outputIndex] }, weights, C)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -106,7 +106,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; @@ -116,7 +116,7 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -136,8 +136,8 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C)); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -148,47 +148,47 @@ void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI forwardPass(); } -double SVC::score() { - Utilities util; +double MLPPSVC::score() { + MLPPUtilities util; return 
util.performance(y_hat, outputSet); } -void SVC::save(std::string fileName) { - Utilities util; +void MLPPSVC::save(std::string fileName) { + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } -double SVC::Cost(std::vector z, std::vector y, std::vector weights, double C) { +double MLPPSVC::Cost(std::vector z, std::vector y, std::vector weights, double C) { class MLPPCost cost; return cost.HingeLoss(z, y, weights, C); } -std::vector SVC::Evaluate(std::vector> X) { +std::vector MLPPSVC::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } -std::vector SVC::propagate(std::vector> X) { +std::vector MLPPSVC::propagate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } -double SVC::Evaluate(std::vector x) { +double MLPPSVC::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return avn.sign(alg.dot(weights, x) + bias); } -double SVC::propagate(std::vector x) { +double MLPPSVC::propagate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return alg.dot(weights, x) + bias; } // sign ( wTx + b ) -void SVC::forwardPass() { +void MLPPSVC::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; diff --git a/mlpp/svc/svc.h b/mlpp/svc/svc.h index d8497f6..9462be9 100644 --- a/mlpp/svc/svc.h +++ b/mlpp/svc/svc.h @@ -16,9 +16,9 @@ -class SVC { +class MLPPSVC { public: - SVC(std::vector> inputSet, std::vector outputSet, double C); + MLPPSVC(std::vector> inputSet, std::vector outputSet, double C); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp index 0e6363d..201a88c 100644 --- a/mlpp/tanh_reg/tanh_reg.cpp +++ b/mlpp/tanh_reg/tanh_reg.cpp @@ -15,22 +15,22 @@ #include -TanhReg::TanhReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : +MLPPTanhReg::MLPPTanhReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) { y_hat.resize(n); - weights = Utilities::weightInitialization(k); - bias = Utilities::biasInitialization(); + weights = MLPPUtilities::weightInitialization(k); + bias = MLPPUtilities::biasInitialization(); } -std::vector TanhReg::modelSetTest(std::vector> X) { +std::vector MLPPTanhReg::modelSetTest(std::vector> X) { return Evaluate(X); } -double TanhReg::modelTest(std::vector x) { +double MLPPTanhReg::modelTest(std::vector x) { return Evaluate(x); } -void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPTanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { MLPPActivation avn; MLPPLinAlg alg; MLPPReg regularization; @@ -53,8 +53,8 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { // UI PORTION if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -64,7 +64,7 @@ void TanhReg::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { +void MLPPTanhReg::SGD(double learning_rate, int max_epoch, bool UI) { MLPPLinAlg alg; MLPPReg 
regularization; double cost_prev = 0; @@ -91,8 +91,8 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { y_hat = Evaluate({ inputSet[outputIndex] }); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] })); + MLPPUtilities::UI(weights, bias); } epoch++; @@ -103,7 +103,7 @@ void TanhReg::SGD(double learning_rate, int max_epoch, bool UI) { forwardPass(); } -void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { +void MLPPTanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { MLPPActivation avn; MLPPLinAlg alg; MLPPReg regularization; @@ -112,7 +112,7 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo // Creating the mini-batches int n_mini_batch = n / mini_batch_size; - auto [inputMiniBatches, outputMiniBatches] = Utilities::createMiniBatches(inputSet, outputSet, n_mini_batch); + auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch); while (true) { for (int i = 0; i < n_mini_batch; i++) { @@ -134,8 +134,8 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo y_hat = Evaluate(inputMiniBatches[i]); if (UI) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); - Utilities::UI(weights, bias); + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i])); + MLPPUtilities::UI(weights, bias); } } epoch++; @@ -146,46 +146,46 @@ void TanhReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo forwardPass(); } -double TanhReg::score() { - Utilities util; +double MLPPTanhReg::score() { + MLPPUtilities util; return util.performance(y_hat, outputSet); } -void TanhReg::save(std::string fileName) { - Utilities util; +void MLPPTanhReg::save(std::string fileName) { + MLPPUtilities util; util.saveParameters(fileName, weights, bias); } -double TanhReg::Cost(std::vector y_hat, std::vector y) { +double MLPPTanhReg::Cost(std::vector y_hat, std::vector y) { MLPPReg regularization; class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } -std::vector TanhReg::Evaluate(std::vector> X) { +std::vector MLPPTanhReg::Evaluate(std::vector> X) { MLPPLinAlg alg; MLPPActivation avn; return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); } -std::vector TanhReg::propagate(std::vector> X) { +std::vector MLPPTanhReg::propagate(std::vector> X) { MLPPLinAlg alg; return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); } -double TanhReg::Evaluate(std::vector x) { +double MLPPTanhReg::Evaluate(std::vector x) { MLPPLinAlg alg; MLPPActivation avn; return avn.tanh(alg.dot(weights, x) + bias); } -double TanhReg::propagate(std::vector x) { +double MLPPTanhReg::propagate(std::vector x) { MLPPLinAlg alg; return alg.dot(weights, x) + bias; } // Tanh ( wTx + b ) -void TanhReg::forwardPass() { +void MLPPTanhReg::forwardPass() { MLPPLinAlg alg; MLPPActivation avn; diff --git a/mlpp/tanh_reg/tanh_reg.h b/mlpp/tanh_reg/tanh_reg.h index 3d66612..08fecb0 100644 --- a/mlpp/tanh_reg/tanh_reg.h +++ b/mlpp/tanh_reg/tanh_reg.h @@ -13,9 +13,9 @@ -class TanhReg { +class MLPPTanhReg { public: - TanhReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5); + MLPPTanhReg(std::vector> inputSet, std::vector outputSet, std::string 
reg = "None", double lambda = 0.5, double alpha = 0.5); std::vector modelSetTest(std::vector> X); double modelTest(std::vector x); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); diff --git a/mlpp/transforms/transforms.cpp b/mlpp/transforms/transforms.cpp index 4935a69..e3703d7 100644 --- a/mlpp/transforms/transforms.cpp +++ b/mlpp/transforms/transforms.cpp @@ -14,7 +14,7 @@ // DCT ii. // https://www.mathworks.com/help/images/discrete-cosine-transform.html -std::vector> Transforms::discreteCosineTransform(std::vector> A) { +std::vector> MLPPTransforms::discreteCosineTransform(std::vector> A) { MLPPLinAlg alg; A = alg.scalarAdd(-128, A); // Center around 0. diff --git a/mlpp/transforms/transforms.h b/mlpp/transforms/transforms.h index 99685d7..53da729 100644 --- a/mlpp/transforms/transforms.h +++ b/mlpp/transforms/transforms.h @@ -11,7 +11,7 @@ #include -class Transforms { +class MLPPTransforms { public: std::vector> discreteCosineTransform(std::vector> A); }; diff --git a/mlpp/uni_lin_reg/uni_lin_reg.cpp b/mlpp/uni_lin_reg/uni_lin_reg.cpp index 0a0d0a7..9e0e62a 100644 --- a/mlpp/uni_lin_reg/uni_lin_reg.cpp +++ b/mlpp/uni_lin_reg/uni_lin_reg.cpp @@ -16,19 +16,19 @@ // ลท = b0 + b1x1 -UniLinReg::UniLinReg(std::vector x, std::vector y) : +MLPPUniLinReg::MLPPUniLinReg(std::vector x, std::vector y) : inputSet(x), outputSet(y) { - Stat estimator; + MLPPStat estimator; b1 = estimator.b1Estimation(inputSet, outputSet); b0 = estimator.b0Estimation(inputSet, outputSet); } -std::vector UniLinReg::modelSetTest(std::vector x) { +std::vector MLPPUniLinReg::modelSetTest(std::vector x) { MLPPLinAlg alg; return alg.scalarAdd(b0, alg.scalarMultiply(b1, x)); } -double UniLinReg::modelTest(double input) { +double MLPPUniLinReg::modelTest(double input) { return b0 + b1 * input; } diff --git a/mlpp/uni_lin_reg/uni_lin_reg.h b/mlpp/uni_lin_reg/uni_lin_reg.h index 8832405..bd4e7d8 100644 --- a/mlpp/uni_lin_reg/uni_lin_reg.h +++ b/mlpp/uni_lin_reg/uni_lin_reg.h @@ -11,9 +11,9 @@ #include -class UniLinReg { +class MLPPUniLinReg { public: - UniLinReg(std::vector x, std::vector y); + MLPPUniLinReg(std::vector x, std::vector y); std::vector modelSetTest(std::vector x); double modelTest(double x); diff --git a/mlpp/utilities/utilities.cpp b/mlpp/utilities/utilities.cpp index 4f5525f..7a7aa74 100644 --- a/mlpp/utilities/utilities.cpp +++ b/mlpp/utilities/utilities.cpp @@ -12,7 +12,7 @@ -std::vector Utilities::weightInitialization(int n, std::string type) { +std::vector MLPPUtilities::weightInitialization(int n, std::string type) { std::random_device rd; std::default_random_engine generator(rd()); @@ -47,7 +47,7 @@ std::vector Utilities::weightInitialization(int n, std::string type) { return weights; } -double Utilities::biasInitialization() { +double MLPPUtilities::biasInitialization() { std::random_device rd; std::default_random_engine generator(rd()); std::uniform_real_distribution distribution(0, 1); @@ -55,7 +55,7 @@ double Utilities::biasInitialization() { return distribution(generator); } -std::vector> Utilities::weightInitialization(int n, int m, std::string type) { +std::vector> MLPPUtilities::weightInitialization(int n, int m, std::string type) { std::random_device rd; std::default_random_engine generator(rd()); @@ -94,7 +94,7 @@ std::vector> Utilities::weightInitialization(int n, int m, s return weights; } -std::vector Utilities::biasInitialization(int n) { +std::vector MLPPUtilities::biasInitialization(int n) { std::vector bias; std::random_device rd; std::default_random_engine 
generator(rd()); @@ -106,7 +106,7 @@ std::vector Utilities::biasInitialization(int n) { return bias; } -double Utilities::performance(std::vector y_hat, std::vector outputSet) { +double MLPPUtilities::performance(std::vector y_hat, std::vector outputSet) { double correct = 0; for (int i = 0; i < y_hat.size(); i++) { if (std::round(y_hat[i]) == outputSet[i]) { @@ -116,7 +116,7 @@ double Utilities::performance(std::vector y_hat, std::vector out return correct / y_hat.size(); } -double Utilities::performance(std::vector> y_hat, std::vector> y) { +double MLPPUtilities::performance(std::vector> y_hat, std::vector> y) { double correct = 0; for (int i = 0; i < y_hat.size(); i++) { int sub_correct = 0; @@ -132,7 +132,7 @@ double Utilities::performance(std::vector> y_hat, std::vecto return correct / y_hat.size(); } -void Utilities::saveParameters(std::string fileName, std::vector weights, double bias, bool app, int layer) { +void MLPPUtilities::saveParameters(std::string fileName, std::vector weights, double bias, bool app, int layer) { std::string layer_info = ""; std::ofstream saveFile; @@ -160,7 +160,7 @@ void Utilities::saveParameters(std::string fileName, std::vector weights saveFile.close(); } -void Utilities::saveParameters(std::string fileName, std::vector weights, std::vector initial, double bias, bool app, int layer) { +void MLPPUtilities::saveParameters(std::string fileName, std::vector weights, std::vector initial, double bias, bool app, int layer) { std::string layer_info = ""; std::ofstream saveFile; @@ -194,7 +194,7 @@ void Utilities::saveParameters(std::string fileName, std::vector weights saveFile.close(); } -void Utilities::saveParameters(std::string fileName, std::vector> weights, std::vector bias, bool app, int layer) { +void MLPPUtilities::saveParameters(std::string fileName, std::vector> weights, std::vector bias, bool app, int layer) { std::string layer_info = ""; std::ofstream saveFile; @@ -226,7 +226,7 @@ void Utilities::saveParameters(std::string fileName, std::vector weights, double bias) { +void MLPPUtilities::UI(std::vector weights, double bias) { std::cout << "Values of the weight(s):" << std::endl; for (int i = 0; i < weights.size(); i++) { std::cout << weights[i] << std::endl; @@ -235,7 +235,7 @@ void Utilities::UI(std::vector weights, double bias) { std::cout << bias << std::endl; } -void Utilities::UI(std::vector> weights, std::vector bias) { +void MLPPUtilities::UI(std::vector> weights, std::vector bias) { std::cout << "Values of the weight(s):" << std::endl; for (int i = 0; i < weights.size(); i++) { for (int j = 0; j < weights[i].size(); j++) { @@ -248,7 +248,7 @@ void Utilities::UI(std::vector> weights, std::vector } } -void Utilities::UI(std::vector weights, std::vector initial, double bias) { +void MLPPUtilities::UI(std::vector weights, std::vector initial, double bias) { std::cout << "Values of the weight(s):" << std::endl; for (int i = 0; i < weights.size(); i++) { std::cout << weights[i] << std::endl; @@ -261,7 +261,7 @@ void Utilities::UI(std::vector weights, std::vector initial, dou std::cout << bias << std::endl; } -void Utilities::CostInfo(int epoch, double cost_prev, double Cost) { +void MLPPUtilities::CostInfo(int epoch, double cost_prev, double Cost) { std::cout << "-----------------------------------" << std::endl; std::cout << "This is epoch: " << epoch << std::endl; std::cout << "The cost function has been minimized by " << cost_prev - Cost << std::endl; @@ -269,7 +269,7 @@ void Utilities::CostInfo(int epoch, double cost_prev, double Cost) { 
std::cout << Cost << std::endl; } -std::vector>> Utilities::createMiniBatches(std::vector> inputSet, int n_mini_batch) { +std::vector>> MLPPUtilities::createMiniBatches(std::vector> inputSet, int n_mini_batch) { int n = inputSet.size(); std::vector>> inputMiniBatches; @@ -291,7 +291,7 @@ std::vector>> Utilities::createMiniBatches(std:: return inputMiniBatches; } -std::tuple>>, std::vector>> Utilities::createMiniBatches(std::vector> inputSet, std::vector outputSet, int n_mini_batch) { +std::tuple>>, std::vector>> MLPPUtilities::createMiniBatches(std::vector> inputSet, std::vector outputSet, int n_mini_batch) { int n = inputSet.size(); std::vector>> inputMiniBatches; @@ -317,7 +317,7 @@ std::tuple>>, std::vector>>, std::vector>>> Utilities::createMiniBatches(std::vector> inputSet, std::vector> outputSet, int n_mini_batch) { +std::tuple>>, std::vector>>> MLPPUtilities::createMiniBatches(std::vector> inputSet, std::vector> outputSet, int n_mini_batch) { int n = inputSet.size(); std::vector>> inputMiniBatches; @@ -343,7 +343,7 @@ std::tuple>>, std::vector Utilities::TF_PN(std::vector y_hat, std::vector y) { +std::tuple MLPPUtilities::TF_PN(std::vector y_hat, std::vector y) { double TP, FP, TN, FN = 0; for (int i = 0; i < y_hat.size(); i++) { if (y_hat[i] == y[i]) { @@ -363,20 +363,20 @@ std::tuple Utilities::TF_PN(std::vector return { TP, FP, TN, FN }; } -double Utilities::recall(std::vector y_hat, std::vector y) { +double MLPPUtilities::recall(std::vector y_hat, std::vector y) { auto [TP, FP, TN, FN] = TF_PN(y_hat, y); return TP / (TP + FN); } -double Utilities::precision(std::vector y_hat, std::vector y) { +double MLPPUtilities::precision(std::vector y_hat, std::vector y) { auto [TP, FP, TN, FN] = TF_PN(y_hat, y); return TP / (TP + FP); } -double Utilities::accuracy(std::vector y_hat, std::vector y) { +double MLPPUtilities::accuracy(std::vector y_hat, std::vector y) { auto [TP, FP, TN, FN] = TF_PN(y_hat, y); return (TP + TN) / (TP + FP + FN + TN); } -double Utilities::f1_score(std::vector y_hat, std::vector y) { +double MLPPUtilities::f1_score(std::vector y_hat, std::vector y) { return 2 * precision(y_hat, y) * recall(y_hat, y) / (precision(y_hat, y) + recall(y_hat, y)); } diff --git a/mlpp/utilities/utilities.h b/mlpp/utilities/utilities.h index 1c7d8dd..7cb0acb 100644 --- a/mlpp/utilities/utilities.h +++ b/mlpp/utilities/utilities.h @@ -13,7 +13,7 @@ #include -class Utilities { +class MLPPUtilities { public: // Weight Init static std::vector weightInitialization(int n, std::string type = "Default"); diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp index e28f2c0..fbc78ce 100644 --- a/mlpp/wgan/wgan.cpp +++ b/mlpp/wgan/wgan.cpp @@ -15,20 +15,20 @@ #include -WGAN::WGAN(double k, std::vector> outputSet) : +MLPPWGAN::MLPPWGAN(double k, std::vector> outputSet) : outputSet(outputSet), n(outputSet.size()), k(k) { } -WGAN::~WGAN() { +MLPPWGAN::~MLPPWGAN() { delete outputLayer; } -std::vector> WGAN::generateExample(int n) { +std::vector> MLPPWGAN::generateExample(int n) { MLPPLinAlg alg; return modelSetTestGenerator(alg.gaussianNoise(n, k)); } -void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { +void MLPPWGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { class MLPPCost cost; MLPPLinAlg alg; double cost_prev = 0; @@ -50,7 +50,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { for (int i = 0; i < CRITIC_INTERATIONS; i++) { generatorInputSet = alg.gaussianNoise(n, k); discriminatorInputSet = 
modelSetTestGenerator(generatorInputSet); - discriminatorInputSet.insert(discriminatorInputSet.end(), WGAN::outputSet.begin(), WGAN::outputSet.end()); // Fake + real inputs. + discriminatorInputSet.insert(discriminatorInputSet.end(), MLPPWGAN::outputSet.begin(), MLPPWGAN::outputSet.end()); // Fake + real inputs. y_hat = modelSetTestDiscriminator(discriminatorInputSet); outputSet = alg.scalarMultiply(-1, alg.onevec(n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1 @@ -75,7 +75,7 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { forwardPass(); if (UI) { - WGAN::UI(epoch, cost_prev, WGAN::y_hat, alg.onevec(n)); + MLPPWGAN::UI(epoch, cost_prev, MLPPWGAN::y_hat, alg.onevec(n)); } epoch++; @@ -85,15 +85,15 @@ void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } } -double WGAN::score() { +double MLPPWGAN::score() { MLPPLinAlg alg; - Utilities util; + MLPPUtilities util; forwardPass(); return util.performance(y_hat, alg.onevec(n)); } -void WGAN::save(std::string fileName) { - Utilities util; +void MLPPWGAN::save(std::string fileName) { + MLPPUtilities util; if (!network.empty()) { util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1); for (int i = 1; i < network.size(); i++) { @@ -105,7 +105,7 @@ void WGAN::save(std::string fileName) { } } -void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) { MLPPLinAlg alg; if (network.empty()) { network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha)); @@ -116,7 +116,7 @@ void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit } } -void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { +void MLPPWGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) { MLPPLinAlg alg; if (!network.empty()) { outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01); @@ -125,7 +125,7 @@ void WGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda } } -std::vector> WGAN::modelSetTestGenerator(std::vector> X) { +std::vector> MLPPWGAN::modelSetTestGenerator(std::vector> X) { if (!network.empty()) { network[0].input = X; network[0].forwardPass(); @@ -138,7 +138,7 @@ std::vector> WGAN::modelSetTestGenerator(std::vector WGAN::modelSetTestDiscriminator(std::vector> X) { +std::vector MLPPWGAN::modelSetTestDiscriminator(std::vector> X) { if (!network.empty()) { for (int i = network.size() / 2 + 1; i < network.size(); i++) { if (i == network.size() / 2 + 1) { @@ -154,7 +154,7 @@ std::vector WGAN::modelSetTestDiscriminator(std::vectora; } -double WGAN::Cost(std::vector y_hat, std::vector y) { +double MLPPWGAN::Cost(std::vector y_hat, std::vector y) { MLPPReg regularization; class MLPPCost cost; double totalRegTerm = 0; @@ -168,7 +168,7 @@ double WGAN::Cost(std::vector y_hat, std::vector y) { return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg); } -void WGAN::forwardPass() { +void MLPPWGAN::forwardPass() { MLPPLinAlg alg; if (!network.empty()) { network[0].input = alg.gaussianNoise(n, k); @@ -186,7 
+186,7 @@ void WGAN::forwardPass() { y_hat = outputLayer->a; } -void WGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { +void MLPPWGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, std::vector outputLayerUpdation, double learning_rate) { MLPPLinAlg alg; outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); @@ -203,7 +203,7 @@ void WGAN::updateDiscriminatorParameters(std::vector>> hiddenLayerUpdations, double learning_rate) { +void MLPPWGAN::updateGeneratorParameters(std::vector>> hiddenLayerUpdations, double learning_rate) { MLPPLinAlg alg; if (!network.empty()) { @@ -216,7 +216,7 @@ void WGAN::updateGeneratorParameters(std::vector } } -std::tuple>>, std::vector> WGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { +std::tuple>>, std::vector> MLPPWGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; @@ -252,7 +252,7 @@ std::tuple>>, std::vector> W return { cumulativeHiddenLayerWGrad, outputWGrad }; } -std::vector>> WGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { +std::vector>> MLPPWGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { class MLPPCost cost; MLPPActivation avn; MLPPLinAlg alg; @@ -281,14 +281,14 @@ std::vector>> WGAN::computeGeneratorGradients(st return cumulativeHiddenLayerWGrad; } -void WGAN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { - Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); +void MLPPWGAN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector outputSet) { + MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet)); std::cout << "Layer " << network.size() + 1 << ": " << std::endl; - Utilities::UI(outputLayer->weights, outputLayer->bias); + MLPPUtilities::UI(outputLayer->weights, outputLayer->bias); if (!network.empty()) { for (int i = network.size() - 1; i >= 0; i--) { std::cout << "Layer " << i + 1 << ": " << std::endl; - Utilities::UI(network[i].weights, network[i].bias); + MLPPUtilities::UI(network[i].weights, network[i].bias); } } } diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h index fbccf7d..6712136 100644 --- a/mlpp/wgan/wgan.h +++ b/mlpp/wgan/wgan.h @@ -17,10 +17,10 @@ -class WGAN { +class MLPPWGAN { public: - WGAN(double k, std::vector> outputSet); - ~WGAN(); + MLPPWGAN(double k, std::vector> outputSet); + ~MLPPWGAN(); std::vector> generateExample(int n); void gradientDescent(double learning_rate, int max_epoch, bool UI = 1); double score();
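Taken together, callers only need to switch to the MLPP-prefixed class names; the constructor and method signatures themselves are unchanged by this patch. A minimal migration sketch (assumptions: the include paths mirror the repository layout shown above, the toy data and hyperparameters are purely illustrative, and default arguments such as weightInitialization's type = "Default" behave as declared in utilities.h):

#include <iostream>
#include <vector>

#include "mlpp/svc/svc.h"                 // assumed include paths, taken from
#include "mlpp/uni_lin_reg/uni_lin_reg.h" // the file layout in this patch
#include "mlpp/utilities/utilities.h"

int main() {
	// Toy data, illustrative only; the SVC hinge loss expects +/-1 labels.
	std::vector<std::vector<double>> X = { { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 }, { 0.0, 0.0 } };
	std::vector<double> y = { 1.0, -1.0, 1.0, -1.0 };

	MLPPSVC svc(X, y, 1.0);                // was: SVC svc(X, y, 1.0);
	svc.gradientDescent(0.01, 100, false); // learning_rate, max_epoch, UI
	std::cout << "SVC accuracy: " << svc.score() << std::endl;

	MLPPUniLinReg ulr({ 1.0, 2.0, 3.0 }, { 2.0, 4.0, 6.0 }); // was: UniLinReg (uses MLPPStat internally)
	std::cout << "UniLinReg(4.0) = " << ulr.modelTest(4.0) << std::endl; // b0 + b1 * x

	// Static helpers follow the same pattern: Utilities::... -> MLPPUtilities::...
	std::vector<double> w = MLPPUtilities::weightInitialization(3);
	std::cout << "Initialized " << w.size() << " weights" << std::endl;
	return 0;
}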