From ab0d41203be80167a4fd0bc72f3f227896a1c5b1 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Tue, 24 Jan 2023 19:20:18 +0100
Subject: [PATCH] Removed the MLPP namespace.

---
 main.cpp                                           | 2 --
 mlpp/activation/activation.cpp                     | 3 ---
 mlpp/activation/activation.h                       | 2 --
 mlpp/ann/ann.cpp                                   | 2 --
 mlpp/ann/ann.h                                     | 3 ---
 mlpp/auto_encoder/auto_encoder.cpp                 | 2 --
 mlpp/auto_encoder/auto_encoder.h                   | 3 ---
 mlpp/bernoulli_nb/bernoulli_nb.cpp                 | 2 --
 mlpp/bernoulli_nb/bernoulli_nb.h                   | 4 +---
 mlpp/c_log_log_reg/c_log_log_reg.cpp               | 2 --
 mlpp/c_log_log_reg/c_log_log_reg.h                 | 3 ---
 mlpp/convolutions/convolutions.cpp                 | 3 ---
 mlpp/convolutions/convolutions.h                   | 2 --
 mlpp/cost/cost.cpp                                 | 3 +--
 mlpp/cost/cost.h                                   | 4 ++--
 mlpp/data/data.cpp                                 | 4 ++--
 mlpp/data/data.h                                   | 4 ++--
 mlpp/dual_svc/dual_svc.cpp                         | 3 +--
 mlpp/dual_svc/dual_svc.h                           | 4 ++--
 mlpp/exp_reg/exp_reg.cpp                           | 3 +--
 mlpp/exp_reg/exp_reg.h                             | 4 ++--
 mlpp/gan/gan.cpp                                   | 3 +--
 mlpp/gan/gan.h                                     | 4 ++--
 mlpp/gauss_markov_checker/gauss_markov_checker.cpp | 4 ++--
 mlpp/gauss_markov_checker/gauss_markov_checker.h   | 4 ++--
 mlpp/gaussian_nb/gaussian_nb.cpp                   | 3 +--
 mlpp/gaussian_nb/gaussian_nb.h                     | 3 +--
 mlpp/hidden_layer/hidden_layer.cpp                 | 3 +--
 mlpp/hidden_layer/hidden_layer.h                   | 4 ++--
 mlpp/hypothesis_testing/hypothesis_testing.cpp     | 3 +--
 mlpp/hypothesis_testing/hypothesis_testing.h       | 4 ++--
 mlpp/kmeans/kmeans.cpp                             | 4 ++--
 mlpp/kmeans/kmeans.h                               | 4 ++--
 mlpp/knn/knn.cpp                                   | 3 +--
 mlpp/knn/knn.h                                     | 4 ++--
 mlpp/lin_alg/lin_alg.cpp                           | 3 +--
 mlpp/lin_alg/lin_alg.h                             | 4 ++--
 mlpp/lin_reg/lin_reg.cpp                           | 3 +--
 mlpp/lin_reg/lin_reg.h                             | 4 ++--
 mlpp/log_reg/log_reg.cpp                           | 3 +--
 mlpp/log_reg/log_reg.h                             | 4 ++--
 mlpp/mann/mann.cpp                                 | 3 +--
 mlpp/mann/mann.h                                   | 4 ++--
 mlpp/mlp/mlp.cpp                                   | 4 ++--
 mlpp/mlp/mlp.h                                     | 4 ++--
 mlpp/multi_output_layer/multi_output_layer.cpp     | 3 +--
 mlpp/multi_output_layer/multi_output_layer.h       | 4 ++--
 mlpp/multinomial_nb/multinomial_nb.cpp             | 3 +--
 mlpp/multinomial_nb/multinomial_nb.h               | 3 +--
 mlpp/numerical_analysis/numerical_analysis.cpp     | 3 +--
 mlpp/numerical_analysis/numerical_analysis.h       | 4 ++--
 mlpp/outlier_finder/outlier_finder.cpp             | 3 +--
 mlpp/outlier_finder/outlier_finder.h               | 4 ++--
 mlpp/output_layer/output_layer.cpp                 | 3 +--
 mlpp/output_layer/output_layer.h                   | 4 ++--
 mlpp/pca/pca.cpp                                   | 4 ++--
 mlpp/pca/pca.h                                     | 4 ++--
 mlpp/probit_reg/probit_reg.cpp                     | 3 +--
 mlpp/probit_reg/probit_reg.h                       | 4 ++--
 mlpp/regularization/reg.cpp                        | 4 ++--
 mlpp/regularization/reg.h                          | 4 ++--
 mlpp/softmax_net/softmax_net.cpp                   | 3 +--
 mlpp/softmax_net/softmax_net.h                     | 4 ++--
 mlpp/softmax_reg/softmax_reg.cpp                   | 3 +--
 mlpp/softmax_reg/softmax_reg.h                     | 4 ++--
 mlpp/stat/stat.cpp                                 | 3 +--
 mlpp/stat/stat.h                                   | 4 ++--
 mlpp/svc/svc.cpp                                   | 3 +--
 mlpp/svc/svc.h                                     | 4 ++--
 mlpp/tanh_reg/tanh_reg.cpp                         | 3 +--
 mlpp/tanh_reg/tanh_reg.h                           | 4 ++--
 mlpp/transforms/transforms.cpp                     | 3 +--
 mlpp/transforms/transforms.h                       | 4 ++--
 mlpp/uni_lin_reg/uni_lin_reg.cpp                   | 4 ++--
 mlpp/uni_lin_reg/uni_lin_reg.h                     | 4 ++--
 mlpp/utilities/utilities.cpp                       | 3 +--
 mlpp/utilities/utilities.h                         | 4 ++--
 mlpp/wgan/wgan.cpp                                 | 3 +--
 mlpp/wgan/wgan.h                                   | 4 ++--
 79 files changed, 105 insertions(+), 164 deletions(-)

diff --git a/main.cpp b/main.cpp
index b9a0f74..db813fb 100644
--- a/main.cpp
+++ b/main.cpp
@@ -51,8 +51,6 @@
 #include "MLPP/WGAN/WGAN.hpp"
 #include "MLPP/Transforms/Transforms.hpp"
 
-using namespace MLPP;
-
 // double f(double x){
 //     return x*x*x + 2*x - 2;
diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index a194951..2bcf859 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -10,8 +10,6 @@
 #include
 #include
 
-namespace MLPP {
-
 double Activation::linear(double z, bool deriv) {
 	if (deriv) {
 		return 1;
 	}
@@ -951,4 +949,3 @@ std::vector Activation::activation(std::vector z, bool deriv, do
 	}
 	return a;
 }
-} //namespace MLPP
diff --git a/mlpp/activation/activation.h b/mlpp/activation/activation.h
index 562cd0a..65f6cec 100644
--- a/mlpp/activation/activation.h
+++ b/mlpp/activation/activation.h
@@ -10,7 +10,6 @@
 #include
 
-namespace MLPP {
 class Activation {
 public:
 	double linear(double z, bool deriv = 0);
@@ -142,6 +141,5 @@ public:
 
 private:
 };
-} //namespace MLPP
 
 #endif /* Activation_hpp */
diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 1c4441a..a560f2f 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -15,7 +15,6 @@
 #include
 #include
 
-namespace MLPP {
 ANN::ANN(std::vector> inputSet, std::vector outputSet) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) {
 }
@@ -761,4 +760,3 @@ void ANN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector
 		}
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/ann/ann.h b/mlpp/ann/ann.h
index aaae521..52f85db 100644
--- a/mlpp/ann/ann.h
+++ b/mlpp/ann/ann.h
@@ -14,8 +14,6 @@
 #include
 #include
 
-namespace MLPP {
-
 class ANN {
 public:
 	ANN(std::vector> inputSet, std::vector outputSet);
@@ -66,6 +64,5 @@ private:
 	double decayConstant;
 	double dropRate;
 };
-} //namespace MLPP
 
 #endif /* ANN_hpp */
\ No newline at end of file
diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp
index 0cc54ac..d04bc58 100644
--- a/mlpp/auto_encoder/auto_encoder.cpp
+++ b/mlpp/auto_encoder/auto_encoder.cpp
@@ -13,7 +13,6 @@
 #include
 #include
 
-namespace MLPP {
 AutoEncoder::AutoEncoder(std::vector> inputSet, int n_hidden) :
 		inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
 	Activation avn;
@@ -252,4 +251,3 @@ void AutoEncoder::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/auto_encoder/auto_encoder.h b/mlpp/auto_encoder/auto_encoder.h
index 03e9471..db98dce 100644
--- a/mlpp/auto_encoder/auto_encoder.h
+++ b/mlpp/auto_encoder/auto_encoder.h
@@ -12,8 +12,6 @@
 #include
 #include
 
-namespace MLPP {
-
 class AutoEncoder {
 public:
 	AutoEncoder(std::vector> inputSet, int n_hidden);
@@ -50,6 +48,5 @@ private:
 	int k;
 	int n_hidden;
 };
-} //namespace MLPP
 
 #endif /* AutoEncoder_hpp */
diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp
index dcba064..494946b 100644
--- a/mlpp/bernoulli_nb/bernoulli_nb.cpp
+++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp
@@ -12,7 +12,6 @@
 #include
 #include
 
-namespace MLPP {
 BernoulliNB::BernoulliNB(std::vector> inputSet, std::vector outputSet) :
 		inputSet(inputSet), outputSet(outputSet), class_num(2) {
 	y_hat.resize(outputSet.size());
@@ -175,4 +174,3 @@ void BernoulliNB::Evaluate() {
 		}
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/bernoulli_nb/bernoulli_nb.h b/mlpp/bernoulli_nb/bernoulli_nb.h
index 02a5c09..8ae08bc 100644
--- a/mlpp/bernoulli_nb/bernoulli_nb.h
+++ b/mlpp/bernoulli_nb/bernoulli_nb.h
@@ -11,7 +11,6 @@
 #include
 #include
 
-namespace MLPP {
 class BernoulliNB {
 public:
 	BernoulliNB(std::vector> inputSet, std::vector outputSet);
@@ -38,5 +37,4 @@ private:
 
 	std::vector y_hat;
 };
 
-#endif /* BernoulliNB_hpp */
-}
\ No newline at end of file
+#endif /* BernoulliNB_hpp */
\ No newline at end of file
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp
index 7bbfa03..b90b232 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.cpp
+++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp
@@ -14,7 +14,6 @@
 #include
 #include
 
-namespace MLPP {
 CLogLogReg::CLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -221,4 +220,3 @@ void CLogLogReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.cloglog(z);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.h b/mlpp/c_log_log_reg/c_log_log_reg.h
index 37cd802..da65675 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.h
+++ b/mlpp/c_log_log_reg/c_log_log_reg.h
@@ -11,8 +11,6 @@
 #include
 #include
 
-namespace MLPP {
-
 class CLogLogReg {
 public:
 	CLogLogReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -50,6 +48,5 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 
 #endif /* CLogLogReg_hpp */
diff --git a/mlpp/convolutions/convolutions.cpp b/mlpp/convolutions/convolutions.cpp
index cc0911f..e501af4 100644
--- a/mlpp/convolutions/convolutions.cpp
+++ b/mlpp/convolutions/convolutions.cpp
@@ -10,8 +10,6 @@
 #include
 #include
 
-namespace MLPP {
-
 Convolutions::Convolutions() :
 		prewittHorizontal({ { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } }), prewittVertical({ { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } }), sobelHorizontal({ { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }), sobelVertical({ { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }), scharrHorizontal({ { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } }), scharrVertical({ { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }), robertsHorizontal({ { 0, 1 }, { -1, 0 } }), robertsVertical({ { 1, 0 }, { 0, -1 } }) {
 }
@@ -373,4 +371,3 @@ std::vector> Convolutions::getRobertsHorizontal() {
 std::vector> Convolutions::getRobertsVertical() {
 	return robertsVertical;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/convolutions/convolutions.h b/mlpp/convolutions/convolutions.h
index da9ad31..4f97db5 100644
--- a/mlpp/convolutions/convolutions.h
+++ b/mlpp/convolutions/convolutions.h
@@ -5,7 +5,6 @@
 #include
 #include
 
-namespace MLPP {
 class Convolutions {
 public:
 	Convolutions();
@@ -47,6 +46,5 @@ private:
 	std::vector> robertsHorizontal;
 	std::vector> robertsVertical;
 };
-} //namespace MLPP
 
 #endif // Convolutions_hpp
\ No newline at end of file
diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp
index 06a6192..cdfe2a6 100644
--- a/mlpp/cost/cost.cpp
+++ b/mlpp/cost/cost.cpp
@@ -10,7 +10,7 @@
 #include
 #include
 
-namespace MLPP {
+
 double Cost::MSE(std::vector y_hat, std::vector y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -404,4 +404,3 @@ std::vector Cost::dualFormSVMDeriv(std::vector alpha, std::vecto
 
 	return alg.subtraction(alphaQDeriv, one);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/cost/cost.h b/mlpp/cost/cost.h
index 6f348f1..6857527 100644
--- a/mlpp/cost/cost.h
+++ b/mlpp/cost/cost.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class Cost {
 public:
 	// Regression Costs
@@ -81,6 +81,6 @@ public:
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* Cost_hpp */
diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp
index 04a2518..afcf0f5 100644
--- a/mlpp/data/data.cpp
+++ b/mlpp/data/data.cpp
@@ -16,7 +16,7 @@
 #include
 #include
 
-namespace MLPP {
+
 // Loading Datasets
 std::tuple>, std::vector> Data::loadBreastCancer() {
 	const int BREAST_CANCER_SIZE = 30; // k = 30
@@ -753,4 +753,4 @@ std::vector Data::reverseOneHot(std::vector> tempOut
 
 	return outputSet;
 }
-} //namespace MLPP
+
diff --git a/mlpp/data/data.h b/mlpp/data/data.h
index 1ad9f41..f5a8943 100644
--- a/mlpp/data/data.h
+++ b/mlpp/data/data.h
@@ -13,7 +13,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class Data {
 public:
 	// Load Datasets
@@ -94,6 +94,6 @@ public:
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* Data_hpp */
diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp
index e8e242d..3e6396b 100644
--- a/mlpp/dual_svc/dual_svc.cpp
+++ b/mlpp/dual_svc/dual_svc.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 DualSVC::DualSVC(std::vector> inputSet, std::vector outputSet, double C, std::string kernel) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) {
 	y_hat.resize(n);
@@ -236,4 +236,3 @@ std::vector> DualSVC::kernelFunction(std::vector
 #include
 
-namespace MLPP {
+
 class DualSVC {
 public:
@@ -65,6 +65,6 @@ private:
 	// UI Portion
 	void UI(int epoch, double cost_prev);
 };
-} //namespace MLPP
+
 
 #endif /* DualSVC_hpp */
diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp
index c49cb5e..22327be 100644
--- a/mlpp/exp_reg/exp_reg.cpp
+++ b/mlpp/exp_reg/exp_reg.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 ExpReg::ExpReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -235,4 +235,3 @@ double ExpReg::Evaluate(std::vector x) {
 void ExpReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/exp_reg/exp_reg.h b/mlpp/exp_reg/exp_reg.h
index 3596bed..af39f70 100644
--- a/mlpp/exp_reg/exp_reg.h
+++ b/mlpp/exp_reg/exp_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class ExpReg {
 public:
 	ExpReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -45,6 +45,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* ExpReg_hpp */
diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp
index 1942536..a2f2ac0 100644
--- a/mlpp/gan/gan.cpp
+++ b/mlpp/gan/gan.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 GAN::GAN(double k, std::vector> outputSet) :
 		outputSet(outputSet), n(outputSet.size()), k(k) {
 }
@@ -283,4 +283,3 @@ void GAN::UI(int epoch, double cost_prev, std::vector y_hat, std::vector
 		}
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/gan/gan.h b/mlpp/gan/gan.h
index b873980..bf755eb 100644
--- a/mlpp/gan/gan.h
+++ b/mlpp/gan/gan.h
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class GAN {
 public:
@@ -52,6 +52,6 @@ private:
 	int n;
 	int k;
 };
-} //namespace MLPP
+
 
 #endif /* GAN_hpp */
\ No newline at end of file
diff --git a/mlpp/gauss_markov_checker/gauss_markov_checker.cpp b/mlpp/gauss_markov_checker/gauss_markov_checker.cpp
index 83a81c7..e9fe326 100644
--- a/mlpp/gauss_markov_checker/gauss_markov_checker.cpp
+++ b/mlpp/gauss_markov_checker/gauss_markov_checker.cpp
@@ -8,7 +8,7 @@
 #include "../stat/stat.h"
 #include
 
-namespace MLPP {
+
 void GaussMarkovChecker::checkGMConditions(std::vector eps) {
 	bool condition1 = arithmeticMean(eps);
 	bool condition2 = homoscedasticity(eps);
@@ -54,4 +54,4 @@ bool GaussMarkovChecker::exogeneity(std::vector eps) {
 	}
 	return 1;
 }
-} //namespace MLPP
+
diff --git a/mlpp/gauss_markov_checker/gauss_markov_checker.h b/mlpp/gauss_markov_checker/gauss_markov_checker.h
index 0db1bc8..b00e59f 100644
--- a/mlpp/gauss_markov_checker/gauss_markov_checker.h
+++ b/mlpp/gauss_markov_checker/gauss_markov_checker.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class GaussMarkovChecker {
 public:
 	void checkGMConditions(std::vector eps);
@@ -22,6 +22,6 @@ public:
 	bool exogeneity(std::vector eps); // 3) Cov of any 2 non-equal eps values = 0.
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* GaussMarkovChecker_hpp */
diff --git a/mlpp/gaussian_nb/gaussian_nb.cpp b/mlpp/gaussian_nb/gaussian_nb.cpp
index ecfc66d..080854e 100644
--- a/mlpp/gaussian_nb/gaussian_nb.cpp
+++ b/mlpp/gaussian_nb/gaussian_nb.cpp
@@ -13,7 +13,7 @@
 #include
 #include
 
-namespace MLPP {
+
 GaussianNB::GaussianNB(std::vector> inputSet, std::vector outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
@@ -88,4 +88,3 @@ void GaussianNB::Evaluate() {
 		std::cout << std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double))) << std::endl;
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/gaussian_nb/gaussian_nb.h b/mlpp/gaussian_nb/gaussian_nb.h
index 456577b..5b9a0cf 100644
--- a/mlpp/gaussian_nb/gaussian_nb.h
+++ b/mlpp/gaussian_nb/gaussian_nb.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class GaussianNB {
 public:
 	GaussianNB(std::vector> inputSet, std::vector outputSet, int class_num);
@@ -34,4 +34,3 @@ private:
 };
 
 #endif /* GaussianNB_hpp */
-}
\ No newline at end of file
diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp
index 2b1362b..a283116 100644
--- a/mlpp/hidden_layer/hidden_layer.cpp
+++ b/mlpp/hidden_layer/hidden_layer.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
@@ -110,4 +110,3 @@ void HiddenLayer::Test(std::vector x) {
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/hidden_layer/hidden_layer.h b/mlpp/hidden_layer/hidden_layer.h
index a1f3f46..3ce5963 100644
--- a/mlpp/hidden_layer/hidden_layer.h
+++ b/mlpp/hidden_layer/hidden_layer.h
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class HiddenLayer {
 public:
 	HiddenLayer(int n_hidden, std::string activation, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -48,6 +48,6 @@ public:
 	void forwardPass();
 	void Test(std::vector x);
 };
-} //namespace MLPP
+
 
 #endif /* HiddenLayer_hpp */
\ No newline at end of file
diff --git a/mlpp/hypothesis_testing/hypothesis_testing.cpp b/mlpp/hypothesis_testing/hypothesis_testing.cpp
index fccadd5..f631070 100644
--- a/mlpp/hypothesis_testing/hypothesis_testing.cpp
+++ b/mlpp/hypothesis_testing/hypothesis_testing.cpp
@@ -6,7 +6,7 @@
 
 #include "hypothesis_testing.h"
 
-namespace MLPP {
+
 std::tuple HypothesisTesting::chiSquareTest(std::vector observed, std::vector expected) {
 	double df = observed.size() - 1; // These are our degrees of freedom
@@ -16,4 +16,3 @@ std::tuple HypothesisTesting::chiSquareTest(std::vector ob
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/hypothesis_testing/hypothesis_testing.h b/mlpp/hypothesis_testing/hypothesis_testing.h
index 1edbedd..4d1b0a6 100644
--- a/mlpp/hypothesis_testing/hypothesis_testing.h
+++ b/mlpp/hypothesis_testing/hypothesis_testing.h
@@ -11,13 +11,13 @@
 #include
 #include
 
-namespace MLPP {
+
 class HypothesisTesting {
 public:
 	std::tuple chiSquareTest(std::vector observed, std::vector expected);
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* HypothesisTesting_hpp */
diff --git a/mlpp/kmeans/kmeans.cpp b/mlpp/kmeans/kmeans.cpp
index 69b8835..305f074 100644
--- a/mlpp/kmeans/kmeans.cpp
+++ b/mlpp/kmeans/kmeans.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 KMeans::KMeans(std::vector> inputSet, int k, std::string init_type) :
 		inputSet(inputSet), k(k), init_type(init_type) {
 	if (init_type == "KMeans++") {
@@ -232,4 +232,4 @@ double KMeans::Cost() {
 	}
 	return sum;
 }
-} //namespace MLPP
+
diff --git a/mlpp/kmeans/kmeans.h b/mlpp/kmeans/kmeans.h
index f7577af..7ca43c6 100644
--- a/mlpp/kmeans/kmeans.h
+++ b/mlpp/kmeans/kmeans.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class KMeans {
 public:
 	KMeans(std::vector> inputSet, int k, std::string init_type = "Default");
@@ -40,6 +40,6 @@ private:
 
 	std::string init_type;
 };
-} //namespace MLPP
+
 
 #endif /* KMeans_hpp */
diff --git a/mlpp/knn/knn.cpp b/mlpp/knn/knn.cpp
index cfae4e4..6c39d47 100644
--- a/mlpp/knn/knn.cpp
+++ b/mlpp/knn/knn.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 kNN::kNN(std::vector> inputSet, std::vector outputSet, int k) :
 		inputSet(inputSet), outputSet(outputSet), k(k) {
 }
@@ -82,4 +82,3 @@ std::vector kNN::nearestNeighbors(std::vector x) {
 	}
 	return knn;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/knn/knn.h b/mlpp/knn/knn.h
index a7d462f..ed168eb 100644
--- a/mlpp/knn/knn.h
+++ b/mlpp/knn/knn.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class kNN {
 public:
 	kNN(std::vector> inputSet, std::vector outputSet, int k);
@@ -28,6 +28,6 @@ private:
 	std::vector outputSet;
 	int k;
 };
-} //namespace MLPP
+
 
 #endif /* kNN_hpp */
diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index 1ee05a7..8661851 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 std::vector> LinAlg::gramMatrix(std::vector> A) {
 	return matmult(transpose(A), A); // AtA
@@ -1226,4 +1226,3 @@ std::vector>> LinAlg::vector_wise_tensor_product
 	}
 	return C;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h
index 576d14e..7c9412c 100644
--- a/mlpp/lin_alg/lin_alg.h
+++ b/mlpp/lin_alg/lin_alg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class LinAlg {
 public:
 	// MATRIX FUNCTIONS
@@ -231,6 +231,6 @@ public:
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* LinAlg_hpp */
\ No newline at end of file
diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp
index f3c3859..44ce144 100644
--- a/mlpp/lin_reg/lin_reg.cpp
+++ b/mlpp/lin_reg/lin_reg.cpp
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 LinReg::LinReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
@@ -237,4 +237,3 @@ double LinReg::Evaluate(std::vector x) {
 void LinReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/lin_reg/lin_reg.h b/mlpp/lin_reg/lin_reg.h
index aef1acd..97251ed 100644
--- a/mlpp/lin_reg/lin_reg.h
+++ b/mlpp/lin_reg/lin_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class LinReg {
 public:
 	LinReg(std::vector> inputSet, std::vector outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -46,6 +46,6 @@ private:
 	int lambda;
 	int alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* LinReg_hpp */
diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp
index 8571aa7..93df77d 100644
--- a/mlpp/log_reg/log_reg.cpp
+++ b/mlpp/log_reg/log_reg.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 LogReg::LogReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -202,4 +202,3 @@ double LogReg::Evaluate(std::vector x) {
 void LogReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/log_reg/log_reg.h b/mlpp/log_reg/log_reg.h
index 51fe6d2..8de8da8 100644
--- a/mlpp/log_reg/log_reg.h
+++ b/mlpp/log_reg/log_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class LogReg {
 public:
@@ -47,6 +47,6 @@ private:
 	double lambda; /* Regularization Parameter */
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* LogReg_hpp */
diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp
index 6046f91..134aa25 100644
--- a/mlpp/mann/mann.cpp
+++ b/mlpp/mann/mann.cpp
@@ -13,7 +13,7 @@
 
 #include
 
-namespace MLPP {
+
 MANN::MANN(std::vector> inputSet, std::vector> outputSet) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) {
 }
@@ -187,4 +187,3 @@ void MANN::forwardPass() {
 	outputLayer->forwardPass();
 	y_hat = outputLayer->a;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/mann/mann.h b/mlpp/mann/mann.h
index e381ab8..503770d 100644
--- a/mlpp/mann/mann.h
+++ b/mlpp/mann/mann.h
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class MANN {
 public:
@@ -44,6 +44,6 @@ private:
 	int k;
 	int n_output;
 };
-} //namespace MLPP
+
 
 #endif /* MANN_hpp */
\ No newline at end of file
diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp
index 6650928..68b534e 100644
--- a/mlpp/mlp/mlp.cpp
+++ b/mlpp/mlp/mlp.cpp
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 MLP::MLP(std::vector> inputSet, std::vector outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	Activation avn;
@@ -270,4 +270,4 @@ void MLP::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
 }
-} //namespace MLPP
+
diff --git a/mlpp/mlp/mlp.h b/mlpp/mlp/mlp.h
index 1e5b39f..82b56c4 100644
--- a/mlpp/mlp/mlp.h
+++ b/mlpp/mlp/mlp.h
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class MLP {
 public:
@@ -56,6 +56,6 @@ private:
 	double lambda; /* Regularization Parameter */
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* MLP_hpp */
diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp
index 1a90b8e..370ae0c 100644
--- a/mlpp/multi_output_layer/multi_output_layer.cpp
+++ b/mlpp/multi_output_layer/multi_output_layer.cpp
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
@@ -129,4 +129,3 @@ void MultiOutputLayer::Test(std::vector x) {
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/multi_output_layer/multi_output_layer.h b/mlpp/multi_output_layer/multi_output_layer.h
index 800139e..fea1e71 100644
--- a/mlpp/multi_output_layer/multi_output_layer.h
+++ b/mlpp/multi_output_layer/multi_output_layer.h
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class MultiOutputLayer {
 public:
 	MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -53,6 +53,6 @@ public:
 	void forwardPass();
 	void Test(std::vector x);
 };
-} //namespace MLPP
+
 
 #endif /* MultiOutputLayer_hpp */
diff --git a/mlpp/multinomial_nb/multinomial_nb.cpp b/mlpp/multinomial_nb/multinomial_nb.cpp
index a936e84..0370990 100644
--- a/mlpp/multinomial_nb/multinomial_nb.cpp
+++ b/mlpp/multinomial_nb/multinomial_nb.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 MultinomialNB::MultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
@@ -116,4 +116,3 @@ void MultinomialNB::Evaluate() {
 		y_hat[i] = std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/multinomial_nb/multinomial_nb.h b/mlpp/multinomial_nb/multinomial_nb.h
index aec4c79..01ac0b4 100644
--- a/mlpp/multinomial_nb/multinomial_nb.h
+++ b/mlpp/multinomial_nb/multinomial_nb.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class MultinomialNB {
 public:
 	MultinomialNB(std::vector> inputSet, std::vector outputSet, int class_num);
@@ -37,4 +37,3 @@ private:
 };
 
 #endif /* MultinomialNB_hpp */
-}
\ No newline at end of file
diff --git a/mlpp/numerical_analysis/numerical_analysis.cpp b/mlpp/numerical_analysis/numerical_analysis.cpp
index c5d3069..3e0d8e0 100644
--- a/mlpp/numerical_analysis/numerical_analysis.cpp
+++ b/mlpp/numerical_analysis/numerical_analysis.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 double NumericalAnalysis::numDiff(double (*function)(double), double x) {
 	double eps = 1e-10;
@@ -293,4 +293,3 @@ std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(st
 		}
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/numerical_analysis/numerical_analysis.h b/mlpp/numerical_analysis/numerical_analysis.h
index 3eb5c40..03794f5 100644
--- a/mlpp/numerical_analysis/numerical_analysis.h
+++ b/mlpp/numerical_analysis/numerical_analysis.h
@@ -10,7 +10,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class NumericalAnalysis {
 public:
 	/* A numerical method for derivatives is used. This may be subject to change,
@@ -52,6 +52,6 @@ public:
 	std::string secondPartialDerivativeTest(double (*function)(std::vector), std::vector x);
 };
-} //namespace MLPP
+
 
 #endif /* NumericalAnalysis_hpp */
diff --git a/mlpp/outlier_finder/outlier_finder.cpp b/mlpp/outlier_finder/outlier_finder.cpp
index df53075..1c6c734 100644
--- a/mlpp/outlier_finder/outlier_finder.cpp
+++ b/mlpp/outlier_finder/outlier_finder.cpp
@@ -8,7 +8,7 @@
 #include "../stat/stat.h"
 #include
 
-namespace MLPP {
+
 OutlierFinder::OutlierFinder(int threshold) :
 		threshold(threshold) {
 }
@@ -39,4 +39,3 @@ std::vector OutlierFinder::modelTest(std::vector inputSet) {
 	}
 	return outliers;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/outlier_finder/outlier_finder.h b/mlpp/outlier_finder/outlier_finder.h
index 4d4a172..f23b0ed 100644
--- a/mlpp/outlier_finder/outlier_finder.h
+++ b/mlpp/outlier_finder/outlier_finder.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class OutlierFinder {
 public:
 	// Cnstr
@@ -22,6 +22,6 @@ public:
 	// Variables required
 	int threshold;
 };
-} //namespace MLPP
+
 
 #endif /* OutlierFinder_hpp */
diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp
index c646639..190c7e6 100644
--- a/mlpp/output_layer/output_layer.cpp
+++ b/mlpp/output_layer/output_layer.cpp
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(n_hidden, weightInit);
@@ -126,4 +126,3 @@ void OutputLayer::Test(std::vector x) {
 	z_test = alg.dot(weights, x) + bias;
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h
index 909e4c5..1da085a 100644
--- a/mlpp/output_layer/output_layer.h
+++ b/mlpp/output_layer/output_layer.h
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class OutputLayer {
 public:
 	OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -52,6 +52,6 @@ public:
 	void forwardPass();
 	void Test(std::vector x);
 };
-} //namespace MLPP
+
 
 #endif /* OutputLayer_hpp */
diff --git a/mlpp/pca/pca.cpp b/mlpp/pca/pca.cpp
index 1bde36b..74d9534 100644
--- a/mlpp/pca/pca.cpp
+++ b/mlpp/pca/pca.cpp
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 PCA::PCA(std::vector> inputSet, int k) :
 		inputSet(inputSet), k(k) {
@@ -51,4 +51,4 @@ double PCA::score() {
 	}
 	return 1 - num / den;
 }
-} //namespace MLPP
+
diff --git a/mlpp/pca/pca.h b/mlpp/pca/pca.h
index 56f857a..354d4fd 100644
--- a/mlpp/pca/pca.h
+++ b/mlpp/pca/pca.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class PCA {
 public:
 	PCA(std::vector> inputSet, int k);
@@ -24,6 +24,6 @@ private:
 	std::vector> Z;
 	int k;
 };
-} //namespace MLPP
+
 
 #endif /* PCA_hpp */
diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp
index 302a27c..ec0018d 100644
--- a/mlpp/probit_reg/probit_reg.cpp
+++ b/mlpp/probit_reg/probit_reg.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 ProbitReg::ProbitReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -243,4 +243,3 @@ void ProbitReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.gaussianCDF(z);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/probit_reg/probit_reg.h b/mlpp/probit_reg/probit_reg.h
index 237c146..278f901 100644
--- a/mlpp/probit_reg/probit_reg.h
+++ b/mlpp/probit_reg/probit_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class ProbitReg {
 public:
@@ -49,6 +49,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* ProbitReg_hpp */
diff --git a/mlpp/regularization/reg.cpp b/mlpp/regularization/reg.cpp
index a890421..f3d860d 100644
--- a/mlpp/regularization/reg.cpp
+++ b/mlpp/regularization/reg.cpp
@@ -10,7 +10,7 @@
 #include
 #include
 
-namespace MLPP {
+
 double Reg::regTerm(std::vector weights, double lambda, double alpha, std::string reg) {
 	if (reg == "Ridge") {
@@ -162,4 +162,4 @@ double Reg::regDerivTerm(std::vector> weights, double lambda
 		return 0;
 	}
 }
-} //namespace MLPP
+
diff --git a/mlpp/regularization/reg.h b/mlpp/regularization/reg.h
index 17eecaf..c2194cb 100644
--- a/mlpp/regularization/reg.h
+++ b/mlpp/regularization/reg.h
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class Reg {
 public:
 	double regTerm(std::vector weights, double lambda, double alpha, std::string reg);
@@ -28,6 +28,6 @@ private:
 	double regDerivTerm(std::vector weights, double lambda, double alpha, std::string reg, int j);
 	double regDerivTerm(std::vector> weights, double lambda, double alpha, std::string reg, int i, int j);
 };
-} //namespace MLPP
+
 
 #endif /* Reg_hpp */
diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp
index d00c0c8..d6d7061 100644
--- a/mlpp/softmax_net/softmax_net.cpp
+++ b/mlpp/softmax_net/softmax_net.cpp
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 SoftmaxNet::SoftmaxNet(std::vector> inputSet, std::vector> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -289,4 +289,3 @@ void SoftmaxNet::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/softmax_net/softmax_net.h b/mlpp/softmax_net/softmax_net.h
index 324a9de..0e8d3c7 100644
--- a/mlpp/softmax_net/softmax_net.h
+++ b/mlpp/softmax_net/softmax_net.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class SoftmaxNet {
 public:
@@ -57,6 +57,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* SoftmaxNet_hpp */
diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp
index 954b0f9..52b7ba0 100644
--- a/mlpp/softmax_reg/softmax_reg.cpp
+++ b/mlpp/softmax_reg/softmax_reg.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 SoftmaxReg::SoftmaxReg(std::vector> inputSet, std::vector> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -189,4 +189,3 @@ void SoftmaxReg::forwardPass() {
 
 	y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/softmax_reg/softmax_reg.h b/mlpp/softmax_reg/softmax_reg.h
index 1c11f58..6bd5ea7 100644
--- a/mlpp/softmax_reg/softmax_reg.h
+++ b/mlpp/softmax_reg/softmax_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class SoftmaxReg {
 public:
@@ -46,6 +46,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* SoftmaxReg_hpp */
diff --git a/mlpp/stat/stat.cpp b/mlpp/stat/stat.cpp
index ad2b9c6..33d9e15 100644
--- a/mlpp/stat/stat.cpp
+++ b/mlpp/stat/stat.cpp
@@ -14,7 +14,7 @@
 
 #include
 
-namespace MLPP {
+
 double Stat::b0Estimation(const std::vector &x, const std::vector &y) {
 	return mean(y) - b1Estimation(x, y) * mean(x);
 }
@@ -214,4 +214,3 @@ double Stat::logMean(const double x, const double y) {
 	}
 	return (y - x) / (log(y) - std::log(x));
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/stat/stat.h b/mlpp/stat/stat.h
index a5466ae..884ac59 100644
--- a/mlpp/stat/stat.h
+++ b/mlpp/stat/stat.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class Stat {
 public:
 	// These functions are for univariate lin reg module- not for users.
@@ -47,6 +47,6 @@ public:
 	double identricMean(const double x, const double y);
 	double logMean(const double x, const double y);
 };
-} //namespace MLPP
+
 
 #endif /* Stat_hpp */
diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp
index bebfd7d..87c2a39 100644
--- a/mlpp/svc/svc.cpp
+++ b/mlpp/svc/svc.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 SVC::SVC(std::vector> inputSet, std::vector outputSet, double C) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C) {
 	y_hat.resize(n);
@@ -195,4 +195,3 @@ void SVC::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.sign(z);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/svc/svc.h b/mlpp/svc/svc.h
index aeaf4df..d8497f6 100644
--- a/mlpp/svc/svc.h
+++ b/mlpp/svc/svc.h
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class SVC {
 public:
@@ -50,6 +50,6 @@ private:
 	// UI Portion
 	void UI(int epoch, double cost_prev);
 };
-} //namespace MLPP
+
 
 #endif /* SVC_hpp */
diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp
index f6e1676..db90207 100644
--- a/mlpp/tanh_reg/tanh_reg.cpp
+++ b/mlpp/tanh_reg/tanh_reg.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 TanhReg::TanhReg(std::vector> inputSet, std::vector outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -192,4 +192,3 @@ void TanhReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.tanh(z);
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/tanh_reg/tanh_reg.h b/mlpp/tanh_reg/tanh_reg.h
index ed7c797..3d66612 100644
--- a/mlpp/tanh_reg/tanh_reg.h
+++ b/mlpp/tanh_reg/tanh_reg.h
@@ -11,7 +11,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class TanhReg {
 public:
@@ -51,6 +51,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
+
 
 #endif /* TanhReg_hpp */
diff --git a/mlpp/transforms/transforms.cpp b/mlpp/transforms/transforms.cpp
index eb6e523..b78e144 100644
--- a/mlpp/transforms/transforms.cpp
+++ b/mlpp/transforms/transforms.cpp
@@ -10,7 +10,7 @@
 #include
 #include
 
-namespace MLPP {
+
 // DCT ii.
 // https://www.mathworks.com/help/images/discrete-cosine-transform.html
@@ -53,4 +53,3 @@ std::vector> Transforms::discreteCosineTransform(std::vector
 	}
 	return B;
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/transforms/transforms.h b/mlpp/transforms/transforms.h
index 8836022..99685d7 100644
--- a/mlpp/transforms/transforms.h
+++ b/mlpp/transforms/transforms.h
@@ -10,11 +10,11 @@
 #include
 #include
 
-namespace MLPP {
+
 class Transforms {
 public:
 	std::vector> discreteCosineTransform(std::vector> A);
 };
-} //namespace MLPP
+
 
 #endif /* Transforms_hpp */
diff --git a/mlpp/uni_lin_reg/uni_lin_reg.cpp b/mlpp/uni_lin_reg/uni_lin_reg.cpp
index 7c6104c..e942e62 100644
--- a/mlpp/uni_lin_reg/uni_lin_reg.cpp
+++ b/mlpp/uni_lin_reg/uni_lin_reg.cpp
@@ -15,7 +15,7 @@
 // Univariate Linear Regression Model
 // ŷ = b0 + b1x1
 
-namespace MLPP {
+
 UniLinReg::UniLinReg(std::vector x, std::vector y) :
 		inputSet(x), outputSet(y) {
 	Stat estimator;
@@ -31,4 +31,4 @@ std::vector UniLinReg::modelSetTest(std::vector x) {
 double UniLinReg::modelTest(double input) {
 	return b0 + b1 * input;
 }
-} //namespace MLPP
+
diff --git a/mlpp/uni_lin_reg/uni_lin_reg.h b/mlpp/uni_lin_reg/uni_lin_reg.h
index b333ea3..8832405 100644
--- a/mlpp/uni_lin_reg/uni_lin_reg.h
+++ b/mlpp/uni_lin_reg/uni_lin_reg.h
@@ -10,7 +10,7 @@
 #include
 
-namespace MLPP {
+
 class UniLinReg {
 public:
 	UniLinReg(std::vector x, std::vector y);
@@ -24,6 +24,6 @@ private:
 	double b0;
 	double b1;
 };
-} //namespace MLPP
+
 
 #endif /* UniLinReg_hpp */
diff --git a/mlpp/utilities/utilities.cpp b/mlpp/utilities/utilities.cpp
index cbb1892..4f5525f 100644
--- a/mlpp/utilities/utilities.cpp
+++ b/mlpp/utilities/utilities.cpp
@@ -10,7 +10,7 @@
 #include
 #include
 
-namespace MLPP {
+
 std::vector Utilities::weightInitialization(int n, std::string type) {
 	std::random_device rd;
@@ -380,4 +380,3 @@ double Utilities::accuracy(std::vector y_hat, std::vector y) {
 double Utilities::f1_score(std::vector y_hat, std::vector y) {
 	return 2 * precision(y_hat, y) * recall(y_hat, y) / (precision(y_hat, y) + recall(y_hat, y));
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/utilities/utilities.h b/mlpp/utilities/utilities.h
index f6d1c6c..1c7d8dd 100644
--- a/mlpp/utilities/utilities.h
+++ b/mlpp/utilities/utilities.h
@@ -12,7 +12,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class Utilities {
 public:
 	// Weight Init
@@ -50,6 +50,6 @@ public:
 
 private:
 };
-} //namespace MLPP
+
 
 #endif /* Utilities_hpp */
diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp
index 7995e84..5fc35c6 100644
--- a/mlpp/wgan/wgan.cpp
+++ b/mlpp/wgan/wgan.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 
-namespace MLPP {
+
 WGAN::WGAN(double k, std::vector> outputSet) :
 		outputSet(outputSet), n(outputSet.size()), k(k) {
 }
@@ -292,4 +292,3 @@ void WGAN::UI(int epoch, double cost_prev, std::vector y_hat, std::vecto
 		}
 	}
 }
-} //namespace MLPP
\ No newline at end of file
diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h
index 25f11ec..f343594 100644
--- a/mlpp/wgan/wgan.h
+++ b/mlpp/wgan/wgan.h
@@ -15,7 +15,7 @@
 #include
 #include
 
-namespace MLPP {
+
 class WGAN {
 public:
@@ -52,6 +52,6 @@ private:
 	int n;
 	int k;
 };
-} //namespace MLPP
+
 
 #endif /* WGAN_hpp */
\ No newline at end of file
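
Every hunk in this patch applies the same mechanical transformation, so the shape of the change is easiest to see in isolation. The fragment below is an illustrative sketch only: "Example" is a hypothetical class, not one of the 79 patched files, and the template arguments that the patch text above has lost to angle-bracket stripping are likewise omitted here. Each header and implementation file simply loses its namespace wrapper:

-namespace MLPP {
 class Example {
 public:
 	// Declarations are unchanged; only the enclosing scope moves.
 	double modelTest(double input);
 };
-} //namespace MLPP

With the wrapper gone, the classes live at global scope, which is why main.cpp also drops its "using namespace MLPP;" line: call sites that already referred to the classes unqualified keep compiling as-is, while any MLPP::-qualified references elsewhere would need the prefix removed.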