Removed the MLPP namespace.

Relintai 2023-01-24 19:20:18 +01:00
parent ffa44dfda5
commit ab0d41203b
79 changed files with 105 additions and 164 deletions
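
The change itself is mechanical: every header and source file loses its enclosing "namespace MLPP { ... } //namespace MLPP" wrapper, and main.cpp drops its "using namespace MLPP;" directive, so classes such as UniLinReg, LinAlg, and Stat now live in the global namespace. A minimal sketch of the effect on a call site follows; the class and methods are taken from the UniLinReg hunks below, but the surrounding program and include path are illustrative, not part of the commit:

#include "MLPP/UniLinReg/UniLinReg.hpp"

#include <iostream>
#include <vector>

int main() {
	std::vector<double> x = { 1, 2, 3, 4, 5 };
	std::vector<double> y = { 2, 4, 6, 8, 10 };

	// Before this commit: MLPP::UniLinReg model(x, y);
	// After it, no qualifier or using-directive is needed:
	UniLinReg model(x, y);

	std::cout << model.modelTest(6) << std::endl; // prints b0 + b1 * 6
	return 0;
}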

View File

@@ -51,8 +51,6 @@
 #include "MLPP/WGAN/WGAN.hpp"
 #include "MLPP/Transforms/Transforms.hpp"
-using namespace MLPP;
 // double f(double x){
 //     return x*x*x + 2*x - 2;

View File

@@ -10,8 +10,6 @@
 #include <cmath>
 #include <iostream>
-namespace MLPP {
 double Activation::linear(double z, bool deriv) {
 	if (deriv) {
 		return 1;
@@ -951,4 +949,3 @@ std::vector<double> Activation::activation(std::vector<double> z, bool deriv, do
 	}
 	return a;
 }
-} //namespace MLPP

View File

@@ -10,7 +10,6 @@
 #include <vector>
-namespace MLPP {
 class Activation {
 public:
 	double linear(double z, bool deriv = 0);
@@ -142,6 +141,5 @@ public:
 private:
 };
-} //namespace MLPP
 #endif /* Activation_hpp */

View File

@@ -15,7 +15,6 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 ANN::ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) {
 }
@@ -761,4 +760,3 @@ void ANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector
 		}
 	}
 }
-} //namespace MLPP

View File

@@ -14,8 +14,6 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class ANN {
 public:
 	ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
@@ -66,6 +64,5 @@ private:
 	double decayConstant;
 	double dropRate;
 };
-} //namespace MLPP
 #endif /* ANN_hpp */

View File

@@ -13,7 +13,6 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
 		inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
 	Activation avn;
@@ -252,4 +251,3 @@ void AutoEncoder::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
 }
-} //namespace MLPP

View File

@@ -12,8 +12,6 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class AutoEncoder {
 public:
 	AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
@@ -50,6 +48,5 @@ private:
 	int k;
 	int n_hidden;
 };
-} //namespace MLPP
 #endif /* AutoEncoder_hpp */

View File

@@ -12,7 +12,6 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 BernoulliNB::BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
 		inputSet(inputSet), outputSet(outputSet), class_num(2) {
 	y_hat.resize(outputSet.size());
@@ -175,4 +174,3 @@ void BernoulliNB::Evaluate() {
 		}
 	}
 }
-} //namespace MLPP

View File

@@ -11,7 +11,6 @@
 #include <map>
 #include <vector>
-namespace MLPP {
 class BernoulliNB {
 public:
 	BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
@@ -39,4 +38,3 @@ private:
 };
 #endif /* BernoulliNB_hpp */
-}

View File

@@ -14,7 +14,6 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 CLogLogReg::CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -221,4 +220,3 @@ void CLogLogReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.cloglog(z);
 }
-} //namespace MLPP

View File

@@ -11,8 +11,6 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class CLogLogReg {
 public:
 	CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -50,6 +48,5 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* CLogLogReg_hpp */

View File

@@ -10,8 +10,6 @@
 #include <cmath>
 #include <iostream>
-namespace MLPP {
 Convolutions::Convolutions() :
 		prewittHorizontal({ { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } }), prewittVertical({ { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } }), sobelHorizontal({ { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }), sobelVertical({ { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }), scharrHorizontal({ { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } }), scharrVertical({ { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }), robertsHorizontal({ { 0, 1 }, { -1, 0 } }), robertsVertical({ { 1, 0 }, { 0, -1 } }) {
 }
@@ -373,4 +371,3 @@ std::vector<std::vector<double>> Convolutions::getRobertsHorizontal() {
 std::vector<std::vector<double>> Convolutions::getRobertsVertical() {
 	return robertsVertical;
 }
-} //namespace MLPP

View File

@@ -5,7 +5,6 @@
 #include <vector>
 #include <string>
-namespace MLPP {
 class Convolutions {
 public:
 	Convolutions();
@@ -47,6 +46,5 @@ private:
 	std::vector<std::vector<double>> robertsHorizontal;
 	std::vector<std::vector<double>> robertsVertical;
 };
-} //namespace MLPP
 #endif // Convolutions_hpp

View File

@@ -10,7 +10,7 @@
 #include <cmath>
 #include <iostream>
-namespace MLPP {
 double Cost::MSE(std::vector<double> y_hat, std::vector<double> y) {
 	double sum = 0;
 	for (int i = 0; i < y_hat.size(); i++) {
@@ -404,4 +404,3 @@ std::vector<double> Cost::dualFormSVMDeriv(std::vector<double> alpha, std::vecto
 	return alg.subtraction(alphaQDeriv, one);
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class Cost {
 public:
 	// Regression Costs
@@ -81,6 +81,6 @@ public:
 private:
 };
-} //namespace MLPP
 #endif /* Cost_hpp */

View File

@@ -16,7 +16,7 @@
 #include <random>
 #include <sstream>
-namespace MLPP {
 // Loading Datasets
 std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBreastCancer() {
 	const int BREAST_CANCER_SIZE = 30; // k = 30
@@ -753,4 +753,4 @@ std::vector<double> Data::reverseOneHot(std::vector<std::vector<double>> tempOut
 	return outputSet;
 }
-} //namespace MLPP

View File

@@ -13,7 +13,7 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class Data {
 public:
 	// Load Datasets
@@ -94,6 +94,6 @@ public:
 private:
 };
-} //namespace MLPP
 #endif /* Data_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 DualSVC::DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) {
 	y_hat.resize(n);
@@ -236,4 +236,3 @@ std::vector<std::vector<double>> DualSVC::kernelFunction(std::vector<std::vector
 		return alg.matmult(inputSet, alg.transpose(inputSet));
 	} // warning: non-void function does not return a value in all control paths [-Wreturn-type]
 }
-} //namespace MLPP

View File

@@ -14,7 +14,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class DualSVC {
 public:
@@ -65,6 +65,6 @@ private:
 	// UI Portion
 	void UI(int epoch, double cost_prev);
 };
-} //namespace MLPP
 #endif /* DualSVC_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 ExpReg::ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -235,4 +235,3 @@ double ExpReg::Evaluate(std::vector<double> x) {
 void ExpReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class ExpReg {
 public:
 	ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -45,6 +45,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* ExpReg_hpp */

View File

@@ -14,7 +14,7 @@
 #include <cmath>
 #include <iostream>
-namespace MLPP {
 GAN::GAN(double k, std::vector<std::vector<double>> outputSet) :
 		outputSet(outputSet), n(outputSet.size()), k(k) {
 }
@@ -283,4 +283,3 @@ void GAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector
 		}
 	}
 }
-} //namespace MLPP

View File

@@ -15,7 +15,7 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class GAN {
 public:
@@ -52,6 +52,6 @@ private:
 	int n;
 	int k;
 };
-} //namespace MLPP
 #endif /* GAN_hpp */

View File

@@ -8,7 +8,7 @@
 #include "../stat/stat.h"
 #include <iostream>
-namespace MLPP {
 void GaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
 	bool condition1 = arithmeticMean(eps);
 	bool condition2 = homoscedasticity(eps);
@@ -54,4 +54,4 @@ bool GaussMarkovChecker::exogeneity(std::vector<double> eps) {
 	}
 	return 1;
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class GaussMarkovChecker {
 public:
 	void checkGMConditions(std::vector<double> eps);
@@ -22,6 +22,6 @@ public:
 	bool exogeneity(std::vector<double> eps); // 3) Cov of any 2 non-equal eps values = 0.
 private:
 };
-} //namespace MLPP
 #endif /* GaussMarkovChecker_hpp */

View File

@@ -13,7 +13,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 GaussianNB::GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
@@ -88,4 +88,3 @@ void GaussianNB::Evaluate() {
 		std::cout << std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double))) << std::endl;
 	}
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class GaussianNB {
 public:
 	GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
@@ -34,4 +34,3 @@ private:
 };
 #endif /* GaussianNB_hpp */
-}

View File

@@ -12,7 +12,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
@@ -110,4 +110,3 @@ void HiddenLayer::Test(std::vector<double> x) {
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP

View File

@@ -14,7 +14,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class HiddenLayer {
 public:
 	HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -48,6 +48,6 @@ public:
 	void forwardPass();
 	void Test(std::vector<double> x);
 };
-} //namespace MLPP
 #endif /* HiddenLayer_hpp */

View File

@@ -6,7 +6,7 @@
 #include "hypothesis_testing.h"
-namespace MLPP {
 std::tuple<bool, double> HypothesisTesting::chiSquareTest(std::vector<double> observed, std::vector<double> expected) {
 	double df = observed.size() - 1; // These are our degrees of freedom
@@ -16,4 +16,3 @@ std::tuple<bool, double> HypothesisTesting::chiSquareTest(std::vector<double> ob
 	}
 }
-} //namespace MLPP

View File

@@ -11,13 +11,13 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class HypothesisTesting {
 public:
 	std::tuple<bool, double> chiSquareTest(std::vector<double> observed, std::vector<double> expected);
 private:
 };
-} //namespace MLPP
 #endif /* HypothesisTesting_hpp */

View File

@@ -12,7 +12,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 KMeans::KMeans(std::vector<std::vector<double>> inputSet, int k, std::string init_type) :
 		inputSet(inputSet), k(k), init_type(init_type) {
 	if (init_type == "KMeans++") {
@@ -232,4 +232,4 @@ double KMeans::Cost() {
 	}
 	return sum;
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class KMeans {
 public:
 	KMeans(std::vector<std::vector<double>> inputSet, int k, std::string init_type = "Default");
@@ -40,6 +40,6 @@ private:
 	std::string init_type;
 };
-} //namespace MLPP
 #endif /* KMeans_hpp */

View File

@@ -12,7 +12,7 @@
 #include <iostream>
 #include <map>
-namespace MLPP {
 kNN::kNN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int k) :
 		inputSet(inputSet), outputSet(outputSet), k(k) {
 }
@@ -82,4 +82,3 @@ std::vector<double> kNN::nearestNeighbors(std::vector<double> x) {
 	}
 	return knn;
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class kNN {
 public:
 	kNN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int k);
@@ -28,6 +28,6 @@ private:
 	std::vector<double> outputSet;
 	int k;
 };
-} //namespace MLPP
 #endif /* kNN_hpp */

View File

@@ -11,7 +11,7 @@
 #include <map>
 #include <random>
-namespace MLPP {
 std::vector<std::vector<double>> LinAlg::gramMatrix(std::vector<std::vector<double>> A) {
 	return matmult(transpose(A), A); // AtA
@@ -1226,4 +1226,3 @@ std::vector<std::vector<std::vector<double>>> LinAlg::vector_wise_tensor_product
 	}
 	return C;
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class LinAlg {
 public:
 	// MATRIX FUNCTIONS
@@ -231,6 +231,6 @@ public:
 private:
 };
-} //namespace MLPP
 #endif /* LinAlg_hpp */

View File

@@ -15,7 +15,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 LinReg::LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
@@ -237,4 +237,3 @@ double LinReg::Evaluate(std::vector<double> x) {
 void LinReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class LinReg {
 public:
 	LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -46,6 +46,6 @@ private:
 	int lambda;
 	int alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* LinReg_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 LogReg::LogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -202,4 +202,3 @@ double LogReg::Evaluate(std::vector<double> x) {
 void LogReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class LogReg {
 public:
@@ -47,6 +47,6 @@ private:
 	double lambda; /* Regularization Parameter */
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* LogReg_hpp */

View File

@@ -13,7 +13,7 @@
 #include <iostream>
-namespace MLPP {
 MANN::MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) {
 }
@@ -187,4 +187,3 @@ void MANN::forwardPass() {
 	outputLayer->forwardPass();
 	y_hat = outputLayer->a;
 }
-} //namespace MLPP

View File

@@ -14,7 +14,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class MANN {
 public:
@@ -44,6 +44,6 @@ private:
 	int k;
 	int n_output;
 };
-} //namespace MLPP
 #endif /* MANN_hpp */

View File

@@ -15,7 +15,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 MLP::MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	Activation avn;
@@ -270,4 +270,4 @@ void MLP::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
 }
-} //namespace MLPP

View File

@@ -12,7 +12,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class MLP {
 public:
@@ -56,6 +56,6 @@ private:
 	double lambda; /* Regularization Parameter */
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* MLP_hpp */

View File

@@ -11,7 +11,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
@@ -129,4 +129,3 @@ void MultiOutputLayer::Test(std::vector<double> x) {
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP

View File

@@ -15,7 +15,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class MultiOutputLayer {
 public:
 	MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -53,6 +53,6 @@ public:
 	void forwardPass();
 	void Test(std::vector<double> x);
 };
-} //namespace MLPP
 #endif /* MultiOutputLayer_hpp */

View File

@@ -12,7 +12,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 MultinomialNB::MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
@@ -116,4 +116,3 @@ void MultinomialNB::Evaluate() {
 		y_hat[i] = std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
 	}
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <map>
 #include <vector>
-namespace MLPP {
 class MultinomialNB {
 public:
 	MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
@@ -37,4 +37,3 @@ private:
 };
 #endif /* MultinomialNB_hpp */
-}

View File

@@ -12,7 +12,7 @@
 #include <iostream>
 #include <string>
-namespace MLPP {
 double NumericalAnalysis::numDiff(double (*function)(double), double x) {
 	double eps = 1e-10;
@@ -293,4 +293,3 @@ std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(st
 		}
 	}
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class NumericalAnalysis {
 public:
 	/* A numerical method for derivatives is used. This may be subject to change,
@@ -52,6 +52,6 @@ public:
 	std::string secondPartialDerivativeTest(double (*function)(std::vector<double>), std::vector<double> x);
 };
-} //namespace MLPP
 #endif /* NumericalAnalysis_hpp */

View File

@@ -8,7 +8,7 @@
 #include "../stat/stat.h"
 #include <iostream>
-namespace MLPP {
 OutlierFinder::OutlierFinder(int threshold) :
 		threshold(threshold) {
 }
@@ -39,4 +39,3 @@ std::vector<double> OutlierFinder::modelTest(std::vector<double> inputSet) {
 	}
 	return outliers;
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class OutlierFinder {
 public:
 	// Cnstr
@@ -22,6 +22,6 @@ public:
 	// Variables required
 	int threshold;
 };
-} //namespace MLPP
 #endif /* OutlierFinder_hpp */

View File

@@ -11,7 +11,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(n_hidden, weightInit);
@@ -126,4 +126,3 @@ void OutputLayer::Test(std::vector<double> x) {
 	z_test = alg.dot(weights, x) + bias;
 	a_test = (avn.*activationTest_map[activation])(z_test, 0);
 }
-} //namespace MLPP

View File

@@ -15,7 +15,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class OutputLayer {
 public:
 	OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -52,6 +52,6 @@ public:
 	void forwardPass();
 	void Test(std::vector<double> x);
 };
-} //namespace MLPP
 #endif /* OutputLayer_hpp */

View File

@@ -11,7 +11,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 PCA::PCA(std::vector<std::vector<double>> inputSet, int k) :
 		inputSet(inputSet), k(k) {
@@ -51,4 +51,4 @@ double PCA::score() {
 	}
 	return 1 - num / den;
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class PCA {
 public:
 	PCA(std::vector<std::vector<double>> inputSet, int k);
@@ -24,6 +24,6 @@ private:
 	std::vector<std::vector<double>> Z;
 	int k;
 };
-} //namespace MLPP
 #endif /* PCA_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 ProbitReg::ProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -243,4 +243,3 @@ void ProbitReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.gaussianCDF(z);
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class ProbitReg {
 public:
@@ -49,6 +49,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* ProbitReg_hpp */

View File

@@ -10,7 +10,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 double Reg::regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) {
 	if (reg == "Ridge") {
@@ -162,4 +162,4 @@ double Reg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda
 		return 0;
 	}
 }
-} //namespace MLPP

View File

@@ -12,7 +12,7 @@
 #include <vector>
 #include <string>
-namespace MLPP {
 class Reg {
 public:
 	double regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg);
@@ -28,6 +28,6 @@ private:
 	double regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg, int j);
 	double regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg, int i, int j);
 };
-} //namespace MLPP
 #endif /* Reg_hpp */

View File

@@ -15,7 +15,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 SoftmaxNet::SoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -289,4 +289,3 @@ void SoftmaxNet::forwardPass() {
 	a2 = avn.sigmoid(z2);
 	y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class SoftmaxNet {
 public:
@@ -57,6 +57,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* SoftmaxNet_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 SoftmaxReg::SoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -189,4 +189,3 @@ void SoftmaxReg::forwardPass() {
 	y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class SoftmaxReg {
 public:
@@ -46,6 +46,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* SoftmaxReg_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
-namespace MLPP {
 double Stat::b0Estimation(const std::vector<double> &x, const std::vector<double> &y) {
 	return mean(y) - b1Estimation(x, y) * mean(x);
 }
@@ -214,4 +214,3 @@ double Stat::logMean(const double x, const double y) {
 	}
 	return (y - x) / (log(y) - std::log(x));
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class Stat {
 public:
 	// These functions are for univariate lin reg module- not for users.
@@ -47,6 +47,6 @@ public:
 	double identricMean(const double x, const double y);
 	double logMean(const double x, const double y);
 };
-} //namespace MLPP
 #endif /* Stat_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 SVC::SVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C) {
 	y_hat.resize(n);
@@ -195,4 +195,3 @@ void SVC::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.sign(z);
 }
-} //namespace MLPP

View File

@@ -14,7 +14,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class SVC {
 public:
@@ -50,6 +50,6 @@ private:
 	// UI Portion
 	void UI(int epoch, double cost_prev);
 };
-} //namespace MLPP
 #endif /* SVC_hpp */

View File

@@ -14,7 +14,7 @@
 #include <iostream>
 #include <random>
-namespace MLPP {
 TanhReg::TanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
@@ -192,4 +192,3 @@ void TanhReg::forwardPass() {
 	z = propagate(inputSet);
 	y_hat = avn.tanh(z);
 }
-} //namespace MLPP

View File

@@ -11,7 +11,7 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class TanhReg {
 public:
@@ -51,6 +51,6 @@ private:
 	double lambda;
 	double alpha; /* This is the controlling param for Elastic Net*/
 };
-} //namespace MLPP
 #endif /* TanhReg_hpp */

View File

@@ -10,7 +10,7 @@
 #include <iostream>
 #include <string>
-namespace MLPP {
 // DCT ii.
 // https://www.mathworks.com/help/images/discrete-cosine-transform.html
@@ -53,4 +53,3 @@ std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector
 	}
 	return B;
 }
-} //namespace MLPP

View File

@@ -10,11 +10,11 @@
 #include <string>
 #include <vector>
-namespace MLPP {
 class Transforms {
 public:
 	std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);
 };
-} //namespace MLPP
 #endif /* Transforms_hpp */

View File

@@ -15,7 +15,7 @@
 // Univariate Linear Regression Model
 // ŷ = b0 + b1x1
-namespace MLPP {
 UniLinReg::UniLinReg(std::vector<double> x, std::vector<double> y) :
 		inputSet(x), outputSet(y) {
 	Stat estimator;
@@ -31,4 +31,4 @@ std::vector<double> UniLinReg::modelSetTest(std::vector<double> x) {
 double UniLinReg::modelTest(double input) {
 	return b0 + b1 * input;
 }
-} //namespace MLPP

View File

@@ -10,7 +10,7 @@
 #include <vector>
-namespace MLPP {
 class UniLinReg {
 public:
 	UniLinReg(std::vector<double> x, std::vector<double> y);
@@ -24,6 +24,6 @@ private:
 	double b0;
 	double b1;
 };
-} //namespace MLPP
 #endif /* UniLinReg_hpp */

View File

@@ -10,7 +10,7 @@
 #include <random>
 #include <string>
-namespace MLPP {
 std::vector<double> Utilities::weightInitialization(int n, std::string type) {
 	std::random_device rd;
@@ -380,4 +380,3 @@ double Utilities::accuracy(std::vector<double> y_hat, std::vector<double> y) {
 double Utilities::f1_score(std::vector<double> y_hat, std::vector<double> y) {
 	return 2 * precision(y_hat, y) * recall(y_hat, y) / (precision(y_hat, y) + recall(y_hat, y));
 }
-} //namespace MLPP

View File

@@ -12,7 +12,7 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class Utilities {
 public:
 	// Weight Init
@@ -50,6 +50,6 @@ public:
 private:
 };
-} //namespace MLPP
 #endif /* Utilities_hpp */

View File

@@ -14,7 +14,7 @@
 #include <cmath>
 #include <iostream>
-namespace MLPP {
 WGAN::WGAN(double k, std::vector<std::vector<double>> outputSet) :
 		outputSet(outputSet), n(outputSet.size()), k(k) {
 }
@@ -292,4 +292,3 @@ void WGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vecto
 		}
 	}
 }
-} //namespace MLPP

View File

@@ -15,7 +15,7 @@
 #include <tuple>
 #include <vector>
-namespace MLPP {
 class WGAN {
 public:
@@ -52,6 +52,6 @@ private:
 	int n;
 	int k;
 };
-} //namespace MLPP
 #endif /* WGAN_hpp */
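
Taken together, every file in this commit follows the same pattern: one line removed near the top (namespace MLPP {) and one near the bottom (} //namespace MLPP). As a hedged reconstruction of what a stripped header now looks like, here is Transforms.hpp reassembled from the hunks above; the include-guard lines are inferred from the trailing #endif /* Transforms_hpp */, not copied from the full file:

#ifndef Transforms_hpp
#define Transforms_hpp

#include <string>
#include <vector>

// The namespace wrapper is gone, so the class now sits in the global namespace.
class Transforms {
public:
	std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);
};

#endif /* Transforms_hpp */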