Removed the MLPP namespace.

commit ab0d41203b
parent ffa44dfda5
Author: Relintai
Date: 2023-01-24 19:20:18 +01:00
79 changed files with 105 additions and 164 deletions
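
Every hunk below is the same mechanical edit: the namespace MLPP { opener and its matching } //namespace MLPP closer are deleted from each header/source pair, and the demo's using namespace MLPP; goes with them. A minimal before/after sketch of the pattern (illustrative only, trimmed to a single member; not a verbatim file from the repo):

// Before: each header wrapped its declarations in the MLPP namespace.
namespace MLPP {
class Activation {
public:
	double linear(double z, bool deriv = 0);
};
} //namespace MLPP

// After: the wrapper is deleted, so Activation sits in the global namespace
// and callers need neither `using namespace MLPP;` nor an MLPP:: prefix.
class Activation {
public:
	double linear(double z, bool deriv = 0);
};

Because the classes were declared at the namespace's top level and call sites already used unqualified names under a using-directive, the hunk bodies are pure context; the only extra cleanup is an include-guard fix in three Naive Bayes headers, noted below.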

View File

@@ -51,8 +51,6 @@
#include "MLPP/WGAN/WGAN.hpp"
#include "MLPP/Transforms/Transforms.hpp"
using namespace MLPP;
// double f(double x){
// return x*x*x + 2*x - 2;

View File

@@ -10,8 +10,6 @@
#include <cmath>
#include <iostream>
namespace MLPP {
double Activation::linear(double z, bool deriv) {
if (deriv) {
return 1;
@@ -951,4 +949,3 @@ std::vector<double> Activation::activation(std::vector<double> z, bool deriv, do
}
return a;
}
} //namespace MLPP

View File

@@ -10,7 +10,6 @@
#include <vector>
namespace MLPP {
class Activation {
public:
double linear(double z, bool deriv = 0);
@@ -142,6 +141,5 @@ public:
private:
};
} //namespace MLPP
#endif /* Activation_hpp */

View File

@@ -15,7 +15,6 @@
#include <iostream>
#include <random>
namespace MLPP {
ANN::ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), lrScheduler("None"), decayConstant(0), dropRate(0) {
}
@@ -761,4 +760,3 @@ void ANN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector
}
}
}
} //namespace MLPP

View File

@@ -14,8 +14,6 @@
#include <tuple>
#include <vector>
namespace MLPP {
class ANN {
public:
ANN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
@@ -66,6 +64,5 @@ private:
double decayConstant;
double dropRate;
};
} //namespace MLPP
#endif /* ANN_hpp */

View File

@@ -13,7 +13,6 @@
#include <iostream>
#include <random>
namespace MLPP {
AutoEncoder::AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden) :
inputSet(inputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()) {
Activation avn;
@@ -252,4 +251,3 @@ void AutoEncoder::forwardPass() {
a2 = avn.sigmoid(z2);
y_hat = alg.mat_vec_add(alg.matmult(a2, weights2), bias2);
}
} //namespace MLPP

View File

@@ -12,8 +12,6 @@
#include <tuple>
#include <vector>
namespace MLPP {
class AutoEncoder {
public:
AutoEncoder(std::vector<std::vector<double>> inputSet, int n_hidden);
@@ -50,6 +48,5 @@ private:
int k;
int n_hidden;
};
} //namespace MLPP
#endif /* AutoEncoder_hpp */

View File

@@ -12,7 +12,6 @@
#include <iostream>
#include <random>
namespace MLPP {
BernoulliNB::BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) :
inputSet(inputSet), outputSet(outputSet), class_num(2) {
y_hat.resize(outputSet.size());
@@ -175,4 +174,3 @@ void BernoulliNB::Evaluate() {
}
}
}
} //namespace MLPP

View File

@@ -11,7 +11,6 @@
#include <map>
#include <vector>
namespace MLPP {
class BernoulliNB {
public:
BernoulliNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet);
@@ -38,5 +37,4 @@ private:
std::vector<double> y_hat;
};
#endif /* BernoulliNB_hpp */
}
#endif /* BernoulliNB_hpp */
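
The doubled #endif in the hunk above is real, not a scrape artifact: BernoulliNB.hpp (like GaussianNB.hpp and MultinomialNB.hpp further down) closed the MLPP namespace after the include guard's #endif, leaving a stray brace outside the guarded region that would break the header under repeated inclusion. The commit deletes both lines and re-emits the guard terminator. Reconstructed file tail, inferred from the hunk rather than copied from the repo:

// Before: the namespace's closing brace trailed the include guard.
	std::vector<double> y_hat;
};
#endif /* BernoulliNB_hpp */
}

// After: with the namespace wrapper gone, the guard terminator ends the file.
	std::vector<double> y_hat;
};
#endif /* BernoulliNB_hpp */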

View File

@@ -14,7 +14,6 @@
#include <iostream>
#include <random>
namespace MLPP {
CLogLogReg::CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -221,4 +220,3 @@ void CLogLogReg::forwardPass() {
z = propagate(inputSet);
y_hat = avn.cloglog(z);
}
} //namespace MLPP

View File

@@ -11,8 +11,6 @@
#include <string>
#include <vector>
namespace MLPP {
class CLogLogReg {
public:
CLogLogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -50,6 +48,5 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* CLogLogReg_hpp */

View File

@@ -10,8 +10,6 @@
#include <cmath>
#include <iostream>
namespace MLPP {
Convolutions::Convolutions() :
prewittHorizontal({ { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } }), prewittVertical({ { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } }), sobelHorizontal({ { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }), sobelVertical({ { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }), scharrHorizontal({ { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } }), scharrVertical({ { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }), robertsHorizontal({ { 0, 1 }, { -1, 0 } }), robertsVertical({ { 1, 0 }, { 0, -1 } }) {
}
@@ -373,4 +371,3 @@ std::vector<std::vector<double>> Convolutions::getRobertsHorizontal() {
std::vector<std::vector<double>> Convolutions::getRobertsVertical() {
return robertsVertical;
}
} //namespace MLPP

View File

@@ -5,7 +5,6 @@
#include <vector>
#include <string>
namespace MLPP {
class Convolutions {
public:
Convolutions();
@@ -47,6 +46,5 @@ private:
std::vector<std::vector<double>> robertsHorizontal;
std::vector<std::vector<double>> robertsVertical;
};
} //namespace MLPP
#endif // Convolutions_hpp

View File

@@ -10,7 +10,7 @@
#include <cmath>
#include <iostream>
namespace MLPP {
double Cost::MSE(std::vector<double> y_hat, std::vector<double> y) {
double sum = 0;
for (int i = 0; i < y_hat.size(); i++) {
@@ -404,4 +404,3 @@ std::vector<double> Cost::dualFormSVMDeriv(std::vector<double> alpha, std::vecto
return alg.subtraction(alphaQDeriv, one);
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class Cost {
public:
// Regression Costs
@@ -81,6 +81,6 @@ public:
private:
};
} //namespace MLPP
#endif /* Cost_hpp */

View File

@@ -16,7 +16,7 @@
#include <random>
#include <sstream>
namespace MLPP {
// Loading Datasets
std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBreastCancer() {
const int BREAST_CANCER_SIZE = 30; // k = 30
@@ -753,4 +753,4 @@ std::vector<double> Data::reverseOneHot(std::vector<std::vector<double>> tempOut
return outputSet;
}
} //namespace MLPP

View File

@@ -13,7 +13,7 @@
#include <tuple>
#include <vector>
namespace MLPP {
class Data {
public:
// Load Datasets
@@ -94,6 +94,6 @@ public:
private:
};
} //namespace MLPP
#endif /* Data_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
DualSVC::DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) {
y_hat.resize(n);
@@ -236,4 +236,3 @@ std::vector<std::vector<double>> DualSVC::kernelFunction(std::vector<std::vector
return alg.matmult(inputSet, alg.transpose(inputSet));
} // warning: non-void function does not return a value in all control paths [-Wreturn-type]
}
} //namespace MLPP
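
Incidental to the namespace change, this hunk preserves the -Wreturn-type comment: DualSVC::kernelFunction only returns for the kernels it recognizes, so control can fall off the end of a non-void function. The commit leaves that warning in place; a sketch of the usual fix (hypothetical, not part of this commit or necessarily of the repo) gives the fall-through path an explicit exit:

#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for DualSVC::kernelFunction's kernel dispatch:
// every control path now returns or throws, silencing -Wreturn-type.
std::vector<std::vector<double>> kernelMatrix(const std::string &kernel,
		const std::vector<std::vector<double>> &gram) {
	if (kernel == "Linear") {
		return gram; // the real code returns alg.matmult(inputSet, alg.transpose(inputSet))
	}
	throw std::invalid_argument("unsupported kernel: " + kernel);
}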

View File

@@ -14,7 +14,7 @@
#include <string>
#include <vector>
namespace MLPP {
class DualSVC {
public:
@@ -65,6 +65,6 @@ private:
// UI Portion
void UI(int epoch, double cost_prev);
};
} //namespace MLPP
#endif /* DualSVC_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
ExpReg::ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -235,4 +235,3 @@ double ExpReg::Evaluate(std::vector<double> x) {
void ExpReg::forwardPass() {
y_hat = Evaluate(inputSet);
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class ExpReg {
public:
ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -45,6 +45,6 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* ExpReg_hpp */

View File

@@ -14,7 +14,7 @@
#include <cmath>
#include <iostream>
namespace MLPP {
GAN::GAN(double k, std::vector<std::vector<double>> outputSet) :
outputSet(outputSet), n(outputSet.size()), k(k) {
}
@@ -283,4 +283,3 @@ void GAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector
}
}
}
} //namespace MLPP

View File

@@ -15,7 +15,7 @@
#include <tuple>
#include <vector>
namespace MLPP {
class GAN {
public:
@@ -52,6 +52,6 @@ private:
int n;
int k;
};
} //namespace MLPP
#endif /* GAN_hpp */

View File

@@ -8,7 +8,7 @@
#include "../stat/stat.h"
#include <iostream>
namespace MLPP {
void GaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
bool condition1 = arithmeticMean(eps);
bool condition2 = homoscedasticity(eps);
@@ -54,4 +54,4 @@ bool GaussMarkovChecker::exogeneity(std::vector<double> eps) {
}
return 1;
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class GaussMarkovChecker {
public:
void checkGMConditions(std::vector<double> eps);
@@ -22,6 +22,6 @@ public:
bool exogeneity(std::vector<double> eps); // 3) Cov of any 2 non-equal eps values = 0.
private:
};
} //namespace MLPP
#endif /* GaussMarkovChecker_hpp */

View File

@@ -13,7 +13,7 @@
#include <iostream>
#include <random>
namespace MLPP {
GaussianNB::GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
y_hat.resize(outputSet.size());
@@ -88,4 +88,3 @@ void GaussianNB::Evaluate() {
std::cout << std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double))) << std::endl;
}
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class GaussianNB {
public:
GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
@@ -34,4 +34,3 @@ private:
};
#endif /* GaussianNB_hpp */
}

View File

@@ -12,7 +12,7 @@
#include <iostream>
#include <random>
namespace MLPP {
HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
@@ -110,4 +110,3 @@ void HiddenLayer::Test(std::vector<double> x) {
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}
} //namespace MLPP

View File

@@ -14,7 +14,7 @@
#include <string>
#include <vector>
namespace MLPP {
class HiddenLayer {
public:
HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -48,6 +48,6 @@ public:
void forwardPass();
void Test(std::vector<double> x);
};
} //namespace MLPP
#endif /* HiddenLayer_hpp */

View File

@@ -6,7 +6,7 @@
#include "hypothesis_testing.h"
namespace MLPP {
std::tuple<bool, double> HypothesisTesting::chiSquareTest(std::vector<double> observed, std::vector<double> expected) {
double df = observed.size() - 1; // These are our degrees of freedom
@@ -16,4 +16,3 @@ std::tuple<bool, double> HypothesisTesting::chiSquareTest(std::vector<double> ob
}
}
} //namespace MLPP

View File

@@ -11,13 +11,13 @@
#include <tuple>
#include <vector>
namespace MLPP {
class HypothesisTesting {
public:
std::tuple<bool, double> chiSquareTest(std::vector<double> observed, std::vector<double> expected);
private:
};
} //namespace MLPP
#endif /* HypothesisTesting_hpp */

View File

@@ -12,7 +12,7 @@
#include <iostream>
#include <random>
namespace MLPP {
KMeans::KMeans(std::vector<std::vector<double>> inputSet, int k, std::string init_type) :
inputSet(inputSet), k(k), init_type(init_type) {
if (init_type == "KMeans++") {
@@ -232,4 +232,4 @@ double KMeans::Cost() {
}
return sum;
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class KMeans {
public:
KMeans(std::vector<std::vector<double>> inputSet, int k, std::string init_type = "Default");
@@ -40,6 +40,6 @@ private:
std::string init_type;
};
} //namespace MLPP
#endif /* KMeans_hpp */

View File

@@ -12,7 +12,7 @@
#include <iostream>
#include <map>
namespace MLPP {
kNN::kNN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int k) :
inputSet(inputSet), outputSet(outputSet), k(k) {
}
@@ -82,4 +82,3 @@ std::vector<double> kNN::nearestNeighbors(std::vector<double> x) {
}
return knn;
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class kNN {
public:
kNN(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int k);
@@ -28,6 +28,6 @@ private:
std::vector<double> outputSet;
int k;
};
} //namespace MLPP
#endif /* kNN_hpp */

View File

@@ -11,7 +11,7 @@
#include <map>
#include <random>
namespace MLPP {
std::vector<std::vector<double>> LinAlg::gramMatrix(std::vector<std::vector<double>> A) {
return matmult(transpose(A), A); // AtA
@@ -1226,4 +1226,3 @@ std::vector<std::vector<std::vector<double>>> LinAlg::vector_wise_tensor_product
}
return C;
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <tuple>
#include <vector>
namespace MLPP {
class LinAlg {
public:
// MATRIX FUNCTIONS
@@ -231,6 +231,6 @@ public:
private:
};
} //namespace MLPP
#endif /* LinAlg_hpp */

View File

@@ -15,7 +15,7 @@
#include <iostream>
#include <random>
namespace MLPP {
LinReg::LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
@@ -237,4 +237,3 @@ double LinReg::Evaluate(std::vector<double> x) {
void LinReg::forwardPass() {
y_hat = Evaluate(inputSet);
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class LinReg {
public:
LinReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
@@ -46,6 +46,6 @@ private:
int lambda;
int alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* LinReg_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
LogReg::LogReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -202,4 +202,3 @@ double LogReg::Evaluate(std::vector<double> x) {
void LogReg::forwardPass() {
y_hat = Evaluate(inputSet);
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class LogReg {
public:
@@ -47,6 +47,6 @@ private:
double lambda; /* Regularization Parameter */
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* LogReg_hpp */

View File

@@ -13,7 +13,7 @@
#include <iostream>
namespace MLPP {
MANN::MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size()) {
}
@@ -187,4 +187,3 @@ void MANN::forwardPass() {
outputLayer->forwardPass();
y_hat = outputLayer->a;
}
} //namespace MLPP

View File

@@ -14,7 +14,7 @@
#include <string>
#include <vector>
namespace MLPP {
class MANN {
public:
@@ -44,6 +44,6 @@ private:
int k;
int n_output;
};
} //namespace MLPP
#endif /* MANN_hpp */

View File

@@ -15,7 +15,7 @@
#include <iostream>
#include <random>
namespace MLPP {
MLP::MLP(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
Activation avn;
@@ -270,4 +270,4 @@ void MLP::forwardPass() {
a2 = avn.sigmoid(z2);
y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
}
} //namespace MLPP

View File

@@ -12,7 +12,7 @@
#include <string>
#include <vector>
namespace MLPP {
class MLP {
public:
@@ -56,6 +56,6 @@ private:
double lambda; /* Regularization Parameter */
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* MLP_hpp */

View File

@@ -11,7 +11,7 @@
#include <iostream>
#include <random>
namespace MLPP {
MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
@@ -129,4 +129,3 @@ void MultiOutputLayer::Test(std::vector<double> x) {
z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}
} //namespace MLPP

View File

@@ -15,7 +15,7 @@
#include <string>
#include <vector>
namespace MLPP {
class MultiOutputLayer {
public:
MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -53,6 +53,6 @@ public:
void forwardPass();
void Test(std::vector<double> x);
};
} //namespace MLPP
#endif /* MultiOutputLayer_hpp */

View File

@@ -12,7 +12,7 @@
#include <iostream>
#include <random>
namespace MLPP {
MultinomialNB::MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
y_hat.resize(outputSet.size());
@@ -116,4 +116,3 @@ void MultinomialNB::Evaluate() {
y_hat[i] = std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
}
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <map>
#include <vector>
namespace MLPP {
class MultinomialNB {
public:
MultinomialNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
@@ -37,4 +37,3 @@ private:
};
#endif /* MultinomialNB_hpp */
}

View File

@@ -12,7 +12,7 @@
#include <iostream>
#include <string>
namespace MLPP {
double NumericalAnalysis::numDiff(double (*function)(double), double x) {
double eps = 1e-10;
@@ -293,4 +293,3 @@ std::string NumericalAnalysis::secondPartialDerivativeTest(double (*function)(st
}
}
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <string>
#include <vector>
namespace MLPP {
class NumericalAnalysis {
public:
/* A numerical method for derivatives is used. This may be subject to change,
@@ -52,6 +52,6 @@ public:
std::string secondPartialDerivativeTest(double (*function)(std::vector<double>), std::vector<double> x);
};
} //namespace MLPP
#endif /* NumericalAnalysis_hpp */

View File

@@ -8,7 +8,7 @@
#include "../stat/stat.h"
#include <iostream>
namespace MLPP {
OutlierFinder::OutlierFinder(int threshold) :
threshold(threshold) {
}
@@ -39,4 +39,3 @@ std::vector<double> OutlierFinder::modelTest(std::vector<double> inputSet) {
}
return outliers;
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class OutlierFinder {
public:
// Cnstr
@@ -22,6 +22,6 @@ public:
// Variables required
int threshold;
};
} //namespace MLPP
#endif /* OutlierFinder_hpp */

View File

@@ -11,7 +11,7 @@
#include <iostream>
#include <random>
namespace MLPP {
OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
weights = Utilities::weightInitialization(n_hidden, weightInit);
@@ -126,4 +126,3 @@ void OutputLayer::Test(std::vector<double> x) {
z_test = alg.dot(weights, x) + bias;
a_test = (avn.*activationTest_map[activation])(z_test, 0);
}
} //namespace MLPP

View File

@@ -15,7 +15,7 @@
#include <string>
#include <vector>
namespace MLPP {
class OutputLayer {
public:
OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
@@ -52,6 +52,6 @@ public:
void forwardPass();
void Test(std::vector<double> x);
};
} //namespace MLPP
#endif /* OutputLayer_hpp */

View File

@@ -11,7 +11,7 @@
#include <iostream>
#include <random>
namespace MLPP {
PCA::PCA(std::vector<std::vector<double>> inputSet, int k) :
inputSet(inputSet), k(k) {
@@ -51,4 +51,4 @@ double PCA::score() {
}
return 1 - num / den;
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class PCA {
public:
PCA(std::vector<std::vector<double>> inputSet, int k);
@@ -24,6 +24,6 @@ private:
std::vector<std::vector<double>> Z;
int k;
};
} //namespace MLPP
#endif /* PCA_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
ProbitReg::ProbitReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -243,4 +243,3 @@ void ProbitReg::forwardPass() {
z = propagate(inputSet);
y_hat = avn.gaussianCDF(z);
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class ProbitReg {
public:
@@ -49,6 +49,6 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* ProbitReg_hpp */

View File

@@ -10,7 +10,7 @@
#include <iostream>
#include <random>
namespace MLPP {
double Reg::regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg) {
if (reg == "Ridge") {
@@ -162,4 +162,4 @@ double Reg::regDerivTerm(std::vector<std::vector<double>> weights, double lambda
return 0;
}
}
} //namespace MLPP

View File

@@ -12,7 +12,7 @@
#include <vector>
#include <string>
namespace MLPP {
class Reg {
public:
double regTerm(std::vector<double> weights, double lambda, double alpha, std::string reg);
@@ -28,6 +28,6 @@ private:
double regDerivTerm(std::vector<double> weights, double lambda, double alpha, std::string reg, int j);
double regDerivTerm(std::vector<std::vector<double>> weights, double lambda, double alpha, std::string reg, int i, int j);
};
} //namespace MLPP
#endif /* Reg_hpp */

View File

@@ -15,7 +15,7 @@
#include <iostream>
#include <random>
namespace MLPP {
SoftmaxNet::SoftmaxNet(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, int n_hidden, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_hidden(n_hidden), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -289,4 +289,3 @@ void SoftmaxNet::forwardPass() {
a2 = avn.sigmoid(z2);
y_hat = avn.adjSoftmax(alg.mat_vec_add(alg.matmult(a2, weights2), bias2));
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class SoftmaxNet {
public:
@@ -57,6 +57,6 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* SoftmaxNet_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
SoftmaxReg::SoftmaxReg(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_class(outputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -189,4 +189,3 @@ void SoftmaxReg::forwardPass() {
y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class SoftmaxReg {
public:
@@ -46,6 +46,6 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* SoftmaxReg_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
namespace MLPP {
double Stat::b0Estimation(const std::vector<double> &x, const std::vector<double> &y) {
return mean(y) - b1Estimation(x, y) * mean(x);
}
@@ -214,4 +214,3 @@ double Stat::logMean(const double x, const double y) {
}
return (y - x) / (log(y) - std::log(x));
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class Stat {
public:
// These functions are for univariate lin reg module- not for users.
@@ -47,6 +47,6 @@ public:
double identricMean(const double x, const double y);
double logMean(const double x, const double y);
};
} //namespace MLPP
#endif /* Stat_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
SVC::SVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C) {
y_hat.resize(n);
@@ -195,4 +195,3 @@ void SVC::forwardPass() {
z = propagate(inputSet);
y_hat = avn.sign(z);
}
} //namespace MLPP

View File

@@ -14,7 +14,7 @@
#include <string>
#include <vector>
namespace MLPP {
class SVC {
public:
@@ -50,6 +50,6 @@ private:
// UI Portion
void UI(int epoch, double cost_prev);
};
} //namespace MLPP
#endif /* SVC_hpp */

View File

@@ -14,7 +14,7 @@
#include <iostream>
#include <random>
namespace MLPP {
TanhReg::TanhReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
y_hat.resize(n);
@@ -192,4 +192,3 @@ void TanhReg::forwardPass() {
z = propagate(inputSet);
y_hat = avn.tanh(z);
}
} //namespace MLPP

View File

@@ -11,7 +11,7 @@
#include <string>
#include <vector>
namespace MLPP {
class TanhReg {
public:
@@ -51,6 +51,6 @@ private:
double lambda;
double alpha; /* This is the controlling param for Elastic Net*/
};
} //namespace MLPP
#endif /* TanhReg_hpp */

View File

@@ -10,7 +10,7 @@
#include <iostream>
#include <string>
namespace MLPP {
// DCT ii.
// https://www.mathworks.com/help/images/discrete-cosine-transform.html
@@ -53,4 +53,3 @@ std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector
}
return B;
}
} //namespace MLPP

View File

@@ -10,11 +10,11 @@
#include <string>
#include <vector>
namespace MLPP {
class Transforms {
public:
std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);
};
} //namespace MLPP
#endif /* Transforms_hpp */

View File

@@ -15,7 +15,7 @@
// Univariate Linear Regression Model
// ŷ = b0 + b1x1
namespace MLPP {
UniLinReg::UniLinReg(std::vector<double> x, std::vector<double> y) :
inputSet(x), outputSet(y) {
Stat estimator;
@@ -31,4 +31,4 @@ std::vector<double> UniLinReg::modelSetTest(std::vector<double> x) {
double UniLinReg::modelTest(double input) {
return b0 + b1 * input;
}
} //namespace MLPP

View File

@@ -10,7 +10,7 @@
#include <vector>
namespace MLPP {
class UniLinReg {
public:
UniLinReg(std::vector<double> x, std::vector<double> y);
@@ -24,6 +24,6 @@ private:
double b0;
double b1;
};
} //namespace MLPP
#endif /* UniLinReg_hpp */

View File

@@ -10,7 +10,7 @@
#include <random>
#include <string>
namespace MLPP {
std::vector<double> Utilities::weightInitialization(int n, std::string type) {
std::random_device rd;
@@ -380,4 +380,3 @@ double Utilities::accuracy(std::vector<double> y_hat, std::vector<double> y) {
double Utilities::f1_score(std::vector<double> y_hat, std::vector<double> y) {
return 2 * precision(y_hat, y) * recall(y_hat, y) / (precision(y_hat, y) + recall(y_hat, y));
}
} //namespace MLPP

View File

@@ -12,7 +12,7 @@
#include <tuple>
#include <vector>
namespace MLPP {
class Utilities {
public:
// Weight Init
@@ -50,6 +50,6 @@ public:
private:
};
} //namespace MLPP
#endif /* Utilities_hpp */

View File

@@ -14,7 +14,7 @@
#include <cmath>
#include <iostream>
namespace MLPP {
WGAN::WGAN(double k, std::vector<std::vector<double>> outputSet) :
outputSet(outputSet), n(outputSet.size()), k(k) {
}
@@ -292,4 +292,3 @@ void WGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vecto
}
}
}
} //namespace MLPP

View File

@@ -15,7 +15,7 @@
#include <tuple>
#include <vector>
namespace MLPP {
class WGAN {
public:
@@ -52,6 +52,6 @@ private:
int n;
int k;
};
} //namespace MLPP
#endif /* WGAN_hpp */