Added MANN (multidimensional output artificial neural net), cleaned up code for ANN and MANN

This commit is contained in:
novak_99 2021-06-05 22:28:23 -07:00
parent 0753ffe261
commit f7c175745f
13 changed files with 426 additions and 12 deletions

MLPP/ANN/ANN.cpp
View File

@@ -49,8 +49,8 @@ namespace MLPP {
    void ANN::gradientDescent(double learning_rate, int max_epoch, bool UI){
        class Cost cost;
-       LinAlg alg;
        Activation avn;
+       LinAlg alg;
        Reg regularization;
        double cost_prev = 0;
@@ -132,7 +132,7 @@ namespace MLPP {
    }
    void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha){
-       outputLayer = new OutputLayer(network[0].n_hidden, outputSet.size(), activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+       outputLayer = new OutputLayer(network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
    }
    double ANN::Cost(std::vector<double> y_hat, std::vector<double> y){

MLPP/Activation/Activation.cpp
View File

@@ -49,7 +49,7 @@ namespace MLPP{
        return alg.elementWiseDivision(alg.onemat(z.size(), z[0].size()), alg.addition(alg.onemat(z.size(), z[0].size()), alg.exp(alg.scalarMultiply(-1, z))));
    }
-   std::vector<double> Activation::softmax(std::vector<double> z){
+   std::vector<double> Activation::softmax(std::vector<double> z, bool deriv){
        LinAlg alg;
        std::vector<double> a;
        a.resize(z.size());
@@ -65,7 +65,7 @@ namespace MLPP{
        return a;
    }
-   std::vector<std::vector<double>> Activation::softmax(std::vector<std::vector<double>> z){
+   std::vector<std::vector<double>> Activation::softmax(std::vector<std::vector<double>> z, bool deriv){
        LinAlg alg;
        std::vector<std::vector<double>> a;
        a.resize(z.size());

MLPP/Activation/Activation.hpp
View File

@@ -20,8 +20,8 @@ namespace MLPP{
        std::vector<double> sigmoid(std::vector<double> z, bool deriv = 0);
        std::vector<std::vector<double>> sigmoid(std::vector<std::vector<double>> z, bool deriv = 0);
-       std::vector<double> softmax(std::vector<double> z);
-       std::vector<std::vector<double>> softmax(std::vector<std::vector<double>> z);
+       std::vector<double> softmax(std::vector<double> z, bool deriv = 0);
+       std::vector<std::vector<double>> softmax(std::vector<std::vector<double>> z, bool deriv = 0);
        std::vector<double> adjSoftmax(std::vector<double> z);
        std::vector<std::vector<double>> adjSoftmax(std::vector<std::vector<double>> z);
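The deriv flag is what ties these signature changes together: once softmax takes the same (z, bool deriv) shape as sigmoid and the rest, every activation fits a single pointer-to-member type and can be dispatched by name, which is what the new MultiOutputLayer below relies on. A minimal, self-contained sketch of that pattern (the toy Act class and values here are invented for illustration, not the library's actual code):

#include <cmath>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for Activation: every member shares one signature,
// so one map type can hold them all.
struct Act {
    std::vector<double> sigmoid(std::vector<double> z, bool deriv) {
        std::vector<double> a(z.size());
        for (int i = 0; i < z.size(); i++) {
            double s = 1 / (1 + std::exp(-z[i]));
            a[i] = deriv ? s * (1 - s) : s;
        }
        return a;
    }
    std::vector<double> softmax(std::vector<double> z, bool deriv) {
        (void)deriv; // unused here; kept only for the uniform signature
        double sum = 0;
        for (double v : z) sum += std::exp(v);
        std::vector<double> a(z.size());
        for (int i = 0; i < z.size(); i++) a[i] = std::exp(z[i]) / sum;
        return a;
    }
};

int main() {
    std::map<std::string, std::vector<double> (Act::*)(std::vector<double>, bool)> m;
    m["Sigmoid"] = &Act::sigmoid;
    m["Softmax"] = &Act::softmax; // compiles only because the signatures now match
    Act avn;
    for (double v : (avn.*m["Softmax"])({1, 2, 3}, 0)) std::cout << v << ' ';
    std::cout << std::endl;
}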

168
MLPP/MANN/MANN.cpp Normal file
View File

@@ -0,0 +1,168 @@
//
// MANN.cpp
//
// Created by Marc Melikyan on 11/4/20.
//

#include "MANN.hpp"
#include "Activation/Activation.hpp"
#include "LinAlg/LinAlg.hpp"
#include "Regularization/Reg.hpp"
#include "Utilities/Utilities.hpp"
#include "Cost/Cost.hpp"

#include <iostream>

namespace MLPP {
    MANN::MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet)
    : inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), n_output(outputSet[0].size())
    {
    }

    MANN::~MANN(){
        delete outputLayer;
    }

    std::vector<std::vector<double>> MANN::modelSetTest(std::vector<std::vector<double>> X){
        network[0].input = X;
        network[0].forwardPass();
        for(int i = 1; i < network.size(); i++){
            network[i].input = network[i - 1].a;
            network[i].forwardPass();
        }
        outputLayer->input = network[network.size() - 1].a;
        outputLayer->forwardPass();
        return outputLayer->a;
    }

    std::vector<double> MANN::modelTest(std::vector<double> x){
        network[0].Test(x);
        for(int i = 1; i < network.size(); i++){
            network[i].Test(network[i - 1].a_test);
        }
        outputLayer->Test(network[network.size() - 1].a_test);
        return outputLayer->a_test;
    }

    void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI){
        class Cost cost;
        Activation avn;
        LinAlg alg;
        Reg regularization;

        double cost_prev = 0;
        int epoch = 1;
        forwardPass();

        while(true){
            cost_prev = Cost(y_hat, outputSet);
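            // Softmax + cross-entropy shortcut: the chain rule collapses to
            // delta = y_hat - y, so the generic cost'/activation' product is skipped.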
            if(outputLayer->activation == "Softmax"){
                outputLayer->delta = alg.subtraction(y_hat, outputSet);
            }
            else{
                auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
                auto outputAvn = outputLayer->activation_map[outputLayer->activation];
                outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
            }
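            // Output-layer step: dC/dW = input^T * delta; the learning_rate / n
            // scaling averages the gradient over the n training examples.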
            std::vector<std::vector<double>> outputWGrad = alg.matmult(alg.transpose(outputLayer->input), outputLayer->delta);

            outputLayer->weights = alg.subtraction(outputLayer->weights, alg.scalarMultiply(learning_rate/n, outputWGrad));
            outputLayer->weights = regularization.regWeights(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
            outputLayer->bias = alg.subtractMatrixRows(outputLayer->bias, alg.scalarMultiply(learning_rate/n, outputLayer->delta));

            auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
            network[network.size() - 1].delta = alg.hadamard_product(alg.matmult(outputLayer->delta, alg.transpose(outputLayer->weights)), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
            std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);

            network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, alg.scalarMultiply(learning_rate/n, hiddenLayerWGrad));
            network[network.size() - 1].weights = regularization.regWeights(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg);
            network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate/n, network[network.size() - 1].delta));

            for(int i = network.size() - 2; i >= 0; i--){
                auto hiddenLayerAvn = network[i].activation_map[network[i].activation];
                network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
                std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
                network[i].weights = alg.subtraction(network[i].weights, alg.scalarMultiply(learning_rate/n, hiddenLayerWGrad));
                network[i].weights = regularization.regWeights(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
                network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate/n, network[i].delta));
            }

            forwardPass();

            if(UI) {
                Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
                std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
                Utilities::UI(outputLayer->weights, outputLayer->bias);
                std::cout << "Layer " << network.size() << ": " << std::endl;
                Utilities::UI(network[network.size() - 1].weights, network[network.size() - 1].bias);
                for(int i = network.size() - 2; i >= 0; i--){
                    std::cout << "Layer " << i + 1 << ": " << std::endl;
                    Utilities::UI(network[i].weights, network[i].bias);
                }
            }

            epoch++;
            if(epoch > max_epoch) { break; }
        }
    }

    double MANN::score(){
        Utilities util;
        forwardPass();
        return util.performance(y_hat, outputSet);
    }

    void MANN::save(std::string fileName){
        Utilities util;
        util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
        for(int i = 1; i < network.size(); i++){
            util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
        }
        util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
    }

    void MANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha){
        if(network.empty()){
            network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
            network[0].forwardPass();
        }
        else{
            network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
            network[network.size() - 1].forwardPass();
        }
    }

    void MANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha){
        outputLayer = new MultiOutputLayer(n_output, network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
    }

    double MANN::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y){
        Reg regularization;
        class Cost cost;
        double totalRegTerm = 0;

        auto cost_function = outputLayer->cost_map[outputLayer->cost];
        for(int i = 0; i < network.size() - 1; i++){
            totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
        }
        return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
    }

    void MANN::forwardPass(){
        network[0].input = inputSet;
        network[0].forwardPass();
        for(int i = 1; i < network.size(); i++){
            network[i].input = network[i - 1].a;
            network[i].forwardPass();
        }
        outputLayer->input = network[network.size() - 1].a;
        outputLayer->forwardPass();
        y_hat = outputLayer->a;
    }
}
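For reference, the loop in gradientDescent above is plain full-batch backpropagation. Writing $Z_\ell = A_{\ell-1} W_\ell + b_\ell$ for layer $\ell$'s pre-activation, $g_\ell$ for its activation, $\eta$ for the learning rate, and $n$ for the number of examples, the updates it performs are (the regWeights regularization step is applied afterwards and omitted here):

$$\Delta_L = \nabla_{\hat{Y}} C \odot g_L'(Z_L) \qquad \text{(which reduces to } \hat{Y} - Y \text{ for softmax with cross-entropy)}$$
$$\Delta_\ell = \left( \Delta_{\ell+1} W_{\ell+1}^\top \right) \odot g_\ell'(Z_\ell)$$
$$W_\ell \leftarrow W_\ell - \frac{\eta}{n} A_{\ell-1}^\top \Delta_\ell, \qquad b_\ell \leftarrow b_\ell - \frac{\eta}{n} \sum_{i=1}^{n} (\Delta_\ell)_{i,:}$$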

48
MLPP/MANN/MANN.hpp Normal file
View File

@@ -0,0 +1,48 @@
//
// MANN.hpp
//
// Created by Marc Melikyan on 11/4/20.
//

#ifndef MANN_hpp
#define MANN_hpp

#include "HiddenLayer/HiddenLayer.hpp"
#include "MultiOutputLayer/MultiOutputLayer.hpp"

#include <vector>
#include <string>

namespace MLPP{
    class MANN{
        public:
            MANN(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet);
            ~MANN();
            std::vector<std::vector<double>> modelSetTest(std::vector<std::vector<double>> X);
            std::vector<double> modelTest(std::vector<double> x);
            void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
            double score();
            void save(std::string fileName);

            void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
            void addOutputLayer(std::string activation, std::string loss, std::string weightInit = "Default", std::string reg = "None", double lambda = 0.5, double alpha = 0.5);

        private:
            double Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y);
            void forwardPass();

            std::vector<std::vector<double>> inputSet;
            std::vector<std::vector<double>> outputSet;
            std::vector<std::vector<double>> y_hat;

            std::vector<HiddenLayer> network;
            MultiOutputLayer *outputLayer;

            int n;        // number of training examples
            int k;        // input dimensionality
            int n_output; // output dimensionality
    };
}

#endif /* MANN_hpp */
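A minimal driver for this interface, condensed from the commented-out MANN demo in main.cpp further down (the layer size, loss, and hyperparameters here are arbitrary illustrative choices, not recommendations):

#include "MLPP/MANN/MANN.hpp"
#include <iostream>
#include <vector>

int main() {
    // Toy data from the main.cpp demo: each example has a 2-dimensional output.
    std::vector<std::vector<double>> inputSet  = {{1,2,3},{2,4,6},{3,6,9},{4,8,12}};
    std::vector<std::vector<double>> outputSet = {{1,5},{2,10},{3,15},{4,20}};

    MLPP::MANN mann(inputSet, outputSet);
    mann.addLayer(8, "Sigmoid");            // one hidden layer with 8 units
    mann.addOutputLayer("Linear", "MSE");   // 2-dimensional linear output head
    mann.gradientDescent(0.001, 1000, 0);   // learning rate, epochs, UI off
    std::cout << mann.score() << std::endl;
    return 0;
}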

119
MLPP/MultiOutputLayer/MultiOutputLayer.cpp Normal file
View File

@@ -0,0 +1,119 @@
//
// MultiOutputLayer.cpp
//
// Created by Marc Melikyan on 11/4/20.
//

#include "MultiOutputLayer.hpp"
#include "LinAlg/LinAlg.hpp"
#include "Utilities/Utilities.hpp"

#include <iostream>
#include <random>

namespace MLPP {
    MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha)
    : n_output(n_output), n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha)
    {
        weights = Utilities::weightInitialization(n_hidden, n_output, weightInit);
        bias = Utilities::biasInitialization(n_output);

        activation_map["Linear"] = &Activation::linear;
        activationTest_map["Linear"] = &Activation::linear;
        activation_map["Sigmoid"] = &Activation::sigmoid;
        activationTest_map["Sigmoid"] = &Activation::sigmoid;
        activation_map["Softmax"] = &Activation::softmax;
        activationTest_map["Softmax"] = &Activation::softmax;
        activation_map["Swish"] = &Activation::swish;
        activationTest_map["Swish"] = &Activation::swish;
        activation_map["Softplus"] = &Activation::softplus;
        activationTest_map["Softplus"] = &Activation::softplus;
        activation_map["Softsign"] = &Activation::softsign;
        activationTest_map["Softsign"] = &Activation::softsign;
        activation_map["CLogLog"] = &Activation::cloglog;
        activationTest_map["CLogLog"] = &Activation::cloglog;
        activation_map["GaussianCDF"] = &Activation::gaussianCDF;
        activationTest_map["GaussianCDF"] = &Activation::gaussianCDF;
        activation_map["RELU"] = &Activation::RELU;
        activationTest_map["RELU"] = &Activation::RELU;
        activation_map["GELU"] = &Activation::GELU;
        activationTest_map["GELU"] = &Activation::GELU;
        activation_map["UnitStep"] = &Activation::unitStep;
        activationTest_map["UnitStep"] = &Activation::unitStep;
        activation_map["Sinh"] = &Activation::sinh;
        activationTest_map["Sinh"] = &Activation::sinh;
        activation_map["Cosh"] = &Activation::cosh;
        activationTest_map["Cosh"] = &Activation::cosh;
        activation_map["Tanh"] = &Activation::tanh;
        activationTest_map["Tanh"] = &Activation::tanh;
        activation_map["Csch"] = &Activation::csch;
        activationTest_map["Csch"] = &Activation::csch;
        activation_map["Sech"] = &Activation::sech;
        activationTest_map["Sech"] = &Activation::sech;
        activation_map["Coth"] = &Activation::coth;
        activationTest_map["Coth"] = &Activation::coth;
        activation_map["Arsinh"] = &Activation::arsinh;
        activationTest_map["Arsinh"] = &Activation::arsinh;
        activation_map["Arcosh"] = &Activation::arcosh;
        activationTest_map["Arcosh"] = &Activation::arcosh;
        activation_map["Artanh"] = &Activation::artanh;
        activationTest_map["Artanh"] = &Activation::artanh;
        activation_map["Arcsch"] = &Activation::arcsch;
        activationTest_map["Arcsch"] = &Activation::arcsch;
        activation_map["Arsech"] = &Activation::arsech;
        activationTest_map["Arsech"] = &Activation::arsech;
        activation_map["Arcoth"] = &Activation::arcoth;
        activationTest_map["Arcoth"] = &Activation::arcoth;

        costDeriv_map["MSE"] = &Cost::MSEDeriv;
        cost_map["MSE"] = &Cost::MSE;
        costDeriv_map["RMSE"] = &Cost::RMSEDeriv;
        cost_map["RMSE"] = &Cost::RMSE;
        costDeriv_map["MAE"] = &Cost::MAEDeriv;
        cost_map["MAE"] = &Cost::MAE;
        costDeriv_map["MBE"] = &Cost::MBEDeriv;
        cost_map["MBE"] = &Cost::MBE;
        costDeriv_map["LogLoss"] = &Cost::LogLossDeriv;
        cost_map["LogLoss"] = &Cost::LogLoss;
        costDeriv_map["CrossEntropy"] = &Cost::CrossEntropyDeriv;
        cost_map["CrossEntropy"] = &Cost::CrossEntropy;
        costDeriv_map["HingeLoss"] = &Cost::HingeLossDeriv;
        cost_map["HingeLoss"] = &Cost::HingeLoss;
    }

    void MultiOutputLayer::forwardPass(){
        LinAlg alg;
        Activation avn;
        z = alg.mat_vec_add(alg.matmult(input, weights), bias);
        a = (avn.*activation_map[activation])(z, 0);
    }

    void MultiOutputLayer::Test(std::vector<double> x){
        LinAlg alg;
        Activation avn;
        z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
        a_test = (avn.*activationTest_map[activation])(z_test, 0);
    }
}
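As a shape check for forwardPass above: with n examples, n_hidden inputs to this layer, and n_output outputs, input is n x n_hidden, weights is n_hidden x n_output, and mat_vec_add broadcasts the length-n_output bias across the n rows. A standalone sketch of that step (the forwardZ helper is hypothetical, not the library's LinAlg API):

#include <iostream>
#include <vector>

// Sketch of z = input * weights + bias, using the same row-major
// vector<vector<double>> layout as the listing above.
std::vector<std::vector<double>> forwardZ(
    const std::vector<std::vector<double>>& X,  // n x n_hidden
    const std::vector<std::vector<double>>& W,  // n_hidden x n_output
    const std::vector<double>& b) {             // n_output, broadcast per row
    std::vector<std::vector<double>> Z(X.size(), std::vector<double>(b.size(), 0.0));
    for (int i = 0; i < X.size(); i++)
        for (int j = 0; j < b.size(); j++) {
            for (int k = 0; k < W.size(); k++) Z[i][j] += X[i][k] * W[k][j];
            Z[i][j] += b[j];
        }
    return Z;
}

int main() {
    auto Z = forwardZ({{1, 2}, {3, 4}}, {{1, 0, 1}, {0, 1, 1}}, {0.5, 0.5, 0.5});
    for (auto& row : Z) {
        for (double v : row) std::cout << v << ' ';
        std::cout << std::endl;
    }
}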

58
MLPP/MultiOutputLayer/MultiOutputLayer.hpp Normal file
View File

@@ -0,0 +1,58 @@
//
// MultiOutputLayer.hpp
//
// Created by Marc Melikyan on 11/4/20.
//

#ifndef MultiOutputLayer_hpp
#define MultiOutputLayer_hpp

#include "Activation/Activation.hpp"
#include "Cost/Cost.hpp"

#include <vector>
#include <map>
#include <string>

namespace MLPP {
    class MultiOutputLayer{
        public:
            MultiOutputLayer(int n_output, int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);

            int n_output;
            int n_hidden;
            std::string activation;
            std::string cost;

            std::vector<std::vector<double>> input;
            std::vector<std::vector<double>> weights;
            std::vector<double> bias;

            std::vector<std::vector<double>> z;
            std::vector<std::vector<double>> a;

            std::map<std::string, std::vector<std::vector<double>> (Activation::*)(std::vector<std::vector<double>>, bool)> activation_map;
            std::map<std::string, std::vector<double> (Activation::*)(std::vector<double>, bool)> activationTest_map;
            std::map<std::string, double (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> cost_map;
            std::map<std::string, std::vector<std::vector<double>> (Cost::*)(std::vector<std::vector<double>>, std::vector<std::vector<double>>)> costDeriv_map;

            std::vector<double> z_test;
            std::vector<double> a_test;
            std::vector<std::vector<double>> delta;

            // Regularization Params
            std::string reg;
            double lambda; /* Regularization Parameter */
            double alpha;  /* This is the controlling param for Elastic Net */

            std::string weightInit;

            void forwardPass();
            void Test(std::vector<double> x);
    };
}

#endif /* MultiOutputLayer_hpp */

MLPP/OutputLayer/OutputLayer.cpp
View File

@@ -12,8 +12,8 @@
#include <random>

namespace MLPP {
-   OutputLayer::OutputLayer(int n_hidden, int outputSize, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha)
-   : n_hidden(n_hidden), outputSize(outputSize), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha)
+   OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha)
+   : n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha)
    {
        weights = Utilities::weightInitialization(n_hidden, weightInit);
        bias = Utilities::biasInitialization();

MLPP/OutputLayer/OutputLayer.hpp
View File

@@ -17,10 +17,9 @@
namespace MLPP {
    class OutputLayer{
        public:
-           OutputLayer(int n_hidden, int outputSize, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
+           OutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
            int n_hidden;
-           int outputSize;
            std::string activation;
            std::string cost;

Binary file not shown.

BIN
a.out

Binary file not shown.

View File

@@ -1,6 +1,6 @@
-g++ -I MLPP -c MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp --std=c++17 -pthread
+g++ -I MLPP -c MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/MANN/MANN.cpp --std=c++17 -pthread
-g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o
+g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o MultiOutputLayer.o MANN.o
mv MLPP.so SharedLib

main.cpp
View File

@@ -29,6 +29,7 @@
#include "MLPP/SoftmaxNet/SoftmaxNet.hpp"
#include "MLPP/AutoEncoder/AutoEncoder.hpp"
#include "MLPP/ANN/ANN.hpp"
+#include "MLPP/MANN/MANN.hpp"
#include "MLPP/MultinomialNB/MultinomialNB.hpp"
#include "MLPP/BernoulliNB/BernoulliNB.hpp"
#include "MLPP/GaussianNB/GaussianNB.hpp"
@@ -240,6 +241,27 @@ int main() {
    // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
    // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
+
+   // // DYNAMICALLY SIZED MANN (Multidimensional Output ANN)
+   // std::vector<std::vector<double>> inputSet = {{1,2,3},{2,4,6},{3,6,9},{4,8,12}};
+   // std::vector<std::vector<double>> outputSet = {{1,5}, {2,10}, {3,15}, {4,20}};
+
+   // std::vector<std::vector<double>> inputSet;
+   // std::vector<double> tempOutputSet;
+   // data.setData(4, "/Users/marcmelikyan/Desktop/Data/Iris.csv", inputSet, tempOutputSet);
+
+   // std::vector<std::vector<double>> inputSet;
+   // std::vector<double> tempOutputSet;
+   // data.setData(784, "/Users/marcmelikyan/Desktop/Data/mnist_train.csv", inputSet, tempOutputSet);
+   // std::vector<std::vector<double>> outputSet = data.oneHotRep(tempOutputSet, 10);
+
+   // MANN mann(inputSet, outputSet);
+   // mann.addLayer(128, "RELU");
+   // mann.addLayer(128, "RELU");
+   // mann.addOutputLayer("Softmax", "CrossEntropy");
+   // mann.gradientDescent(0.001, 1, 1);
+   // alg.printMatrix(mann.modelSetTest(inputSet));
+   // std::cout << "ACCURACY: " << 100 * mann.score() << "%" << std::endl;

    // // NAIVE BAYES
    // std::vector<std::vector<double>> inputSet = {{1,1,1,1,1}, {0,0,1,1,1}, {0,0,1,0,1}};
    // std::vector<double> outputSet = {0,1,0,1,1};