Mirror of https://github.com/Relintai/pmlpp.git (synced 2024-11-08 13:12:09 +01:00)
Prefixed more classes with MLPP.
This commit is contained in:
parent 0e9d8bcb41 · commit d6e9d20c91
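The rename is purely mechanical: every class keeps its interface, and only the type name at each call site gains the MLPP prefix. A minimal before/after sketch of a caller (the include paths are assumptions for illustration, not part of this commit):

#include <vector>
#include "data.h"    // assumed header declaring MLPPData (formerly Data)
#include "lin_alg.h" // assumed header declaring LinAlg

// Mirrors MLPPBernoulliNB::computeVocab() from the diff below.
std::vector<double> buildVocab(std::vector<std::vector<double>> inputSet) {
	LinAlg alg;
	MLPPData data; // before this commit: Data data;
	return data.vecToSet<double>(alg.flatten(inputSet));
}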
@@ -652,10 +652,10 @@ double MLPPANN::applyLearningRateScheduler(double learningRate, double decayCons
 void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
 	if (network.empty()) {
-		network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -54,7 +54,7 @@ private:
 	std::vector<double> outputSet;
 	std::vector<double> y_hat;

-	std::vector<HiddenLayer> network;
+	std::vector<MLPPHiddenLayer> network;
 	OutputLayer *outputLayer;

 	int n;
@@ -75,7 +75,7 @@ double MLPPBernoulliNB::score() {

 void MLPPBernoulliNB::computeVocab() {
 	LinAlg alg;
-	Data data;
+	MLPPData data;
 	vocab = data.vecToSet<double>(alg.flatten(inputSet));
 }

@@ -18,7 +18,7 @@


 // Loading Datasets
-std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBreastCancer() {
+std::tuple<std::vector<std::vector<double>>, std::vector<double>> MLPPData::loadBreastCancer() {
 	const int BREAST_CANCER_SIZE = 30; // k = 30
 	std::vector<std::vector<double>> inputSet;
 	std::vector<double> outputSet;
@@ -27,7 +27,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBrea
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBreastCancerSVC() {
+std::tuple<std::vector<std::vector<double>>, std::vector<double>> MLPPData::loadBreastCancerSVC() {
 	const int BREAST_CANCER_SIZE = 30; // k = 30
 	std::vector<std::vector<double>> inputSet;
 	std::vector<double> outputSet;
@@ -36,7 +36,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadBrea
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Data::loadIris() {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPData::loadIris() {
 	const int IRIS_SIZE = 4;
 	const int ONE_HOT_NUM = 3;
 	std::vector<std::vector<double>> inputSet;
@@ -47,7 +47,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> D
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Data::loadWine() {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPData::loadWine() {
 	const int WINE_SIZE = 4;
 	const int ONE_HOT_NUM = 3;
 	std::vector<std::vector<double>> inputSet;
@@ -58,7 +58,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> D
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Data::loadMnistTrain() {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPData::loadMnistTrain() {
 	const int MNIST_SIZE = 784;
 	const int ONE_HOT_NUM = 10;
 	std::vector<std::vector<double>> inputSet;
@@ -69,7 +69,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> D
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Data::loadMnistTest() {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPData::loadMnistTest() {
 	const int MNIST_SIZE = 784;
 	const int ONE_HOT_NUM = 10;
 	std::vector<std::vector<double>> inputSet;
@@ -80,7 +80,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> D
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadCaliforniaHousing() {
+std::tuple<std::vector<std::vector<double>>, std::vector<double>> MLPPData::loadCaliforniaHousing() {
 	const int CALIFORNIA_HOUSING_SIZE = 13; // k = 30
 	std::vector<std::vector<double>> inputSet;
 	std::vector<double> outputSet;
@@ -89,7 +89,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<double>> Data::loadCali
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<double>, std::vector<double>> Data::loadFiresAndCrime() {
+std::tuple<std::vector<double>, std::vector<double>> MLPPData::loadFiresAndCrime() {
 	std::vector<double> inputSet; // k is implicitly 1.
 	std::vector<double> outputSet;

@@ -97,7 +97,7 @@ std::tuple<std::vector<double>, std::vector<double>> Data::loadFiresAndCrime() {
 	return { inputSet, outputSet };
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>> Data::trainTestSplit(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, double testSize) {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>> MLPPData::trainTestSplit(std::vector<std::vector<double>> inputSet, std::vector<std::vector<double>> outputSet, double testSize) {
 	std::random_device rd;
 	std::default_random_engine generator(rd());

@@ -125,7 +125,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, s

 // MULTIVARIATE SUPERVISED

-void Data::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet, std::vector<double> &outputSet) {
+void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet, std::vector<double> &outputSet) {
 	LinAlg alg;
 	std::string inputTemp;
 	std::string outputTemp;
@@ -153,7 +153,7 @@ void Data::setData(int k, std::string fileName, std::vector<std::vector<double>>
 	dataFile.close();
 }

-void Data::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) {
+void MLPPData::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) {
 	LinAlg alg;
 	inputSet = alg.transpose(inputSet);
 	for (int i = 0; i < inputSet.size(); i++) {
@@ -171,7 +171,7 @@ void Data::printData(std::vector<std::string> inputName, std::string outputName,

 // UNSUPERVISED

-void Data::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet) {
+void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<double>> &inputSet) {
 	LinAlg alg;
 	std::string inputTemp;

@@ -195,7 +195,7 @@ void Data::setData(int k, std::string fileName, std::vector<std::vector<double>>
 	dataFile.close();
 }

-void Data::printData(std::vector<std::string> inputName, std::vector<std::vector<double>> inputSet) {
+void MLPPData::printData(std::vector<std::string> inputName, std::vector<std::vector<double>> inputSet) {
 	LinAlg alg;
 	inputSet = alg.transpose(inputSet);
 	for (int i = 0; i < inputSet.size(); i++) {
@@ -208,7 +208,7 @@ void Data::printData(std::vector<std::string> inputName, std::vector<std::vector

 // SIMPLE

-void Data::setData(std::string fileName, std::vector<double> &inputSet, std::vector<double> &outputSet) {
+void MLPPData::setData(std::string fileName, std::vector<double> &inputSet, std::vector<double> &outputSet) {
 	std::string inputTemp, outputTemp;

 	std::ifstream dataFile(fileName);
@@ -231,7 +231,7 @@ void Data::setData(std::string fileName, std::vector<double> &inputSet, std::vec
 	dataFile.close();
 }

-void Data::printData(std::string &inputName, std::string &outputName, std::vector<double> &inputSet, std::vector<double> &outputSet) {
+void MLPPData::printData(std::string &inputName, std::string &outputName, std::vector<double> &inputSet, std::vector<double> &outputSet) {
 	std::cout << inputName << std::endl;
 	for (int i = 0; i < inputSet.size(); i++) {
 		std::cout << inputSet[i] << std::endl;
@@ -244,7 +244,7 @@ void Data::printData(std::string &inputName, std::string &outputName, std::vecto
 }

 // Images
-std::vector<std::vector<double>> Data::rgb2gray(std::vector<std::vector<std::vector<double>>> input) {
+std::vector<std::vector<double>> MLPPData::rgb2gray(std::vector<std::vector<std::vector<double>>> input) {
 	std::vector<std::vector<double>> grayScale;
 	grayScale.resize(input[0].size());
 	for (int i = 0; i < grayScale.size(); i++) {
@@ -258,7 +258,7 @@ std::vector<std::vector<double>> Data::rgb2gray(std::vector<std::vector<std::vec
 	return grayScale;
 }

-std::vector<std::vector<std::vector<double>>> Data::rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input) {
+std::vector<std::vector<std::vector<double>>> MLPPData::rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input) {
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> YCbCr;
 	YCbCr = alg.resize(YCbCr, input);
@@ -274,7 +274,7 @@ std::vector<std::vector<std::vector<double>>> Data::rgb2ycbcr(std::vector<std::v

 // Conversion formulas available here:
 // https://www.rapidtables.com/convert/color/rgb-to-hsv.html
-std::vector<std::vector<std::vector<double>>> Data::rgb2hsv(std::vector<std::vector<std::vector<double>>> input) {
+std::vector<std::vector<std::vector<double>>> MLPPData::rgb2hsv(std::vector<std::vector<std::vector<double>>> input) {
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> HSV;
 	HSV = alg.resize(HSV, input);
@@ -316,7 +316,7 @@ std::vector<std::vector<std::vector<double>>> Data::rgb2hsv(std::vector<std::vec
 }

 // http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html
-std::vector<std::vector<std::vector<double>>> Data::rgb2xyz(std::vector<std::vector<std::vector<double>>> input) {
+std::vector<std::vector<std::vector<double>>> MLPPData::rgb2xyz(std::vector<std::vector<std::vector<double>>> input) {
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> XYZ;
 	XYZ = alg.resize(XYZ, input);
@@ -324,7 +324,7 @@ std::vector<std::vector<std::vector<double>>> Data::rgb2xyz(std::vector<std::vec
 	return alg.vector_wise_tensor_product(input, RGB2XYZ);
 }

-std::vector<std::vector<std::vector<double>>> Data::xyz2rgb(std::vector<std::vector<std::vector<double>>> input) {
+std::vector<std::vector<std::vector<double>>> MLPPData::xyz2rgb(std::vector<std::vector<std::vector<double>>> input) {
 	LinAlg alg;
 	std::vector<std::vector<std::vector<double>>> XYZ;
 	XYZ = alg.resize(XYZ, input);
@@ -333,14 +333,14 @@ std::vector<std::vector<std::vector<double>>> Data::xyz2rgb(std::vector<std::vec
 }

 // TEXT-BASED & NLP
-std::string Data::toLower(std::string text) {
+std::string MLPPData::toLower(std::string text) {
 	for (int i = 0; i < text.size(); i++) {
 		text[i] = tolower(text[i]);
 	}
 	return text;
 }

-std::vector<char> Data::split(std::string text) {
+std::vector<char> MLPPData::split(std::string text) {
 	std::vector<char> split_data;
 	for (int i = 0; i < text.size(); i++) {
 		split_data.push_back(text[i]);
@@ -348,7 +348,7 @@ std::vector<char> Data::split(std::string text) {
 	return split_data;
 }

-std::vector<std::string> Data::splitSentences(std::string data) {
+std::vector<std::string> MLPPData::splitSentences(std::string data) {
 	std::vector<std::string> sentences;
 	std::string currentStr = "";

@@ -363,7 +363,7 @@ std::vector<std::string> Data::splitSentences(std::string data) {
 	return sentences;
 }

-std::vector<std::string> Data::removeSpaces(std::vector<std::string> data) {
+std::vector<std::string> MLPPData::removeSpaces(std::vector<std::string> data) {
 	for (int i = 0; i < data.size(); i++) {
 		auto it = data[i].begin();
 		for (int j = 0; j < data[i].length(); j++) {
@@ -376,7 +376,7 @@ std::vector<std::string> Data::removeSpaces(std::vector<std::string> data) {
 	return data;
 }

-std::vector<std::string> Data::removeNullByte(std::vector<std::string> data) {
+std::vector<std::string> MLPPData::removeNullByte(std::vector<std::string> data) {
 	for (int i = 0; i < data.size(); i++) {
 		if (data[i] == "\0") {
 			data.erase(data.begin() + i);
@@ -385,7 +385,7 @@ std::vector<std::string> Data::removeNullByte(std::vector<std::string> data) {
 	return data;
 }

-std::vector<std::string> Data::segment(std::string text) {
+std::vector<std::string> MLPPData::segment(std::string text) {
 	std::vector<std::string> segmented_data;
 	int prev_delim = 0;
 	for (int i = 0; i < text.length(); i++) {
@@ -407,7 +407,7 @@ std::vector<std::string> Data::segment(std::string text) {
 	return segmented_data;
 }

-std::vector<double> Data::tokenize(std::string text) {
+std::vector<double> MLPPData::tokenize(std::string text) {
 	int max_num = 0;
 	bool new_num = true;
 	std::vector<std::string> segmented_data = segment(text);
@@ -430,7 +430,7 @@ std::vector<double> Data::tokenize(std::string text) {
 	return tokenized_data;
 }

-std::vector<std::string> Data::removeStopWords(std::string text) {
+std::vector<std::string> MLPPData::removeStopWords(std::string text) {
 	std::vector<std::string> stopWords = { "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these", "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "should", "now" };
 	std::vector<std::string> segmented_data = removeSpaces(segment(toLower(text)));

@@ -444,7 +444,7 @@ std::vector<std::string> Data::removeStopWords(std::string text) {
 	return segmented_data;
 }

-std::vector<std::string> Data::removeStopWords(std::vector<std::string> segmented_data) {
+std::vector<std::string> MLPPData::removeStopWords(std::vector<std::string> segmented_data) {
 	std::vector<std::string> stopWords = { "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these", "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "should", "now" };
 	for (int i = 0; i < segmented_data.size(); i++) {
 		for (int j = 0; j < stopWords.size(); j++) {
@@ -456,7 +456,7 @@ std::vector<std::string> Data::removeStopWords(std::vector<std::string> segmente
 	return segmented_data;
 }

-std::string Data::stemming(std::string text) {
+std::string MLPPData::stemming(std::string text) {
 	// Our list of suffixes which we use to compare against
 	std::vector<std::string> suffixes = { "eer", "er", "ion", "ity", "ment", "ness", "or", "sion", "ship", "th", "able", "ible", "al", "ant", "ary", "ful", "ic", "ious", "ous", "ive", "less", "y", "ed", "en", "ing", "ize", "ise", "ly", "ward", "wise" };
 	int padding_size = 4;
@@ -477,7 +477,7 @@ std::string Data::stemming(std::string text) {
 	return text;
 }

-std::vector<std::vector<double>> Data::BOW(std::vector<std::string> sentences, std::string type) {
+std::vector<std::vector<double>> MLPPData::BOW(std::vector<std::string> sentences, std::string type) {
 	/*
 	STEPS OF BOW:
 		1) To lowercase (done by removeStopWords function by def)
@@ -519,7 +519,7 @@ std::vector<std::vector<double>> Data::BOW(std::vector<std::string> sentences, s
 	return bow;
 }

-std::vector<std::vector<double>> Data::TFIDF(std::vector<std::string> sentences) {
+std::vector<std::vector<double>> MLPPData::TFIDF(std::vector<std::string> sentences) {
 	LinAlg alg;
 	std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences)));

@@ -575,7 +575,7 @@ std::vector<std::vector<double>> Data::TFIDF(std::vector<std::string> sentences)
 	return TFIDF;
 }

-std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> Data::word2Vec(std::vector<std::string> sentences, std::string type, int windowSize, int dimension, double learning_rate, int max_epoch) {
+std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> MLPPData::word2Vec(std::vector<std::string> sentences, std::string type, int windowSize, int dimension, double learning_rate, int max_epoch) {
 	std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences)));

 	std::vector<std::vector<std::string>> segmented_sentences;
@@ -608,7 +608,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> Data::wor

 	inputStrings.insert(inputStrings.end(), outputStrings.begin(), outputStrings.end());

-	std::vector<std::vector<double>> BOW = Data::BOW(inputStrings, "Binary");
+	std::vector<std::vector<double>> BOW = MLPPData::BOW(inputStrings, "Binary");

 	std::vector<std::vector<double>> inputSet;
 	std::vector<std::vector<double>> outputSet;
@@ -634,7 +634,7 @@ std::tuple<std::vector<std::vector<double>>, std::vector<std::string>> Data::wor
 	return { wordEmbeddings, wordList };
 }

-std::vector<std::vector<double>> Data::LSA(std::vector<std::string> sentences, int dim) {
+std::vector<std::vector<double>> MLPPData::LSA(std::vector<std::string> sentences, int dim) {
 	LinAlg alg;
 	std::vector<std::vector<double>> docWordData = BOW(sentences, "Binary");

@@ -650,7 +650,7 @@ std::vector<std::vector<double>> Data::LSA(std::vector<std::string> sentences, i
 	return embeddings;
 }

-std::vector<std::string> Data::createWordList(std::vector<std::string> sentences) {
+std::vector<std::string> MLPPData::createWordList(std::vector<std::string> sentences) {
 	std::string combinedText = "";
 	for (int i = 0; i < sentences.size(); i++) {
 		if (i != 0) {
@@ -663,7 +663,7 @@ std::vector<std::string> Data::createWordList(std::vector<std::string> sentences
 }

 // EXTRA
-void Data::setInputNames(std::string fileName, std::vector<std::string> &inputNames) {
+void MLPPData::setInputNames(std::string fileName, std::vector<std::string> &inputNames) {
 	std::string inputNameTemp;
 	std::ifstream dataFile(fileName);
 	if (!dataFile.is_open()) {
@@ -677,7 +677,7 @@ void Data::setInputNames(std::string fileName, std::vector<std::string> &inputNa
 	dataFile.close();
 }

-std::vector<std::vector<double>> Data::featureScaling(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPData::featureScaling(std::vector<std::vector<double>> X) {
 	LinAlg alg;
 	X = alg.transpose(X);
 	std::vector<double> max_elements, min_elements;
@@ -697,7 +697,7 @@ std::vector<std::vector<double>> Data::featureScaling(std::vector<std::vector<do
 	return alg.transpose(X);
 }

-std::vector<std::vector<double>> Data::meanNormalization(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPData::meanNormalization(std::vector<std::vector<double>> X) {
 	LinAlg alg;
 	Stat stat;
 	// (X_j - mu_j) / std_j, for every j
@@ -709,7 +709,7 @@ std::vector<std::vector<double>> Data::meanNormalization(std::vector<std::vector
 	return X;
 }

-std::vector<std::vector<double>> Data::meanCentering(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPData::meanCentering(std::vector<std::vector<double>> X) {
 	LinAlg alg;
 	Stat stat;
 	for (int i = 0; i < X.size(); i++) {
@@ -721,7 +721,7 @@ std::vector<std::vector<double>> Data::meanCentering(std::vector<std::vector<dou
 	return X;
 }

-std::vector<std::vector<double>> Data::oneHotRep(std::vector<double> tempOutputSet, int n_class) {
+std::vector<std::vector<double>> MLPPData::oneHotRep(std::vector<double> tempOutputSet, int n_class) {
 	std::vector<std::vector<double>> outputSet;
 	outputSet.resize(tempOutputSet.size());
 	for (int i = 0; i < tempOutputSet.size(); i++) {
@@ -736,7 +736,7 @@ std::vector<std::vector<double>> Data::oneHotRep(std::vector<double> tempOutputS
 	return outputSet;
 }

-std::vector<double> Data::reverseOneHot(std::vector<std::vector<double>> tempOutputSet) {
+std::vector<double> MLPPData::reverseOneHot(std::vector<std::vector<double>> tempOutputSet) {
 	std::vector<double> outputSet;
 	int n_class = tempOutputSet[0].size();
 	for (int i = 0; i < tempOutputSet.size(); i++) {
@@ -14,7 +14,7 @@
 #include <vector>


-class Data {
+class MLPPData {
 public:
 	// Load Datasets
 	std::tuple<std::vector<std::vector<double>>, std::vector<double>> loadBreastCancer();

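For orientation, a minimal sketch of the renamed data API in use, built only from signatures visible in this diff (the header path and the element order of the tuple returned by trainTestSplit are assumptions):

#include <vector>
#include "data.h" // assumed header for MLPPData

void prepareIris() {
	MLPPData data; // before this commit: Data data;
	auto [inputSet, outputSet] = data.loadIris();
	// testSize = 0.2 holds out 20% of the rows; the order of the
	// returned split is assumed here.
	auto [inputTrain, outputTrain, inputTest, outputTest] = data.trainTestSplit(inputSet, outputSet, 0.2);
}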
@@ -15,7 +15,7 @@
 #include <random>


-DualSVC::DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel) :
+MLPPDualSVC::MLPPDualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), C(C), kernel(kernel) {
 	y_hat.resize(n);
 	bias = Utilities::biasInitialization();
@@ -23,15 +23,15 @@ DualSVC::DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double>
 	K = kernelFunction(inputSet, inputSet, kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated.
 }

-std::vector<double> DualSVC::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPDualSVC::modelSetTest(std::vector<std::vector<double>> X) {
 	return Evaluate(X);
 }

-double DualSVC::modelTest(std::vector<double> x) {
+double MLPPDualSVC::modelTest(std::vector<double> x) {
 	return Evaluate(x);
 }

-void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPDualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
@@ -79,7 +79,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }

-// void DualSVC::SGD(double learning_rate, int max_epoch, bool UI){
+// void MLPPDualSVC::SGD(double learning_rate, int max_epoch, bool UI){
 // 	class MLPPCost cost;
 // 	MLPPActivation avn;
 // 	LinAlg alg;
@@ -112,7 +112,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 // 	forwardPass();
 // }

-// void DualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
+// void MLPPDualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){
 // 	class MLPPCost cost;
 // 	MLPPActivation avn;
 // 	LinAlg alg;
@@ -152,27 +152,27 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 // 	forwardPass();
 // }

-double DualSVC::score() {
+double MLPPDualSVC::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }

-void DualSVC::save(std::string fileName) {
+void MLPPDualSVC::save(std::string fileName) {
 	Utilities util;
 	util.saveParameters(fileName, alpha, bias);
 }

-double DualSVC::Cost(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
+double MLPPDualSVC::Cost(std::vector<double> alpha, std::vector<std::vector<double>> X, std::vector<double> y) {
 	class MLPPCost cost;
 	return cost.dualFormSVM(alpha, X, y);
 }

-std::vector<double> DualSVC::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPDualSVC::Evaluate(std::vector<std::vector<double>> X) {
 	MLPPActivation avn;
 	return avn.sign(propagate(X));
 }

-std::vector<double> DualSVC::propagate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPDualSVC::propagate(std::vector<std::vector<double>> X) {
 	LinAlg alg;
 	std::vector<double> z;
 	for (int i = 0; i < X.size(); i++) {
@@ -188,12 +188,12 @@ std::vector<double> DualSVC::propagate(std::vector<std::vector<double>> X) {
 	return z;
 }

-double DualSVC::Evaluate(std::vector<double> x) {
+double MLPPDualSVC::Evaluate(std::vector<double> x) {
 	MLPPActivation avn;
 	return avn.sign(propagate(x));
 }

-double DualSVC::propagate(std::vector<double> x) {
+double MLPPDualSVC::propagate(std::vector<double> x) {
 	LinAlg alg;
 	double z = 0;
 	for (int j = 0; j < alpha.size(); j++) {
@@ -205,7 +205,7 @@ double DualSVC::propagate(std::vector<double> x) {
 	return z;
 }

-void DualSVC::forwardPass() {
+void MLPPDualSVC::forwardPass() {
 	LinAlg alg;
 	MLPPActivation avn;

@@ -213,7 +213,7 @@ void DualSVC::forwardPass() {
 	y_hat = avn.sign(z);
 }

-void DualSVC::alphaProjection() {
+void MLPPDualSVC::alphaProjection() {
 	for (int i = 0; i < alpha.size(); i++) {
 		if (alpha[i] > C) {
 			alpha[i] = C;
@@ -223,14 +223,14 @@ void DualSVC::alphaProjection() {
 	}
 }

-double DualSVC::kernelFunction(std::vector<double> u, std::vector<double> v, std::string kernel) {
+double MLPPDualSVC::kernelFunction(std::vector<double> u, std::vector<double> v, std::string kernel) {
 	LinAlg alg;
 	if (kernel == "Linear") {
 		return alg.dot(u, v);
 	} // warning: non-void function does not return a value in all control paths [-Wreturn-type]
 }

-std::vector<std::vector<double>> DualSVC::kernelFunction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B, std::string kernel) {
+std::vector<std::vector<double>> MLPPDualSVC::kernelFunction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B, std::string kernel) {
 	LinAlg alg;
 	if (kernel == "Linear") {
 		return alg.matmult(inputSet, alg.transpose(inputSet));

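The -Wreturn-type warning carried in the hunk above predates this commit; the rename keeps it verbatim. A minimal sketch of one way to quiet it (not part of this commit; the fallback value is an assumption):

// Hypothetical fix: return a defined value on every control path.
double MLPPDualSVC::kernelFunction(std::vector<double> u, std::vector<double> v, std::string kernel) {
	LinAlg alg;
	if (kernel == "Linear") {
		return alg.dot(u, v);
	}
	return 0; // assumed fallback for unrecognized kernel names
}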
@@ -16,10 +16,10 @@



-class DualSVC {
+class MLPPDualSVC {
 public:
-	DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel = "Linear");
-	DualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel, double p, double c);
+	MLPPDualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel = "Linear");
+	MLPPDualSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, double C, std::string kernel, double p, double c);

 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);

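A minimal sketch of the renamed class in use, following the constructor and method signatures in this header (the header path, data, and hyperparameter values are illustrative assumptions):

#include <iostream>
#include <vector>
#include "dual_svc.h" // assumed header for MLPPDualSVC

void trainSVC(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet) {
	// C = 1.0; kernel defaults to "Linear" per the declaration above.
	MLPPDualSVC model(inputSet, outputSet, 1.0);
	model.gradientDescent(0.0001, 100000, false); // learning_rate, max_epoch, UI
	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
}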
@@ -15,7 +15,7 @@
 #include <random>


-ExpReg::ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
+MLPPExpReg::MLPPExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg, double lambda, double alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);
 	weights = Utilities::weightInitialization(k);
@@ -23,15 +23,15 @@ ExpReg::ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> ou
 	bias = Utilities::biasInitialization();
 }

-std::vector<double> ExpReg::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPExpReg::modelSetTest(std::vector<std::vector<double>> X) {
 	return Evaluate(X);
 }

-double ExpReg::modelTest(std::vector<double> x) {
+double MLPPExpReg::modelTest(std::vector<double> x) {
 	return Evaluate(x);
 }

-void ExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	LinAlg alg;
 	Reg regularization;
 	double cost_prev = 0;
@@ -88,7 +88,7 @@ void ExpReg::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }

-void ExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	Reg regularization;
 	double cost_prev = 0;
 	int epoch = 1;
@@ -134,7 +134,7 @@ void ExpReg::SGD(double learning_rate, int max_epoch, bool UI) {
 	forwardPass();
 }

-void ExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) {
 	LinAlg alg;
 	Reg regularization;
 	double cost_prev = 0;
@@ -193,23 +193,23 @@ void ExpReg::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool
 	forwardPass();
 }

-double ExpReg::score() {
+double MLPPExpReg::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }

-void ExpReg::save(std::string fileName) {
+void MLPPExpReg::save(std::string fileName) {
 	Utilities util;
 	util.saveParameters(fileName, weights, initial, bias);
 }

-double ExpReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPExpReg::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
 	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }

-std::vector<double> ExpReg::Evaluate(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPExpReg::Evaluate(std::vector<std::vector<double>> X) {
 	std::vector<double> y_hat;
 	y_hat.resize(X.size());
 	for (int i = 0; i < X.size(); i++) {
@@ -222,7 +222,7 @@ std::vector<double> ExpReg::Evaluate(std::vector<std::vector<double>> X) {
 	return y_hat;
 }

-double ExpReg::Evaluate(std::vector<double> x) {
+double MLPPExpReg::Evaluate(std::vector<double> x) {
 	double y_hat = 0;
 	for (int i = 0; i < x.size(); i++) {
 		y_hat += initial[i] * std::pow(weights[i], x[i]);
@@ -232,6 +232,6 @@ double ExpReg::Evaluate(std::vector<double> x) {
 }

 // a * w^x + b
-void ExpReg::forwardPass() {
+void MLPPExpReg::forwardPass() {
 	y_hat = Evaluate(inputSet);
 }

@@ -12,9 +12,9 @@
 #include <vector>


-class ExpReg {
+class MLPPExpReg {
 public:
-	ExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+	MLPPExpReg(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);

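As the `// a * w^x + b` comment and the Evaluate loop (`y_hat += initial[i] * std::pow(weights[i], x[i])`) in the implementation indicate, the model this class fits is, writing `initial` as a, `weights` as w, and `bias` as b:

\hat{y} = \sum_{j=1}^{k} a_j \, w_j^{x_j} + b

(the sum over the k input dimensions is a reading of the loop shown in this diff, stated here for orientation).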
@@ -15,20 +15,20 @@
 #include <iostream>


-GAN::GAN(double k, std::vector<std::vector<double>> outputSet) :
+MLPPGAN::MLPPGAN(double k, std::vector<std::vector<double>> outputSet) :
 		outputSet(outputSet), n(outputSet.size()), k(k) {
 }

-GAN::~GAN() {
+MLPPGAN::~MLPPGAN() {
 	delete outputLayer;
 }

-std::vector<std::vector<double>> GAN::generateExample(int n) {
+std::vector<std::vector<double>> MLPPGAN::generateExample(int n) {
 	LinAlg alg;
 	return modelSetTestGenerator(alg.gaussianNoise(n, k));
 }

-void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
+void MLPPGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	class MLPPCost cost;
 	LinAlg alg;
 	double cost_prev = 0;
@@ -66,7 +66,7 @@ void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {

 	forwardPass();
 	if (UI) {
-		GAN::UI(epoch, cost_prev, GAN::y_hat, alg.onevec(n));
+		MLPPGAN::UI(epoch, cost_prev, MLPPGAN::y_hat, alg.onevec(n));
 	}

 	epoch++;
@@ -76,14 +76,14 @@ void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI) {
 	}
 }

-double GAN::score() {
+double MLPPGAN::score() {
 	LinAlg alg;
 	Utilities util;
 	forwardPass();
 	return util.performance(y_hat, alg.onevec(n));
 }

-void GAN::save(std::string fileName) {
+void MLPPGAN::save(std::string fileName) {
 	Utilities util;
 	if (!network.empty()) {
 		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
@@ -96,18 +96,18 @@ void GAN::save(std::string fileName) {
 	}
 }

-void GAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
 	LinAlg alg;
 	if (network.empty()) {
-		network.push_back(HiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }

-void GAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
+void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, double lambda, double alpha) {
 	LinAlg alg;
 	if (!network.empty()) {
 		outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
@@ -116,7 +116,7 @@ void GAN::addOutputLayer(std::string weightInit, std::string reg, double lambda,
 	}
 }

-std::vector<std::vector<double>> GAN::modelSetTestGenerator(std::vector<std::vector<double>> X) {
+std::vector<std::vector<double>> MLPPGAN::modelSetTestGenerator(std::vector<std::vector<double>> X) {
 	if (!network.empty()) {
 		network[0].input = X;
 		network[0].forwardPass();
@@ -129,7 +129,7 @@ std::vector<std::vector<double>> GAN::modelSetTestGenerator(std::vector<std::vec
 	return network[network.size() / 2].a;
 }

-std::vector<double> GAN::modelSetTestDiscriminator(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPGAN::modelSetTestDiscriminator(std::vector<std::vector<double>> X) {
 	if (!network.empty()) {
 		for (int i = network.size() / 2 + 1; i < network.size(); i++) {
 			if (i == network.size() / 2 + 1) {
@@ -145,7 +145,7 @@ std::vector<double> GAN::modelSetTestDiscriminator(std::vector<std::vector<doubl
 	return outputLayer->a;
 }

-double GAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
+double MLPPGAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	Reg regularization;
 	class MLPPCost cost;
 	double totalRegTerm = 0;
@@ -159,7 +159,7 @@ double GAN::Cost(std::vector<double> y_hat, std::vector<double> y) {
 	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
 }

-void GAN::forwardPass() {
+void MLPPGAN::forwardPass() {
 	LinAlg alg;
 	if (!network.empty()) {
 		network[0].input = alg.gaussianNoise(n, k);
@@ -177,7 +177,7 @@ void GAN::forwardPass() {
 	y_hat = outputLayer->a;
 }

-void GAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
+void MLPPGAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate) {
 	LinAlg alg;

 	outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
@@ -194,7 +194,7 @@ void GAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<doub
 	}
 }

-void GAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
+void MLPPGAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate) {
 	LinAlg alg;

 	if (!network.empty()) {
@@ -207,7 +207,7 @@ void GAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>
 	}
 }

-std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> GAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
+std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> MLPPGAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
 	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
@@ -243,7 +243,7 @@ std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> G
 	return { cumulativeHiddenLayerWGrad, outputWGrad };
 }

-std::vector<std::vector<std::vector<double>>> GAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
+std::vector<std::vector<std::vector<double>>> MLPPGAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet) {
 	class MLPPCost cost;
 	MLPPActivation avn;
 	LinAlg alg;
@@ -272,7 +272,7 @@ std::vector<std::vector<std::vector<double>>> GAN::computeGeneratorGradients(std
 	return cumulativeHiddenLayerWGrad;
 }

-void GAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
+void MLPPGAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet) {
 	Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
 	std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
 	Utilities::UI(outputLayer->weights, outputLayer->bias);

@@ -17,10 +17,10 @@



-class GAN {
+class MLPPGAN {
 public:
-	GAN(double k, std::vector<std::vector<double>> outputSet);
-	~GAN();
+	MLPPGAN(double k, std::vector<std::vector<double>> outputSet);
+	~MLPPGAN();
 	std::vector<std::vector<double>> generateExample(int n);
 	void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
 	double score();
@@ -46,7 +46,7 @@ private:
 	std::vector<std::vector<double>> outputSet;
 	std::vector<double> y_hat;

-	std::vector<HiddenLayer> network;
+	std::vector<MLPPHiddenLayer> network;
 	OutputLayer *outputLayer;

 	int n;

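A minimal sketch of building and training the renamed GAN, using only methods declared above (layer sizes, the activation and weight-init strings, and all hyperparameter values are illustrative assumptions):

#include <iostream>
#include <vector>
#include "gan.h" // assumed header for MLPPGAN

void trainGAN(std::vector<std::vector<double>> outputSet) {
	MLPPGAN gan(2, outputSet); // k = 2: dimension of the generator's gaussian noise input
	gan.addLayer(5, "Sigmoid", "Default", "None", 0.5, 0.5); // assumed activation / weightInit names
	gan.addLayer(2, "Sigmoid", "Default", "None", 0.5, 0.5);
	gan.addLayer(5, "Sigmoid", "Default", "None", 0.5, 0.5);
	gan.addOutputLayer("Default", "None", 0.5, 0.5); // sigmoid/log-loss head, per addOutputLayer above
	gan.gradientDescent(0.1, 55000, false);
	std::cout << gan.generateExample(100).size() << " samples generated" << std::endl;
}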
@@ -9,7 +9,7 @@
 #include <iostream>


-void GaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
+void MLPPGaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
 	bool condition1 = arithmeticMean(eps);
 	bool condition2 = homoscedasticity(eps);
 	bool condition3 = exogeneity(eps);
@@ -21,7 +21,7 @@ void GaussMarkovChecker::checkGMConditions(std::vector<double> eps) {
 	}
 }

-bool GaussMarkovChecker::arithmeticMean(std::vector<double> eps) {
+bool MLPPGaussMarkovChecker::arithmeticMean(std::vector<double> eps) {
 	Stat stat;
 	if (stat.mean(eps) == 0) {
 		return 1;
@@ -30,7 +30,7 @@ bool GaussMarkovChecker::arithmeticMean(std::vector<double> eps) {
 	}
 }

-bool GaussMarkovChecker::homoscedasticity(std::vector<double> eps) {
+bool MLPPGaussMarkovChecker::homoscedasticity(std::vector<double> eps) {
 	Stat stat;
 	double currentVar = (eps[0] - stat.mean(eps)) * (eps[0] - stat.mean(eps)) / eps.size();
 	for (int i = 0; i < eps.size(); i++) {
@@ -41,7 +41,7 @@ bool GaussMarkovChecker::homoscedasticity(std::vector<double> eps) {
 	return 1;
 }

-bool GaussMarkovChecker::exogeneity(std::vector<double> eps) {
+bool MLPPGaussMarkovChecker::exogeneity(std::vector<double> eps) {
 	Stat stat;
 	for (int i = 0; i < eps.size(); i++) {
 		for (int j = 0; j < eps.size(); j++) {

@@ -12,7 +12,7 @@
 #include <vector>


-class GaussMarkovChecker {
+class MLPPGaussMarkovChecker {
 public:
 	void checkGMConditions(std::vector<double> eps);

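For reference, the three conditions checkGMConditions tests on the residual vector ε, matching the three helper methods above, are conventionally written:

E[\varepsilon_i] = 0, \qquad \mathrm{Var}(\varepsilon_i) = \sigma^2 \ \forall i, \qquad \mathrm{Cov}(\varepsilon_i, \varepsilon_j) = 0 \ (i \neq j)

(zero-mean errors, homoscedasticity, and uncorrelated errors; note that the method named exogeneity loops over pairwise error products, so it checks the third condition rather than error/regressor independence).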
@@ -14,14 +14,14 @@
 #include <random>


-GaussianNB::GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
+MLPPGaussianNB::MLPPGaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num) :
 		inputSet(inputSet), outputSet(outputSet), class_num(class_num) {
 	y_hat.resize(outputSet.size());
 	Evaluate();
 	LinAlg alg;
 }

-std::vector<double> GaussianNB::modelSetTest(std::vector<std::vector<double>> X) {
+std::vector<double> MLPPGaussianNB::modelSetTest(std::vector<std::vector<double>> X) {
 	std::vector<double> y_hat;
 	for (int i = 0; i < X.size(); i++) {
 		y_hat.push_back(modelTest(X[i]));
@@ -29,7 +29,7 @@ std::vector<double> GaussianNB::modelSetTest(std::vector<std::vector<double>> X)
 	return y_hat;
 }

-double GaussianNB::modelTest(std::vector<double> x) {
+double MLPPGaussianNB::modelTest(std::vector<double> x) {
 	Stat stat;
 	LinAlg alg;

@@ -42,12 +42,12 @@ double GaussianNB::modelTest(std::vector<double> x) {
 	return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
 }

-double GaussianNB::score() {
+double MLPPGaussianNB::score() {
 	Utilities util;
 	return util.performance(y_hat, outputSet);
 }

-void GaussianNB::Evaluate() {
+void MLPPGaussianNB::Evaluate() {
 	Stat stat;
 	LinAlg alg;

@@ -11,9 +11,9 @@
 #include <vector>


-class GaussianNB {
+class MLPPGaussianNB {
 public:
-	GaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
+	MLPPGaussianNB(std::vector<std::vector<double>> inputSet, std::vector<double> outputSet, int class_num);
 	std::vector<double> modelSetTest(std::vector<std::vector<double>> X);
 	double modelTest(std::vector<double> x);
 	double score();

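The prediction rule behind modelTest is the standard Gaussian naive Bayes argmax, stated here for context (the diff shows only the argmax over the per-class score array, so the density form below is the textbook assumption rather than something visible in this hunk):

\hat{y} = \arg\max_{c} \ P(C = c) \prod_{j=1}^{k} \mathcal{N}(x_j \mid \mu_{c,j}, \sigma_{c,j}^2)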
@@ -13,7 +13,7 @@
 #include <random>


-HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
+MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha) :
 		n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = Utilities::weightInitialization(input[0].size(), n_hidden, weightInit);
 	bias = Utilities::biasInitialization(n_hidden);
@@ -97,14 +97,14 @@ HiddenLayer::HiddenLayer(int n_hidden, std::string activation, std::vector<std::
 	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 }

-void HiddenLayer::forwardPass() {
+void MLPPHiddenLayer::forwardPass() {
 	LinAlg alg;
 	MLPPActivation avn;
 	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
 	a = (avn.*activation_map[activation])(z, 0);
 }

-void HiddenLayer::Test(std::vector<double> x) {
+void MLPPHiddenLayer::Test(std::vector<double> x) {
 	LinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);

@@ -15,9 +15,9 @@
 #include <vector>


-class HiddenLayer {
+class MLPPHiddenLayer {
 public:
-	HiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);
+	MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<double>> input, std::string weightInit, std::string reg, double lambda, double alpha);

 	int n_hidden;
 	std::string activation;

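The forwardPass body above is the standard dense-layer map; with X = input, W = weights, b = bias, and σ the function selected from activation_map, it computes

Z = XW + \vec{b}, \qquad A = \sigma(Z)

which is exactly the mat_vec_add(matmult(input, weights), bias) call followed by the activation call shown in the hunk.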
@@ -141,10 +141,10 @@ void MANN::save(std::string fileName) {

 void MANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
 	if (network.empty()) {
-		network.push_back(HiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -37,7 +37,7 @@ private:
 	std::vector<std::vector<double>> outputSet;
 	std::vector<std::vector<double>> y_hat;

-	std::vector<HiddenLayer> network;
+	std::vector<MLPPHiddenLayer> network;
 	MultiOutputLayer *outputLayer;

 	int n;

@@ -19,7 +19,7 @@ PCA::PCA(std::vector<std::vector<double>> inputSet, int k) :

 std::vector<std::vector<double>> PCA::principalComponents() {
 	LinAlg alg;
-	Data data;
+	MLPPData data;

 	auto [U, S, Vt] = alg.SVD(alg.cov(inputSet));
 	X_normalized = data.meanCentering(inputSet);

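For context, the two lines visible above follow the classical PCA recipe: mean-center the data and take the SVD of its covariance matrix,

\mathrm{cov}(X) = U S V^{T}

after which the leading k columns of U serve as the principal directions (the projection step itself lies outside this hunk; reading k as the number of retained components is the standard interpretation of the class's k parameter).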
@@ -245,7 +245,7 @@ std::vector<std::vector<double>> SoftmaxNet::getEmbeddings() {

 double SoftmaxNet::Cost(std::vector<std::vector<double>> y_hat, std::vector<std::vector<double>> y) {
 	Reg regularization;
-	Data data;
+	MLPPData data;
 	class MLPPCost cost;
 	return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
 }

@@ -42,7 +42,7 @@ double Stat::median(std::vector<double> x) {
 }

 std::vector<double> Stat::mode(const std::vector<double> &x) {
-	Data data;
+	MLPPData data;
 	std::vector<double> x_set = data.vecToSet(x);
 	std::map<double, int> element_num;
 	for (int i = 0; i < x_set.size(); i++) {

@@ -108,10 +108,10 @@ void WGAN::save(std::string fileName) {
 void WGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha) {
 	LinAlg alg;
 	if (network.empty()) {
-		network.push_back(HiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -46,7 +46,7 @@ private:
 	std::vector<std::vector<double>> outputSet;
 	std::vector<double> y_hat;

-	std::vector<HiddenLayer> network;
+	std::vector<MLPPHiddenLayer> network;
 	OutputLayer *outputLayer;

 	int n;