From 2e935a4d87cfd0f0c961c7e0caa3b4dc421f182b Mon Sep 17 00:00:00 2001
From: novak_99
Date: Tue, 30 Nov 2021 15:00:29 -0800
Subject: [PATCH] added rgb2gray

---
 MLPP/Data/Data.cpp | 20 ++++++++++++--------
 MLPP/Data/Data.hpp |  2 +-
 main.cpp           | 50 ++++++++++++++++++++++++++++----------------------
 3 files changed, 41 insertions(+), 31 deletions(-)

diff --git a/MLPP/Data/Data.cpp b/MLPP/Data/Data.cpp
index 43ba2ec..03a17a4 100644
--- a/MLPP/Data/Data.cpp
+++ b/MLPP/Data/Data.cpp
@@ -139,14 +139,18 @@ namespace MLPP{
     }
 
     // Images
-
-    void Data::getImage(std::string fileName, std::vector<char>& image){
-        std::ifstream img(fileName, std::ios::binary);
-        if(!img.is_open()){
-            std::cout << "The file failed to open." << std::endl;
+    std::vector<std::vector<double>> Data::rgb2gray(std::vector<std::vector<std::vector<double>>> input){
+        std::vector<std::vector<double>> grayScale;
+        grayScale.resize(input[0].size());
+        for(int i = 0; i < grayScale.size(); i++){
+            grayScale[i].resize(input[0][i].size());
         }
-        std::vector<char> v{std::istreambuf_iterator<char>{img}, {}};
-        image = v;
+        for(int i = 0; i < grayScale.size(); i++){
+            for(int j = 0; j < grayScale[i].size(); j++){
+                grayScale[i][j] = 0.299 * input[0][i][j] + 0.587 * input[1][i][j] + 0.114 * input[2][i][j];
+            }
+        }
+        return grayScale;
     }
 
     // TEXT-BASED & NLP
@@ -449,7 +453,7 @@ namespace MLPP{
         if(type == "Skipgram"){
             model = new SoftmaxNet(outputSet, inputSet, dimension);
         }
-        else { // else = CBOW. We maintain it is a default, however.
+        else { // else = CBOW. We keep it as the default.
             model = new SoftmaxNet(inputSet, outputSet, dimension);
         }
         model->gradientDescent(learning_rate, max_epoch, 1);
diff --git a/MLPP/Data/Data.hpp b/MLPP/Data/Data.hpp
index 0173dc3..aae8de0 100644
--- a/MLPP/Data/Data.hpp
+++ b/MLPP/Data/Data.hpp
@@ -29,7 +29,7 @@ class Data{
         void printData(std::string& inputName, std::string& outputName, std::vector<double>& inputSet, std::vector<double>& outputSet);
 
         // Images
-        void getImage(std::string fileName, std::vector<char>& image);
+        std::vector<std::vector<double>> rgb2gray(std::vector<std::vector<std::vector<double>>> input);
 
         // Text-Based & NLP
         std::string toLower(std::string text);
diff --git a/main.cpp b/main.cpp
index 9b4427e..f00d724 100644
--- a/main.cpp
+++ b/main.cpp
@@ -121,9 +121,9 @@ int main() {
     // // OBJECTS
     Stat stat;
     LinAlg alg;
-    // Activation avn;
-    // Cost cost;
-    // Data data;
+    Activation avn;
+    Cost cost;
+    Data data;
     Convolutions conv;
 
     // DATA SETS
@@ -305,10 +305,12 @@ int main() {
     // // MLP
     // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
+    // inputSet = alg.transpose(inputSet);
     // std::vector<double> outputSet = {0,1,1,0};
-    // MLP model(alg.transpose(inputSet), outputSet, 2);
+
+    // MLP model(inputSet, outputSet, 2);
     // model.gradientDescent(0.1, 10000, 0);
-    // alg.printVector(model.modelSetTest(alg.transpose(inputSet)));
+    // alg.printVector(model.modelSetTest(inputSet));
     // std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 
     // // SOFTMAX NETWORK
@@ -343,17 +345,21 @@ int main() {
     // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
     // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
 
-    // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
-    // std::vector<double> outputSet = {0,1,1,0};
-    // ANN ann(alg.transpose(inputSet), outputSet);
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
+    // typedef std::vector<std::vector<double>> Matrix;
+    // typedef std::vector<double> Vector;
+
+    // Matrix inputSet = {{0,0}, {0,1}, {1,0}, {1,1}}; // XOR
+    // Vector outputSet = {0,1,1,0};
+
+    // ANN ann(inputSet, outputSet);
     // ann.addLayer(10, "Sigmoid");
+    // ann.addLayer(10, "Sigmoid"); // Add more layers as needed.
// ann.addOutputLayer("Sigmoid", "LogLoss"); - // ann.gradientDescent(0.1, 80000, 0); - // alg.printVector(ann.modelSetTest(alg.transpose(inputSet))); - // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl; + // ann.gradientDescent(0.1, 20000, 0); + + // Vector predictions = ann.modelSetTest(inputSet); + // alg.printVector(predictions); // Testing out the model's preds for train set. + // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl; // Accuracy. // // DYNAMICALLY SIZED MANN (Multidimensional Output ANN) // std::vector> inputSet = {{1,2,3},{2,4,6},{3,6,9},{4,8,12}}; @@ -542,13 +548,13 @@ int main() { // alg.printMatrix(R); // // Checking positive-definiteness checker. For Cholesky Decomp. - std::vector> A = - { - {1,-1,-1,-1}, - {-1,2,2,2}, - {-1,2,3,1}, - {-1,2,1,4} - }; + // std::vector> A = + // { + // {1,-1,-1,-1}, + // {-1,2,2,2}, + // {-1,2,3,1}, + // {-1,2,1,4} + // }; // std::cout << std::boolalpha << alg.positiveDefiniteChecker(A) << std::endl; // auto [L, Lt] = alg.chol(A); // works. @@ -604,7 +610,7 @@ int main() { // alg.printMatrix(conv.dx(A)); // alg.printMatrix(conv.dy(A)); - alg.printMatrix(conv.gradOrientation(A)); + // alg.printMatrix(conv.gradOrientation(A)); return 0; }