diff --git a/MLPP/Data/Data.cpp b/MLPP/Data/Data.cpp
index 9d29bea..c27bd4d 100644
--- a/MLPP/Data/Data.cpp
+++ b/MLPP/Data/Data.cpp
@@ -154,6 +154,63 @@ namespace MLPP{
         }
         return grayScale;
     }
+
+    std::vector<std::vector<std::vector<double>>> Data::rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input){
+        LinAlg alg;
+        std::vector<std::vector<std::vector<double>>> YCbCr;
+        YCbCr = alg.resize(YCbCr, input);
+        for(int i = 0; i < YCbCr[0].size(); i++){ // rows of one channel; the outermost index selects the channel
+            for(int j = 0; j < YCbCr[0][i].size(); j++){
+                YCbCr[0][i][j] = 0.299 * input[0][i][j] + 0.587 * input[1][i][j] + 0.114 * input[2][i][j];
+                YCbCr[1][i][j] = -0.169 * input[0][i][j] - 0.331 * input[1][i][j] + 0.500 * input[2][i][j];
+                YCbCr[2][i][j] = 0.500 * input[0][i][j] - 0.419 * input[1][i][j] - 0.081 * input[2][i][j];
+            }
+        }
+        return YCbCr;
+    }
+
+    std::vector<std::vector<std::vector<double>>> Data::rgb2hsv(std::vector<std::vector<std::vector<double>>> input){
+        LinAlg alg;
+        std::vector<std::vector<std::vector<double>>> HSV;
+        HSV = alg.resize(HSV, input);
+        for(int i = 0; i < HSV[0].size(); i++){
+            for(int j = 0; j < HSV[0][i].size(); j++){
+                double rPrime = input[0][i][j] / 255;
+                double gPrime = input[1][i][j] / 255;
+                double bPrime = input[2][i][j] / 255;
+
+                double cMax = alg.max({rPrime, gPrime, bPrime});
+                double cMin = alg.min({rPrime, gPrime, bPrime});
+                double delta = cMax - cMin;
+
+                // H calculation.
+                if(delta == 0){
+                    HSV[0][i][j] = 0;
+                }
+                else{
+                    if(cMax == rPrime){
+                        HSV[0][i][j] = 60 * fmod(((gPrime - bPrime) / delta), 6);
+                    }
+                    else if(cMax == gPrime){
+                        HSV[0][i][j] = 60 * ( (bPrime - rPrime) / delta + 2);
+                    }
+                    else{ // cMax == bPrime
+                        HSV[0][i][j] = 60 * ( (rPrime - gPrime) / delta + 4);
+                    }
+                }
+
+                // S calculation.
+                if(cMax == 0){
+                    HSV[1][i][j] = 0;
+                }
+                else{ HSV[1][i][j] = delta / cMax; }
+
+                // V calculation.
+                HSV[2][i][j] = cMax;
+            }
+        }
+        return HSV;
+    }
 
     // TEXT-BASED & NLP
     std::string Data::toLower(std::string text){
diff --git a/MLPP/Data/Data.hpp b/MLPP/Data/Data.hpp
index 9de85d9..9770d79 100644
--- a/MLPP/Data/Data.hpp
+++ b/MLPP/Data/Data.hpp
@@ -30,6 +30,8 @@ class Data{
 
         // Images
         std::vector<std::vector<double>> rgb2gray(std::vector<std::vector<std::vector<double>>> input);
+        std::vector<std::vector<std::vector<double>>> rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input);
+        std::vector<std::vector<std::vector<double>>> rgb2hsv(std::vector<std::vector<std::vector<double>>> input);
 
         // Text-Based & NLP
         std::string toLower(std::string text);
diff --git a/a.out b/a.out
index d22b03a..540093a 100755
Binary files a/a.out and b/a.out differ
diff --git a/main.cpp b/main.cpp
index e965fd2..b79b4e9 100644
--- a/main.cpp
+++ b/main.cpp
@@ -363,18 +363,18 @@ int main() {
     // Possible Weight Init Methods: Default, Uniform, HeNormal, HeUniform, XavierNormal, XavierUniform
     // Possible Activations: Linear, Sigmoid, Swish, Softplus, Softsign, CLogLog, Ar{Sinh, Cosh, Tanh, Csch, Sech, Coth}, GaussianCDF, GELU, UnitStep
     // Possible Loss Functions: MSE, RMSE, MBE, LogLoss, CrossEntropy, HingeLoss
 
-    std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
-    std::vector<double> outputSet = {0,1,1,0};
-    ANN ann(alg.transpose(inputSet), outputSet);
-    ann.addLayer(2, "Sigmoid");
-    ann.addLayer(2, "Sigmoid");
-    ann.addOutputLayer("Sigmoid", "LogLoss");
+    // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
+    // std::vector<double> outputSet = {0,1,1,0};
+    // ANN ann(alg.transpose(inputSet), outputSet);
+    // ann.addLayer(2, "Sigmoid");
+    // ann.addLayer(2, "Sigmoid");
+    // ann.addOutputLayer("Sigmoid", "LogLoss");
     //ann.AMSGrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, 1);
     //ann.Adadelta(1, 1000, 2, 0.9, 0.000001, 1);
     //ann.Momentum(0.1, 8000, 2, 0.9, true, 1);
     //ann.setLearningRateScheduler("Step", 0.5, 1000);
-    ann.gradientDescent(1, 5, 1);
+    // ann.gradientDescent(1, 5, 1);
     //alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
     //std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
 
@@ -466,16 +466,23 @@ int main() {
     // // CONVOLUTION, POOLING, ETC..
 
-    // std::vector<std::vector<double>> input = {
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0},
-    // {1,1,1,1,0,0,0,0}
-    // };
+    std::vector<std::vector<double>> input = {
+        {255,255,255,255,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0},
+        {1,1,1,1,0,0,0,0}
+    };
+
+    std::vector<std::vector<std::vector<double>>> tensorSet;
+    tensorSet.push_back(input);
+    tensorSet.push_back(input);
+    tensorSet.push_back(input);
+
+    alg.printTensor(data.rgb2hsv(tensorSet));
 
     // alg.printMatrix(conv.convolve(input, conv.getPrewittVertical(), 1)); // Can use padding
     // alg.printMatrix(conv.pool(input, 4, 4, "Max")); // Can use Max, Min, or Average pooling.
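
The YCbCr kernel added in Data.cpp is the JPEG-style transform, so it can be sanity-checked per pixel against the textbook inverse (R = Y + 1.402*Cr, G = Y - 0.344136*Cb - 0.714136*Cr, B = Y + 1.772*Cb). Below is a minimal standalone sketch, not part of the MLPP API (rgb2ycbcrPixel is a hypothetical helper); recovery is only approximate because the forward coefficients are rounded to three decimals.

#include <cstdio>

// Forward transform, copied from Data::rgb2ycbcr (rounded JPEG coefficients).
void rgb2ycbcrPixel(double r, double g, double b, double& y, double& cb, double& cr){
    y  =  0.299 * r + 0.587 * g + 0.114 * b;
    cb = -0.169 * r - 0.331 * g + 0.500 * b;
    cr =  0.500 * r - 0.419 * g - 0.081 * b;
}

int main(){
    double y, cb, cr;
    rgb2ycbcrPixel(255, 0, 0, y, cb, cr); // pure red
    // Textbook inverse; expect approximately (255, 0, 0) back.
    double r = y + 1.402 * cr;
    double g = y - 0.344136 * cb - 0.714136 * cr;
    double b = y + 1.772 * cb;
    std::printf("Y=%.2f Cb=%.2f Cr=%.2f -> R=%.1f G=%.1f B=%.1f\n", y, cb, cr, r, g, b);
}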
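
The per-pixel HSV math can likewise be exercised in isolation. A minimal sketch using the same formulas as Data::rgb2hsv (rgb2hsvPixel is a hypothetical helper, not an MLPP function); pure red should come back as H = 0, S = 1, V = 1.

#include <cmath>
#include <algorithm>
#include <cstdio>

// Per-pixel RGB (0-255) -> HSV, mirroring the loop body of Data::rgb2hsv.
void rgb2hsvPixel(double r, double g, double b, double& h, double& s, double& v){
    double rPrime = r / 255, gPrime = g / 255, bPrime = b / 255;
    double cMax  = std::max({rPrime, gPrime, bPrime});
    double cMin  = std::min({rPrime, gPrime, bPrime});
    double delta = cMax - cMin;

    if(delta == 0){ h = 0; } // achromatic: hue undefined, use 0
    else if(cMax == rPrime){ h = 60 * std::fmod((gPrime - bPrime) / delta, 6); }
    else if(cMax == gPrime){ h = 60 * ((bPrime - rPrime) / delta + 2); }
    else                   { h = 60 * ((rPrime - gPrime) / delta + 4); } // cMax == bPrime

    s = (cMax == 0) ? 0 : delta / cMax;
    v = cMax;
}

int main(){
    double h, s, v;
    rgb2hsvPixel(255, 0, 0, h, s, v); // pure red
    std::printf("H=%.1f S=%.2f V=%.2f\n", h, s, v); // expect H=0.0 S=1.00 V=1.00
}

Note that the tensorSet demo in main.cpp pushes the same matrix for all three channels, so every pixel is achromatic (delta == 0) and rgb2hsv should print H = 0 and S = 0 across the board.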