Added RGB-to-HSV color-space conversion (Data::rgb2hsv)

This commit is contained in:
novak_99 2022-02-11 19:26:04 -08:00
parent 2465c3b3aa
commit 453b1a8648
4 changed files with 83 additions and 17 deletions

View File

@ -154,6 +154,63 @@ namespace MLPP{
}
return grayScale;
}
// Converts an RGB image tensor to the YCbCr color space.
// input: 3 x rows x cols tensor of channel planes in order {R, G, B}
//        (assumes exactly 3 channels — TODO confirm expected value range with callers).
// Returns a tensor of the same shape with channel planes {Y, Cb, Cr}.
std::vector<std::vector<std::vector<double>>> Data::rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input){
    LinAlg alg;
    std::vector<std::vector<std::vector<double>>> YCbCr;
    YCbCr = alg.resize(YCbCr, input); // allocate output with the same shape as input
    // BUG FIX: iterate over pixel positions (rows x cols), not channel planes.
    // The previous bounds (YCbCr.size() / YCbCr[i].size()) walked the 3 channel
    // planes as "rows", so only the first 3 rows of the image were converted.
    for(int i = 0; i < YCbCr[0].size(); i++){
        for(int j = 0; j < YCbCr[0][i].size(); j++){
            // BT.601-style RGB -> YCbCr conversion matrix.
            YCbCr[0][i][j] = 0.299 * input[0][i][j] + 0.587 * input[1][i][j] + 0.114 * input[2][i][j];
            YCbCr[1][i][j] = -0.169 * input[0][i][j] - 0.331 * input[1][i][j] + 0.500 * input[2][i][j];
            YCbCr[2][i][j] = 0.500 * input[0][i][j] - 0.419 * input[1][i][j] - 0.081 * input[2][i][j];
        }
    }
    return YCbCr;
}
// Converts an RGB image tensor to the HSV color space.
// input: 3 x rows x cols tensor of channel planes in order {R, G, B},
//        with channel values in [0, 255] (they are normalized by 255 below).
// Returns a tensor of the same shape with channel planes {H, S, V},
// where H is in degrees [0, 360) and S, V are in [0, 1].
std::vector<std::vector<std::vector<double>>> Data::rgb2hsv(std::vector<std::vector<std::vector<double>>> input){
    LinAlg alg;
    std::vector<std::vector<std::vector<double>>> HSV;
    HSV = alg.resize(HSV, input); // allocate output with the same shape as input
    // BUG FIX: iterate over pixel positions (rows x cols), not channel planes.
    // The previous bounds (HSV.size() / HSV[i].size()) walked the 3 channel
    // planes as "rows", so only the first 3 rows of the image were converted.
    for(int i = 0; i < HSV[0].size(); i++){
        for(int j = 0; j < HSV[0][i].size(); j++){
            // Normalize channels to [0, 1].
            double rPrime = input[0][i][j] / 255;
            double gPrime = input[1][i][j] / 255;
            double bPrime = input[2][i][j] / 255;

            double cMax = alg.max({rPrime, gPrime, bPrime});
            double cMin = alg.min({rPrime, gPrime, bPrime});
            double delta = cMax - cMin; // chroma

            // H (hue) — which sector of the color wheel, 60 degrees per sector.
            if(delta == 0){
                HSV[0][i][j] = 0; // achromatic: hue is undefined, use 0 by convention
            }
            else{
                if(cMax == rPrime){
                    HSV[0][i][j] = 60 * fmod(((gPrime - bPrime) / delta), 6);
                }
                else if(cMax == gPrime){
                    HSV[0][i][j] = 60 * ( (bPrime - rPrime) / delta + 2);
                }
                else{ // cMax == bPrime
                    HSV[0][i][j] = 60 * ( (rPrime - gPrime) / delta + 6);
                }
            }

            // S (saturation) — chroma relative to value; 0 for black to avoid /0.
            if(cMax == 0){
                HSV[1][i][j] = 0;
            }
            else{ HSV[1][i][j] = delta/cMax; }

            // V (value) — the largest normalized channel.
            HSV[2][i][j] = cMax;
        }
    }
    return HSV;
}
// TEXT-BASED & NLP
std::string Data::toLower(std::string text){

View File

@ -30,6 +30,8 @@ class Data{
// Images
// RGB (3 x rows x cols tensor) -> single grayscale plane.
std::vector<std::vector<double>> rgb2gray(std::vector<std::vector<std::vector<double>>> input);
// RGB tensor -> YCbCr tensor of the same 3 x rows x cols shape.
std::vector<std::vector<std::vector<double>>> rgb2ycbcr(std::vector<std::vector<std::vector<double>>> input);
// RGB tensor -> HSV tensor of the same 3 x rows x cols shape.
std::vector<std::vector<std::vector<double>>> rgb2hsv(std::vector<std::vector<std::vector<double>>> input);
// Text-Based & NLP
std::string toLower(std::string text);

BIN
a.out

Binary file not shown.

View File

@ -363,18 +363,18 @@ int main() {
// Possible Weight Init Methods: Default, Uniform, HeNormal, HeUniform, XavierNormal, XavierUniform
// Possible Activations: Linear, Sigmoid, Swish, Softplus, Softsign, CLogLog, Ar{Sinh, Cosh, Tanh, Csch, Sech, Coth}, GaussianCDF, GELU, UnitStep
// Possible Loss Functions: MSE, RMSE, MBE, LogLoss, CrossEntropy, HingeLoss
std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
std::vector<double> outputSet = {0,1,1,0};
ANN ann(alg.transpose(inputSet), outputSet);
ann.addLayer(2, "Sigmoid");
ann.addLayer(2, "Sigmoid");
ann.addOutputLayer("Sigmoid", "LogLoss");
// std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
// std::vector<double> outputSet = {0,1,1,0};
// ANN ann(alg.transpose(inputSet), outputSet);
// ann.addLayer(2, "Sigmoid");
// ann.addLayer(2, "Sigmoid");
// ann.addOutputLayer("Sigmoid", "LogLoss");
//ann.AMSGrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, 1);
//ann.Adadelta(1, 1000, 2, 0.9, 0.000001, 1);
//ann.Momentum(0.1, 8000, 2, 0.9, true, 1);
//ann.setLearningRateScheduler("Step", 0.5, 1000);
ann.gradientDescent(1, 5, 1);
// ann.gradientDescent(1, 5, 1);
//alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
//std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
@ -466,16 +466,23 @@ int main() {
// // CONVOLUTION, POOLING, ETC..
// std::vector<std::vector<double>> input = {
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0},
// {1,1,1,1,0,0,0,0}
// };
std::vector<std::vector<double>> input = {
{255,255,255,255,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0},
{1,1,1,1,0,0,0,0}
};
std::vector<std::vector<std::vector<double>>> tensorSet;
tensorSet.push_back(input);
tensorSet.push_back(input);
tensorSet.push_back(input);
alg.printTensor(data.rgb2hsv(tensorSet));
// alg.printMatrix(conv.convolve(input, conv.getPrewittVertical(), 1)); // Can use padding
// alg.printMatrix(conv.pool(input, 4, 4, "Max")); // Can use Max, Min, or Average pooling.