added rgb2gray

novak_99 2021-11-30 15:00:29 -08:00
parent 66883a3f5d
commit 2e935a4d87
3 changed files with 41 additions and 31 deletions

View File

@@ -139,14 +139,18 @@ namespace MLPP{
     }
     // Images
-    void Data::getImage(std::string fileName, std::vector<double>& image){
-        std::ifstream img(fileName, std::ios::binary);
-        if(!img.is_open()){
-            std::cout << "The file failed to open." << std::endl;
-        }
-        std::vector<double> v{std::istreambuf_iterator<char>{img}, {}};
-        image = v;
+    std::vector<std::vector<double>> Data::rgb2gray(std::vector<std::vector<std::vector<double>>> input){
+        std::vector<std::vector<double>> grayScale;
+        grayScale.resize(input[0].size());
+        for(int i = 0; i < grayScale.size(); i++){
+            grayScale[i].resize(input[0][i].size());
+        }
+        for(int i = 0; i < grayScale.size(); i++){
+            for(int j = 0; j < grayScale[i].size(); j++){
+                grayScale[i][j] = 0.299 * input[0][i][j] + 0.587 * input[1][i][j] + 0.114 * input[2][i][j];
+            }
+        }
+        return grayScale;
     }
     // TEXT-BASED & NLP
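Note: the new rgb2gray is the standard ITU-R BT.601 luma transform, gray = 0.299*R + 0.587*G + 0.114*B, applied per pixel across the three channel matrices. A minimal standalone sketch of the same computation (the 2x2 test image is made up for illustration; MLPP's Data class is not used):

#include <iostream>
#include <vector>

// Standalone sketch of the BT.601 luma conversion added above.
// Input layout assumed: input[channel][row][col], channels ordered R, G, B.
std::vector<std::vector<double>> rgb2gray(const std::vector<std::vector<std::vector<double>>>& input){
    std::vector<std::vector<double>> grayScale(input[0].size());
    for(size_t i = 0; i < grayScale.size(); i++){
        grayScale[i].resize(input[0][i].size());
        for(size_t j = 0; j < grayScale[i].size(); j++){
            grayScale[i][j] = 0.299 * input[0][i][j] + 0.587 * input[1][i][j] + 0.114 * input[2][i][j];
        }
    }
    return grayScale;
}

int main(){
    // Hypothetical 2x2 image: pure red, pure green, pure blue, white.
    std::vector<std::vector<std::vector<double>>> img = {
        {{1, 0}, {0, 1}},  // R channel
        {{0, 1}, {0, 1}},  // G channel
        {{0, 0}, {1, 1}}   // B channel
    };
    for(const auto& row : rgb2gray(img)){
        for(double px : row) std::cout << px << " ";  // prints 0.299 0.587 / 0.114 1
        std::cout << std::endl;
    }
    return 0;
}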
@@ -449,7 +453,7 @@ namespace MLPP{
     if(type == "Skipgram"){
         model = new SoftmaxNet(outputSet, inputSet, dimension);
     }
-    else { // else = CBOW. We maintain it is a default, however.
+    else { // else = CBOW. We maintain it as the default.
         model = new SoftmaxNet(inputSet, outputSet, dimension);
     }
     model->gradientDescent(learning_rate, max_epoch, 1);
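Note: the two branches above differ only in which side of each training pair feeds the SoftmaxNet: Skipgram predicts context words from the center word, while CBOW predicts the center word from its context, hence the swapped constructor arguments. A sketch of how the pairs reverse (the tokens and window size are illustrative, not MLPP's actual pipeline):

#include <iostream>
#include <string>
#include <vector>

// Sketch: how the (input, output) roles flip between Skipgram and CBOW.
int main(){
    std::vector<std::string> tokens = {"the", "quick", "brown", "fox"};
    int window = 1; // context radius around each center word
    for(int center = 0; center < (int)tokens.size(); center++){
        for(int off = -window; off <= window; off++){
            int ctx = center + off;
            if(off == 0 || ctx < 0 || ctx >= (int)tokens.size()) continue;
            // Skipgram: center word -> context word.
            std::cout << "Skipgram: " << tokens[center] << " -> " << tokens[ctx] << std::endl;
            // CBOW reverses the pair: context word -> center word.
            std::cout << "CBOW:     " << tokens[ctx] << " -> " << tokens[center] << std::endl;
        }
    }
    return 0;
}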

View File

@@ -29,7 +29,7 @@ class Data{
     void printData(std::string& inputName, std::string& outputName, std::vector<double>& inputSet, std::vector<double>& outputSet);
     // Images
-    void getImage(std::string fileName, std::vector<double>& image);
+    std::vector<std::vector<double>> rgb2gray(std::vector<std::vector<std::vector<double>>> input);
     // Text-Based & NLP
     std::string toLower(std::string text);

View File

@@ -121,9 +121,9 @@ int main() {
     // // OBJECTS
     Stat stat;
     LinAlg alg;
-    // Activation avn;
-    // Cost cost;
-    // Data data;
+    Activation avn;
+    Cost cost;
+    Data data;
     Convolutions conv;
     // DATA SETS
@@ -305,10 +305,12 @@ int main() {
     // // MLP
     // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
+    // inputSet = alg.transpose(inputSet);
     // std::vector<double> outputSet = {0,1,1,0};
-    // MLP model(alg.transpose(inputSet), outputSet, 2);
+    // MLP model(inputSet, outputSet, 2);
     // model.gradientDescent(0.1, 10000, 0);
-    // alg.printVector(model.modelSetTest(alg.transpose(inputSet)));
+    // alg.printVector(model.modelSetTest(inputSet));
     // std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
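Note: the refactor above hoists the transpose out of the individual calls. {{0,0,1,1}, {0,1,0,1}} stores one row per feature, while the models expect one row per example, so transposing once up front yields {{0,0}, {0,1}, {1,0}, {1,1}}. A standalone sketch of that reshaping (transpose is reimplemented here; MLPP's LinAlg is not used):

#include <iostream>
#include <vector>

// Sketch: the one-off transpose the commit hoists out of the model calls.
std::vector<std::vector<double>> transpose(const std::vector<std::vector<double>>& A){
    std::vector<std::vector<double>> T(A[0].size(), std::vector<double>(A.size()));
    for(size_t i = 0; i < A.size(); i++)
        for(size_t j = 0; j < A[i].size(); j++)
            T[j][i] = A[i][j];
    return T;
}

int main(){
    // Feature-major XOR inputs, as in the commented demo above.
    std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
    for(const auto& row : transpose(inputSet)){ // prints {0,0} {0,1} {1,0} {1,1}
        std::cout << "{" << row[0] << "," << row[1] << "} ";
    }
    std::cout << std::endl;
    return 0;
}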
// // SOFTMAX NETWORK // // SOFTMAX NETWORK
@@ -343,17 +345,21 @@ int main() {
     // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
     // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
-    // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
-    // std::vector<double> outputSet = {0,1,1,0};
-    // ANN ann(alg.transpose(inputSet), outputSet);
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addOutputLayer("Sigmoid", "LogLoss");
-    // ann.gradientDescent(0.1, 80000, 0);
-    // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
-    // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
+    // typedef std::vector<std::vector<double>> Matrix;
+    // typedef std::vector<double> Vector;
+    // Matrix inputSet = {{0,0}, {0,1}, {1,0}, {1,1}}; // XOR
+    // Vector outputSet = {0,1,1,0};
+    // ANN ann(inputSet, outputSet);
+    // ann.addLayer(10, "Sigmoid");
+    // Add more layers as needed.
+    // ann.addOutputLayer("Sigmoid", "LogLoss");
+    // ann.gradientDescent(0.1, 20000, 0);
+    // Vector predictions = ann.modelSetTest(inputSet);
+    // alg.printVector(predictions); // Testing out the model's preds for the train set.
+    // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl; // Accuracy.
// // DYNAMICALLY SIZED MANN (Multidimensional Output ANN) // // DYNAMICALLY SIZED MANN (Multidimensional Output ANN)
// std::vector<std::vector<double>> inputSet = {{1,2,3},{2,4,6},{3,6,9},{4,8,12}}; // std::vector<std::vector<double>> inputSet = {{1,2,3},{2,4,6},{3,6,9},{4,8,12}};
@@ -542,13 +548,13 @@ int main() {
     // alg.printMatrix(R);
     // // Checking positive-definiteness checker. For Cholesky Decomp.
-    std::vector<std::vector<double>> A =
-    {
-        {1,-1,-1,-1},
-        {-1,2,2,2},
-        {-1,2,3,1},
-        {-1,2,1,4}
-    };
+    // std::vector<std::vector<double>> A =
+    // {
+    //     {1,-1,-1,-1},
+    //     {-1,2,2,2},
+    //     {-1,2,3,1},
+    //     {-1,2,1,4}
+    // };
     // std::cout << std::boolalpha << alg.positiveDefiniteChecker(A) << std::endl;
     // auto [L, Lt] = alg.chol(A); // works.
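Note: a symmetric matrix is positive-definite exactly when its Cholesky factorization A = L*L^T exists with a strictly positive diagonal, which is why the checker is paired with alg.chol above. A self-contained sketch of that test applied to the same A (this is not MLPP's implementation):

#include <cmath>
#include <iostream>
#include <vector>

// Sketch: positive-definiteness via Cholesky (A = L * L^T).
// The factorization succeeds iff every pivot under the square root is positive.
bool positiveDefinite(const std::vector<std::vector<double>>& A){
    size_t n = A.size();
    std::vector<std::vector<double>> L(n, std::vector<double>(n, 0.0));
    for(size_t j = 0; j < n; j++){
        double sum = A[j][j];
        for(size_t k = 0; k < j; k++) sum -= L[j][k] * L[j][k];
        if(sum <= 0) return false; // non-positive pivot: not positive-definite
        L[j][j] = std::sqrt(sum);
        for(size_t i = j + 1; i < n; i++){
            double s = A[i][j];
            for(size_t k = 0; k < j; k++) s -= L[i][k] * L[j][k];
            L[i][j] = s / L[j][j];
        }
    }
    return true;
}

int main(){
    std::vector<std::vector<double>> A = {
        {1,-1,-1,-1},
        {-1,2,2,2},
        {-1,2,3,1},
        {-1,2,1,4}
    };
    std::cout << std::boolalpha << positiveDefinite(A) << std::endl; // true
    return 0;
}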
@@ -604,7 +610,7 @@ int main() {
     // alg.printMatrix(conv.dx(A));
     // alg.printMatrix(conv.dy(A));
-    alg.printMatrix(conv.gradOrientation(A));
+    // alg.printMatrix(conv.gradOrientation(A));
     return 0;
 }
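Note: conv.gradOrientation presumably combines the dx and dy derivative images into a per-pixel gradient direction, conventionally theta = atan2(dy, dx). A minimal sketch under that assumption (the derivative values below are made up, and MLPP's implementation may differ):

#include <cmath>
#include <iostream>
#include <vector>

// Sketch: per-pixel gradient orientation from derivative images.
// Assumes dx and dy have equal dimensions.
std::vector<std::vector<double>> gradOrientation(const std::vector<std::vector<double>>& dx,
                                                 const std::vector<std::vector<double>>& dy){
    std::vector<std::vector<double>> theta(dx.size(), std::vector<double>(dx[0].size()));
    for(size_t i = 0; i < dx.size(); i++)
        for(size_t j = 0; j < dx[i].size(); j++)
            theta[i][j] = std::atan2(dy[i][j], dx[i][j]); // radians in (-pi, pi]
    return theta;
}

int main(){
    std::vector<std::vector<double>> dx = {{1, 0}, {-1, 1}};
    std::vector<std::vector<double>> dy = {{0, 1}, {0, 1}};
    for(const auto& row : gradOrientation(dx, dy)){
        for(double t : row) std::cout << t << " "; // prints 0, pi/2, then pi, pi/4
        std::cout << std::endl;
    }
    return 0;
}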