diff --git a/.DS_Store b/.DS_Store
index ec4cd07..6444e66 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/MLPP/.DS_Store b/MLPP/.DS_Store
index a09b026..cda6ec9 100644
Binary files a/MLPP/.DS_Store and b/MLPP/.DS_Store differ
diff --git a/MLPP/Transforms/Transforms.cpp b/MLPP/Transforms/Transforms.cpp
new file mode 100644
index 0000000..09e2ae0
--- /dev/null
+++ b/MLPP/Transforms/Transforms.cpp
@@ -0,0 +1,58 @@
+//
+//  Transforms.cpp
+//
+//  Created by Marc Melikyan on 11/13/20.
+//
+
+#include "Transforms.hpp"
+#include "LinAlg/LinAlg.hpp"
+#include <iostream>
+#include <string>
+#include <cmath>
+
+namespace MLPP{
+
+    // DCT-II.
+    std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector<std::vector<double>> A){
+        LinAlg alg;
+        A = alg.scalarAdd(-128, A); // Center around 0.
+
+        std::vector<std::vector<double>> B;
+        B.resize(A.size());
+        for(int i = 0; i < B.size(); i++){
+            B[i].resize(A[i].size());
+        }
+
+        int M = A.size();
+
+        for(int i = 0; i < B.size(); i++){
+            for(int j = 0; j < B[i].size(); j++){
+                double sum = 0;
+                double alphaI;
+                if(i == 0){
+                    alphaI = 1/std::sqrt(M);
+                }
+                else{
+                    alphaI = std::sqrt(double(2)/double(M));
+                }
+                double alphaJ;
+                if(j == 0){
+                    alphaJ = 1/std::sqrt(M);
+                }
+                else{
+                    alphaJ = std::sqrt(double(2)/double(M));
+                }
+
+                for(int k = 0; k < B.size(); k++){
+                    for(int f = 0; f < B[k].size(); f++){
+                        sum += A[k][f] * std::cos( (M_PI * i * (2 * k + 1)) / (2 * M)) * std::cos( (M_PI * j * (2 * f + 1)) / (2 * M));
+                    }
+                }
+                B[i][j] = sum;
+                B[i][j] *= alphaI * alphaJ;
+
+            }
+        }
+        return B;
+    }
+}
\ No newline at end of file
diff --git a/MLPP/Transforms/Transforms.hpp b/MLPP/Transforms/Transforms.hpp
new file mode 100644
index 0000000..d8bbb53
--- /dev/null
+++ b/MLPP/Transforms/Transforms.hpp
@@ -0,0 +1,20 @@
+//
+//  Transforms.hpp
+//
+//
+
+#ifndef Transforms_hpp
+#define Transforms_hpp
+
+#include <vector>
+#include <string>
+
+namespace MLPP{
+    class Transforms{
+        public:
+            std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);
+
+    };
+}
+
+#endif /* Transforms_hpp */
diff --git a/README.md b/README.md
index 55813c8..05d8e0b 100644
--- a/README.md
+++ b/README.md
@@ -190,9 +190,11 @@ The result will be the model's predictions for the entire dataset.
     5. Differential Equations Solvers
         - Euler's Method
         - Growth Method
-15. ***Linear Algebra Module***
-16. ***Statistics Module***
-17. ***Data Processing Module***
+15. ***Mathematical Transforms***
+    1. Discrete Cosine Transform
+16. ***Linear Algebra Module***
+17. ***Statistics Module***
+18. ***Data Processing Module***
     1. Setting and Printing Datasets
     2. Feature Scaling
     3. Mean Normalization
@@ -204,7 +206,7 @@ The result will be the model's predictions for the entire dataset.
     - RGB to YCbCr
     - RGB to XYZ
     - XYZ to RGB
-18. ***Utilities***
+19. ***Utilities***
     1. TP, FP, TN, FN function
     2. Precision
     3. Recall
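Note on the math: discreteCosineTransform above computes the orthonormal 2-D DCT-II. For an M x M block A (a square input is assumed, since M is taken from A.size() alone),

    B[i][j] = alpha(i) * alpha(j) * sum_{k,f} A[k][f] * cos(pi*i*(2k+1)/(2M)) * cos(pi*j*(2f+1)/(2M)),

with alpha(0) = 1/sqrt(M) and alpha(n) = sqrt(2/M) otherwise; the scalarAdd(-128, A) step first shifts 8-bit pixel values so they are centered around zero. Below is a minimal standalone sketch of the same computation for cross-checking, assuming nothing from MLPP; the helper name dct2 and the constant-block test are illustrative and the -128 centering is omitted:

// dct_sketch.cpp -- illustrative cross-check, not part of this patch.
#include <cmath>
#include <cstdio>
#include <vector>

// Orthonormal 2-D DCT-II over a square M x M block (no -128 centering here).
std::vector<std::vector<double>> dct2(const std::vector<std::vector<double>>& A){
    const int M = A.size();
    std::vector<std::vector<double>> B(M, std::vector<double>(M, 0.0));
    for(int i = 0; i < M; i++){
        for(int j = 0; j < M; j++){
            // The alpha factors make the cosine basis orthonormal.
            double alphaI = (i == 0) ? 1.0/std::sqrt(M) : std::sqrt(2.0/M);
            double alphaJ = (j == 0) ? 1.0/std::sqrt(M) : std::sqrt(2.0/M);
            double sum = 0;
            for(int k = 0; k < M; k++){
                for(int f = 0; f < M; f++){
                    sum += A[k][f] * std::cos((M_PI * i * (2 * k + 1)) / (2.0 * M))
                                   * std::cos((M_PI * j * (2 * f + 1)) / (2.0 * M));
                }
            }
            B[i][j] = alphaI * alphaJ * sum;
        }
    }
    return B;
}

int main(){
    // Constant 4x4 block: all energy should land in the DC term,
    // B[0][0] = M * value = 40; every other coefficient is ~0.
    std::vector<std::vector<double>> A(4, std::vector<double>(4, 10.0));
    std::vector<std::vector<double>> B = dct2(A);
    std::printf("DC term: %f\n", B[0][0]);
    return 0;
}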
diff --git a/a.out b/a.out
index 067f67a..abb6d89 100755
Binary files a/a.out and b/a.out differ
diff --git a/buildSO.sh b/buildSO.sh
index 614a1bf..e628070 100755
--- a/buildSO.sh
+++ b/buildSO.sh
@@ -1,4 +1,4 @@
-g++ -I MLPP -c -fPIC main.cpp MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MANN/MANN.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/SVC/SVC.cpp MLPP/NumericalAnalysis/NumericalAnalysis.cpp MLPP/DualSVC/DualSVC.cpp --std=c++17
+g++ -I MLPP -c -fPIC main.cpp MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MANN/MANN.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/SVC/SVC.cpp MLPP/NumericalAnalysis/NumericalAnalysis.cpp MLPP/DualSVC/DualSVC.cpp MLPP/Transforms/Transforms.cpp --std=c++17
-g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o MultiOutputLayer.o MANN.o SVC.o NumericalAnalysis.o DualSVC.o
+g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o MultiOutputLayer.o MANN.o SVC.o NumericalAnalysis.o DualSVC.o Transforms.o
 sudo mv MLPP.so /usr/local/lib
diff --git a/main.cpp b/main.cpp
index 13bc938..fef2d5c 100644
--- a/main.cpp
+++ b/main.cpp
@@ -48,7 +48,7 @@
 #include "MLPP/NumericalAnalysis/NumericalAnalysis.hpp"
 #include "MLPP/DualSVC/DualSVC.hpp"
 #include "MLPP/GAN/GAN.hpp"
-
+#include "MLPP/Transforms/Transforms.hpp"
 
 using namespace MLPP;
@@ -466,16 +466,31 @@ int main() {
     // // CONVOLUTION, POOLING, ETC..
 
+    // std::vector<std::vector<double>> input = {
+    //     {1},
+    // };
+
+    // std::vector<std::vector<std::vector<double>>> tensorSet;
+    // tensorSet.push_back(input);
+    // tensorSet.push_back(input);
+    // tensorSet.push_back(input);
+
+    // alg.printTensor(data.rgb2xyz(tensorSet));
+
     std::vector<std::vector<double>> input = {
-        {1},
+        {62,55,55,54,49,48,47,55},
+        {62,57,54,52,48,47,48,53},
+        {61,60,52,49,48,47,49,54},
+        {63,61,60,60,63,65,68,65},
+        {67,67,70,74,79,85,91,92},
+        {82,95,101,106,114,115,112,117},
+        {96,111,115,119,128,128,130,127},
+        {109,121,127,133,139,141,140,133},
     };
 
-    std::vector<std::vector<std::vector<double>>> tensorSet;
-    tensorSet.push_back(input);
-    tensorSet.push_back(input);
-    tensorSet.push_back(input);
+    Transforms trans;
 
-    alg.printTensor(data.rgb2xyz(tensorSet));
+    alg.printMatrix(trans.discreteCosineTransform(input));
 
     // alg.printMatrix(conv.convolve(input, conv.getPrewittVertical(), 1)); // Can use padding
     // alg.printMatrix(conv.pool(input, 4, 4, "Max")); // Can use Max, Min, or Average pooling.
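Usage note: the main.cpp hunk above swaps the old 1x1 rgb2xyz tensor demo (kept commented out) for an 8x8 luminance-style block, the classic JPEG DCT demo size. The same path can be exercised by a hypothetical standalone driver like the following; the file name and build line are illustrative and not part of this patch, while discreteCosineTransform and printMatrix are the only repo calls used:

// dct_demo.cpp -- hypothetical standalone driver, not part of this patch.
// Illustrative build line, mirroring buildSO.sh's flags:
//   g++ -I MLPP dct_demo.cpp MLPP/Transforms/Transforms.cpp MLPP/LinAlg/LinAlg.cpp --std=c++17 -o dct_demo
#include <vector>

#include "MLPP/Transforms/Transforms.hpp"
#include "MLPP/LinAlg/LinAlg.hpp"

int main() {
    // The same 8x8 block used in main.cpp above.
    std::vector<std::vector<double>> input = {
        {62,55,55,54,49,48,47,55},
        {62,57,54,52,48,47,48,53},
        {61,60,52,49,48,47,49,54},
        {63,61,60,60,63,65,68,65},
        {67,67,70,74,79,85,91,92},
        {82,95,101,106,114,115,112,117},
        {96,111,115,119,128,128,130,127},
        {109,121,127,133,139,141,140,133},
    };

    MLPP::Transforms trans;
    MLPP::LinAlg alg;
    // For natural-image data like this, most of the energy lands in the
    // top-left (low-frequency) coefficients of the result.
    alg.printMatrix(trans.discreteCosineTransform(input));
    return 0;
}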