Mirror of https://github.com/Relintai/MLPP.git (synced 2024-11-12 10:15:01 +01:00)
discrete cosine transform
This commit is contained in:
parent c21c750727
commit 4cc61e4c1e
BIN  MLPP/.DS_Store  vendored  (Binary file not shown.)
58  MLPP/Transforms/Transforms.cpp  Normal file
@@ -0,0 +1,58 @@
//
// Transforms.cpp
//
// Created by Marc Melikyan on 11/13/20.
//

#include "Transforms.hpp"
#include "LinAlg/LinAlg.hpp"
#include <iostream>
#include <string>
#include <cmath>

namespace MLPP{

    // DCT ii.
    std::vector<std::vector<double>> Transforms::discreteCosineTransform(std::vector<std::vector<double>> A){
        LinAlg alg;
        A = alg.scalarAdd(-128, A); // Center around 0.

        std::vector<std::vector<double>> B;
        B.resize(A.size());
        for(int i = 0; i < B.size(); i++){
            B[i].resize(A[i].size());
        }

        int M = A.size();

        for(int i = 0; i < B.size(); i++){
            for(int j = 0; j < B[i].size(); j++){
                double sum = 0;
                double alphaI;
                if(i == 0){
                    alphaI = 1/std::sqrt(M);
                }
                else{
                    alphaI = std::sqrt(double(2)/double(M));
                }
                double alphaJ;
                if(j == 0){
                    alphaJ = 1/std::sqrt(M);
                }
                else{
                    alphaJ = std::sqrt(double(2)/double(M));
                }

                for(int k = 0; k < B.size(); k++){
                    for(int f = 0; f < B[k].size(); f++){
                        sum += A[k][f] * std::cos( (M_PI * i * (2 * k + 1)) / (2 * M)) * std::cos( (M_PI * j * (2 * f + 1)) / (2 * M));
                    }
                }
                B[i][j] = sum;
                B[i][j] *= alphaI * alphaJ;

            }
        }
        return B;
    }
}
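For reference, the nested loops above evaluate the orthonormal 2-D type-II DCT. The code uses M = A.size() for both cosine denominators, so it implicitly assumes a square M x M input block (an 8x8 block in the main.cpp change below). Written out, with B the output and A the level-shifted input, the quantity being computed is:

B_{ij} = \alpha_i \alpha_j \sum_{k=0}^{M-1} \sum_{f=0}^{M-1} A_{kf} \cos\!\left(\frac{\pi i (2k+1)}{2M}\right) \cos\!\left(\frac{\pi j (2f+1)}{2M}\right), \qquad \alpha_i = \begin{cases} 1/\sqrt{M}, & i = 0 \\ \sqrt{2/M}, & i > 0. \end{cases}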
20  MLPP/Transforms/Transforms.hpp  Normal file
@@ -0,0 +1,20 @@
//
// Transforms.hpp
//
//

#ifndef Transforms_hpp
#define Transforms_hpp

#include <vector>
#include <string>

namespace MLPP{
    class Transforms{
        public:
            std::vector<std::vector<double>> discreteCosineTransform(std::vector<std::vector<double>> A);

    };
}

#endif /* Transforms_hpp */
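A minimal, hypothetical usage sketch of the new class, assuming the headers are reachable the same way main.cpp includes them and the sources are compiled as in the updated build command further down; the 4x4 block is made-up sample data, and printMatrix comes from the existing LinAlg module (main.cpp in this commit does the same thing on a full 8x8 block):

// Hypothetical standalone example; not part of this commit.
#include <vector>

#include "MLPP/LinAlg/LinAlg.hpp"
#include "MLPP/Transforms/Transforms.hpp"

int main(){
    MLPP::Transforms trans;
    MLPP::LinAlg alg;

    // A small square block of 8-bit-style intensities (made-up values);
    // discreteCosineTransform() level-shifts by -128 internally.
    std::vector<std::vector<double>> block = {
        {52, 55, 61, 66},
        {63, 59, 55, 90},
        {62, 59, 68, 113},
        {63, 58, 71, 122},
    };

    // Prints the type-II DCT coefficients of the block.
    alg.printMatrix(trans.discreteCosineTransform(block));
    return 0;
}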
10  README.md
@@ -190,9 +190,11 @@ The result will be the model's predictions for the entire dataset.
     5. Diffrential Equations Solvers
         - Euler's Method
         - Growth Method
-15. ***Linear Algebra Module***
-16. ***Statistics Module***
-17. ***Data Processing Module***
+15. ***Mathematical Transforms***
+    1. Discrete Cosine Transform
+16. ***Linear Algebra Module***
+17. ***Statistics Module***
+18. ***Data Processing Module***
     1. Setting and Printing Datasets
     2. Feature Scaling
     3. Mean Normalization
@@ -204,7 +206,7 @@ The result will be the model's predictions for the entire dataset.
         - RGB to YCbCr
         - RGB to XYZ
         - XYZ to RGB
-18. ***Utilities***
+19. ***Utilities***
     1. TP, FP, TN, FN function
     2. Precision
     3. Recall
@@ -1,4 +1,4 @@
-g++ -I MLPP -c -fPIC main.cpp MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MANN/MANN.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/SVC/SVC.cpp MLPP/NumericalAnalysis/NumericalAnalysis.cpp MLPP/DualSVC/DualSVC.cpp --std=c++17
+g++ -I MLPP -c -fPIC main.cpp MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MANN/MANN.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/SVC/SVC.cpp MLPP/NumericalAnalysis/NumericalAnalysis.cpp MLPP/DualSVC/DualSVC.cpp MLPP/Transforms/Transforms.cpp --std=c++17
 
 g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o MultiOutputLayer.o MANN.o SVC.o NumericalAnalysis.o DualSVC.o
 sudo mv MLPP.so /usr/local/lib
29  main.cpp
@@ -48,7 +48,7 @@
 #include "MLPP/NumericalAnalysis/NumericalAnalysis.hpp"
 #include "MLPP/DualSVC/DualSVC.hpp"
 #include "MLPP/GAN/GAN.hpp"
+#include "MLPP/Transforms/Transforms.hpp"
 
 using namespace MLPP;
 
@@ -466,16 +466,31 @@ int main() {
 
 
     // // CONVOLUTION, POOLING, ETC..
+    // std::vector<std::vector<double>> input = {
+    //     {1},
+    // };
+
+    // std::vector<std::vector<std::vector<double>>> tensorSet;
+    // tensorSet.push_back(input);
+    // tensorSet.push_back(input);
+    // tensorSet.push_back(input);
+
+    // alg.printTensor(data.rgb2xyz(tensorSet));
+
     std::vector<std::vector<double>> input = {
-        {1},
+        {62,55,55,54,49,48,47,55},
+        {62,57,54,52,48,47,48,53},
+        {61,60,52,49,48,47,49,54},
+        {63,61,60,60,63,65,68,65},
+        {67,67,70,74,79,85,91,92},
+        {82,95,101,106,114,115,112,117},
+        {96,111,115,119,128,128,130,127},
+        {109,121,127,133,139,141,140,133},
     };
 
-    std::vector<std::vector<std::vector<double>>> tensorSet;
-    tensorSet.push_back(input);
-    tensorSet.push_back(input);
-    tensorSet.push_back(input);
+    Transforms trans;
 
-    alg.printTensor(data.rgb2xyz(tensorSet));
+    alg.printMatrix(trans.discreteCosineTransform(input));
 
     // alg.printMatrix(conv.convolve(input, conv.getPrewittVertical(), 1)); // Can use padding
     // alg.printMatrix(conv.pool(input, 4, 4, "Max")); // Can use Max, Min, or Average pooling.