Mirror of https://github.com/Relintai/MLPP.git (synced 2024-11-12 10:15:01 +01:00)
Added growth method for solving differential equations; added dx, dy, gradient magnitude, and gradient orientation calculators for images
This commit is contained in:
parent 074de5b24b
commit 5764f20ed2
@@ -245,6 +245,82 @@ namespace MLPP{
        return filter;
    }

    /*
    Indeed, a filter could have been used for this purpose, but I decided it would have
    been easier to carry out the calculation explicitly, mainly because it is more
    informative, and also because my convolution algorithm is only built for filters
    with equal heights and widths.
    */
    std::vector<std::vector<double>> Convolutions::dx(std::vector<std::vector<double>> input){
        std::vector<std::vector<double>> deriv; // We assume a grayscale image.
        deriv.resize(input.size());
        for(int i = 0; i < deriv.size(); i++){
            deriv[i].resize(input[i].size());
        }

        for(int i = 0; i < input.size(); i++){
            for(int j = 0; j < input[i].size(); j++){
                if(j != 0 && j != input[i].size() - 1){ // Interior: central difference.
                    deriv[i][j] = input[i][j + 1] - input[i][j - 1];
                }
                else if(j == 0){
                    deriv[i][j] = input[i][j + 1] - 0; // Implicit zero-padding at the left edge.
                }
                else{
                    deriv[i][j] = 0 - input[i][j - 1]; // Implicit zero-padding at the right edge.
                }
            }
        }
        return deriv;
    }

    std::vector<std::vector<double>> Convolutions::dy(std::vector<std::vector<double>> input){
        std::vector<std::vector<double>> deriv;
        deriv.resize(input.size());
        for(int i = 0; i < deriv.size(); i++){
            deriv[i].resize(input[i].size());
        }

        for(int i = 0; i < input.size(); i++){
            for(int j = 0; j < input[i].size(); j++){
                if(i != 0 && i != input.size() - 1){ // Interior: row above minus row below (y increases upward).
                    deriv[i][j] = input[i - 1][j] - input[i + 1][j];
                }
                else if(i == 0){
                    deriv[i][j] = 0 - input[i + 1][j]; // Implicit zero-padding at the top edge.
                }
                else{
                    deriv[i][j] = input[i - 1][j] - 0; // Implicit zero-padding at the bottom edge.
                }
            }
        }
        return deriv;
    }

    // Gradient magnitude: sqrt(dx(I)^2 + dy(I)^2), computed element-wise.
    std::vector<std::vector<double>> Convolutions::gradMagnitude(std::vector<std::vector<double>> input){
        LinAlg alg;
        std::vector<std::vector<double>> xDeriv_2 = alg.hadamard_product(dx(input), dx(input));
        std::vector<std::vector<double>> yDeriv_2 = alg.hadamard_product(dy(input), dy(input));
        return alg.sqrt(alg.addition(xDeriv_2, yDeriv_2));
    }

    // Gradient orientation: atan2(dy(I), dx(I)) per pixel, in radians in [-pi, pi].
    std::vector<std::vector<double>> Convolutions::gradOrientation(std::vector<std::vector<double>> input){
        std::vector<std::vector<double>> deriv;
        deriv.resize(input.size());
        for(int i = 0; i < deriv.size(); i++){
            deriv[i].resize(input[i].size());
        }

        std::vector<std::vector<double>> xDeriv = dx(input);
        std::vector<std::vector<double>> yDeriv = dy(input);
        for(int i = 0; i < deriv.size(); i++){
            for(int j = 0; j < deriv[i].size(); j++){
                deriv[i][j] = std::atan2(yDeriv[i][j], xDeriv[i][j]);
            }
        }
        return deriv;
    }

    std::vector<std::vector<double>> Convolutions::getPrewittHorizontal(){
        return prewittHorizontal;
    }
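The behavior of these finite-difference routines is easiest to see on a single bright pixel. Below is a minimal usage sketch; the include paths are assumptions about this repo's layout, and the expected values are hand-worked from the padding rules above rather than captured output.

    #include <vector>
    #include "Convolutions/Convolutions.hpp" // assumed path
    #include "LinAlg/LinAlg.hpp"             // assumed path

    int main() {
        MLPP::Convolutions conv;
        MLPP::LinAlg alg;
        // A single bright pixel on a dark background.
        std::vector<std::vector<double>> img = {
            {0, 0, 0},
            {0, 1, 0},
            {0, 0, 0}
        };
        // dx(img) is {{0,0,0},{1,0,-1},{0,0,0}}: positive approaching the pixel
        // from the left edge, negative leaving it at the right edge.
        // dy(img) is {{0,-1,0},{0,0,0},{0,1,0}} under the y-up sign convention.
        // gradMagnitude(img)[1][0] = sqrt(1^2 + 0^2) = 1.
        // gradOrientation(img)[1][0] = atan2(0, 1) = 0.
        alg.printMatrix(conv.dx(img));
        alg.printMatrix(conv.dy(img));
        return 0;
    }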
@@ -17,6 +17,12 @@ namespace MLPP{
        double gaussian2D(double x, double y, double std);
        std::vector<std::vector<double>> gaussianFilter2D(int size, double std);

        std::vector<std::vector<double>> dx(std::vector<std::vector<double>> input);
        std::vector<std::vector<double>> dy(std::vector<std::vector<double>> input);

        std::vector<std::vector<double>> gradMagnitude(std::vector<std::vector<double>> input);
        std::vector<std::vector<double>> gradOrientation(std::vector<std::vector<double>> input);

        std::vector<std::vector<double>> getPrewittHorizontal();
        std::vector<std::vector<double>> getPrewittVertical();
        std::vector<std::vector<double>> getSobelHorizontal();
@@ -163,6 +163,22 @@ namespace MLPP{
        }
        return y;
    }

    double NumericalAnalysis::growthMethod(double C, double k, double t){
        /*
        dP/dt = kP
        dP/P = k dt
        integral(1/P) dP = integral(k) dt
        ln|P| = kt + C_initial
        |P| = e^(kt + C_initial)
        |P| = e^(C_initial) * e^(kt)
        P = +/- e^(C_initial) * e^(kt)
        P = C * e^(kt)
        */

        // auto growthFunction = [&C, &k](double t) { return C * exp(k * t); };
        return C * exp(k * t);
    }

    std::vector<double> NumericalAnalysis::jacobian(double(*function)(std::vector<double>), std::vector<double> x){
        std::vector<double> jacobian;
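Since growthMethod is just the closed form P(t) = C * e^(kt), with C = P(0), a quick sanity check is straightforward. A minimal sketch (the include path is an assumption; the expected values are hand arithmetic):

    #include <iostream>
    #include "NumericalAnalysis/NumericalAnalysis.hpp" // assumed path

    int main() {
        MLPP::NumericalAnalysis numAn;
        // P(t) = C * e^(kt) with C = 2 and k = 0.3:
        std::cout << numAn.growthMethod(2.0, 0.3, 0.0) << std::endl; // 2 (C is P at t = 0)
        std::cout << numAn.growthMethod(2.0, 0.3, 1.0) << std::endl; // 2 * e^0.3 ~= 2.6997
        return 0;
    }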
@@ -35,6 +35,8 @@ namespace MLPP{
        double eulerianMethod(double(*derivative)(double), std::vector<double> q_0, double p, double h); // Euler's method for solving differential equations.
        double eulerianMethod(double(*derivative)(std::vector<double>), std::vector<double> q_0, double p, double h); // Euler's method for solving differential equations.

        double growthMethod(double C, double k, double t); // General growth-based differential equations can be solved by separation of variables.

        std::vector<double> jacobian(double(*function)(std::vector<double>), std::vector<double> x); // Indeed, for functions with scalar outputs the Jacobians will be vectors.
        std::vector<std::vector<double>> hessian(double(*function)(std::vector<double>), std::vector<double> x);
        std::vector<std::vector<std::vector<double>>> thirdOrderTensor(double(*function)(std::vector<double>), std::vector<double> x);
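On the "Jacobians will be vectors" comment above: for a scalar-output f, the Jacobian is just the gradient written as a row vector. A hedged sketch with a hypothetical test function g (the finite-difference accuracy of jacobian is assumed, not measured):

    #include <iostream>
    #include <vector>
    #include "NumericalAnalysis/NumericalAnalysis.hpp" // assumed path

    // Hypothetical test function: g(x, y) = x^2 * y, whose gradient is [2xy, x^2].
    double g(std::vector<double> x) { return x[0] * x[0] * x[1]; }

    int main() {
        MLPP::NumericalAnalysis numAn;
        std::vector<double> J = numAn.jacobian(g, {2, 3});
        std::cout << J[0] << ", " << J[1] << std::endl; // approximately 12, 4
        return 0;
    }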
main.cpp
@@ -542,16 +542,16 @@ int main() {
     // alg.printMatrix(R);
 
     // // Checking positive-definiteness checker. For Cholesky Decomp.
-    // std::vector<std::vector<double>> A =
-    // {
-    //     {1,-1,-1,-1},
-    //     {-1,2,2,2},
-    //     {-1,2,3,1},
-    //     {-1,2,1,4}
-    // };
+    std::vector<std::vector<double>> A =
+    {
+        {1,-1,-1,-1},
+        {-1,2,2,2},
+        {-1,2,3,1},
+        {-1,2,1,4}
+    };
 
     // std::cout << std::boolalpha << alg.positiveDefiniteChecker(A) << std::endl;
-    // auto [L, Lt] = alg.chol(A); // WORKS !!!!
+    // auto [L, Lt] = alg.chol(A); // works.
     // alg.printMatrix(L);
     // alg.printMatrix(Lt);
 
@@ -597,9 +597,14 @@ int main() {
 
     // std::cout << numAn.cubicApproximation(f_mv, {0, 0, 0}, {1, 1, 1}) << std::endl;
 
-    //std::cout << numAn.eulerianMethod(f_prime, {1, 1}, 1.5, 0.000001) << std::endl;
+    // std::cout << numAn.eulerianMethod(f_prime, {1, 1}, 1.5, 0.000001) << std::endl;
 
-    std::cout << numAn.eulerianMethod(f_prime_2var, {2, 3}, 2.5, 0.00000001) << std::endl;
+    // std::cout << numAn.eulerianMethod(f_prime_2var, {2, 3}, 2.5, 0.00000001) << std::endl;
+
+    // alg.printMatrix(conv.dx(A));
+    // alg.printMatrix(conv.dy(A));
+
+    alg.printMatrix(conv.gradOrientation(A));
 
     return 0;
 }
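For the 4x4 matrix A above, the top-left entry of conv.gradOrientation(A) can be worked by hand from the zero-padding rules: dx[0][0] = A[0][1] - 0 = -1 and dy[0][0] = 0 - A[1][0] = 1, so the orientation is atan2(1, -1) = 3*pi/4, roughly 2.356 radians. (Hand-worked from the formulas above, not captured program output.)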