From ab24693f37fbccea9e7bb093423c2de2bc4ae6ac Mon Sep 17 00:00:00 2001
From: Relintai
Date: Fri, 29 Dec 2023 18:37:06 +0100
Subject: [PATCH] Reworked MLPPNumericalAnalysis.

---
 .../numerical_analysis/numerical_analysis.cpp | 310 +++++++++++-------
 mlpp/numerical_analysis/numerical_analysis.h  |  62 ++--
 test/mlpp_tests.cpp                           | 277 +++++++++++++---
 test/mlpp_tests_old.cpp                       |  13 +-
 4 files changed, 465 insertions(+), 197 deletions(-)

diff --git a/mlpp/numerical_analysis/numerical_analysis.cpp b/mlpp/numerical_analysis/numerical_analysis.cpp
index 582a83c..8138d8d 100644
--- a/mlpp/numerical_analysis/numerical_analysis.cpp
+++ b/mlpp/numerical_analysis/numerical_analysis.cpp
@@ -7,145 +7,145 @@
#include "numerical_analysis.h"

#include "../lin_alg/lin_alg.h"
-#include
-#include
-#include
-#include
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_tensor3.h"
+#include "../lin_alg/mlpp_vector.h"

-/*
-real_t MLPPNumericalAnalysis::numDiff(real_t (*function)(real_t), real_t x) {
+real_t MLPPNumericalAnalysis::num_diffr(real_t (*function)(real_t), real_t x) {
    real_t eps = 1e-10;
    return (function(x + eps) - function(x)) / eps; // This is just the formal def. of the derivative.
}

-real_t MLPPNumericalAnalysis::numDiff_2(real_t (*function)(real_t), real_t x) {
+real_t MLPPNumericalAnalysis::num_diff_2r(real_t (*function)(real_t), real_t x) {
    real_t eps = 1e-5;
    return (function(x + 2 * eps) - 2 * function(x + eps) + function(x)) / (eps * eps);
}

-real_t MLPPNumericalAnalysis::numDiff_3(real_t (*function)(real_t), real_t x) {
+real_t MLPPNumericalAnalysis::num_diff_3r(real_t (*function)(real_t), real_t x) {
    real_t eps = 1e-5;
    real_t t1 = function(x + 3 * eps) - 2 * function(x + 2 * eps) + function(x + eps);
    real_t t2 = function(x + 2 * eps) - 2 * function(x + eps) + function(x);
    return (t1 - t2) / (eps * eps * eps);
}

-real_t MLPPNumericalAnalysis::constantApproximation(real_t (*function)(real_t), real_t c) {
+real_t MLPPNumericalAnalysis::constant_approximationr(real_t (*function)(real_t), real_t c) {
    return function(c);
}

-real_t MLPPNumericalAnalysis::linearApproximation(real_t (*function)(real_t), real_t c, real_t x) {
-    return constantApproximation(function, c) + numDiff(function, c) * (x - c);
+real_t MLPPNumericalAnalysis::linear_approximationr(real_t (*function)(real_t), real_t c, real_t x) {
+    return constant_approximationr(function, c) + num_diffr(function, c) * (x - c);
}

-real_t MLPPNumericalAnalysis::quadraticApproximation(real_t (*function)(real_t), real_t c, real_t x) {
-    return linearApproximation(function, c, x) + 0.5 * numDiff_2(function, c) * (x - c) * (x - c);
+real_t MLPPNumericalAnalysis::quadratic_approximationr(real_t (*function)(real_t), real_t c, real_t x) {
+    return linear_approximationr(function, c, x) + 0.5 * num_diff_2r(function, c) * (x - c) * (x - c);
}

-real_t MLPPNumericalAnalysis::cubicApproximation(real_t (*function)(real_t), real_t c, real_t x) {
-    return quadraticApproximation(function, c, x) + (1 / 6) * numDiff_3(function, c) * (x - c) * (x - c) * (x - c);
+real_t MLPPNumericalAnalysis::cubic_approximationr(real_t (*function)(real_t), real_t c, real_t x) {
+    return quadratic_approximationr(function, c, x) + (1 / 6) * num_diff_3r(function, c) * (x - c) * (x - c) * (x - c);
}
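For reference, the single-variable helpers above are plain forward differences, and the *_approximationr functions stack them into the Taylor polynomial about c (note that the (1 / 6) factor in cubic_approximationr is integer division in C++ and therefore evaluates to 0):

\[ f'(x) \approx \frac{f(x+\epsilon) - f(x)}{\epsilon}, \qquad f''(x) \approx \frac{f(x+2\epsilon) - 2f(x+\epsilon) + f(x)}{\epsilon^2}, \qquad f'''(x) \approx \frac{f(x+3\epsilon) - 3f(x+2\epsilon) + 3f(x+\epsilon) - f(x)}{\epsilon^3} \]

\[ f(x) \approx f(c) + f'(c)(x-c) + \tfrac{1}{2} f''(c)(x-c)^2 + \tfrac{1}{6} f'''(c)(x-c)^3 \]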
-real_t MLPPNumericalAnalysis::numDiff(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis) {
+real_t MLPPNumericalAnalysis::num_diffv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x, int axis) {
    // For multivariable function analysis.
    // This will be used for calculating Jacobian vectors.
    // Differentiate with respect to indicated axis. (0, 1, 2 ...)
    real_t eps = 1e-10;

-    std::vector<real_t> x_eps = x;
-    x_eps[axis] += eps;
+    Ref<MLPPVector> x_eps = x->duplicate_fast();
+    x_eps->element_get_ref(axis) += eps;

    return (function(x_eps) - function(x)) / eps;
}

-real_t MLPPNumericalAnalysis::numDiff_2(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis1, int axis2) {
+real_t MLPPNumericalAnalysis::num_diff_2v(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x, int axis1, int axis2) {
    //For Hessians.
    real_t eps = 1e-5;

-    std::vector<real_t> x_pp = x;
-    x_pp[axis1] += eps;
-    x_pp[axis2] += eps;
+    Ref<MLPPVector> x_pp = x->duplicate_fast();
+    x_pp->element_get_ref(axis1) += eps;
+    x_pp->element_get_ref(axis2) += eps;

-    std::vector<real_t> x_np = x;
-    x_np[axis2] += eps;
+    Ref<MLPPVector> x_np = x->duplicate_fast();
+    x_np->element_get_ref(axis2) += eps;

-    std::vector<real_t> x_pn = x;
-    x_pn[axis1] += eps;
+    Ref<MLPPVector> x_pn = x->duplicate_fast();
+    x_pn->element_get_ref(axis1) += eps;

    return (function(x_pp) - function(x_np) - function(x_pn) + function(x)) / (eps * eps);
}

-real_t MLPPNumericalAnalysis::numDiff_3(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis1, int axis2, int axis3) {
+real_t MLPPNumericalAnalysis::num_diff_3v(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x, int axis1, int axis2, int axis3) {
    // For third order derivative tensors.
    // NOTE: Approximations do not appear to be accurate for sinusoidal functions...
    // Should revisit this later.
    real_t eps = 1e-5;

-    std::vector<real_t> x_ppp = x;
-    x_ppp[axis1] += eps;
-    x_ppp[axis2] += eps;
-    x_ppp[axis3] += eps;
+    Ref<MLPPVector> x_ppp = x->duplicate_fast();
+    x_ppp->element_get_ref(axis1) += eps;
+    x_ppp->element_get_ref(axis2) += eps;
+    x_ppp->element_get_ref(axis3) += eps;

-    std::vector<real_t> x_npp = x;
-    x_npp[axis2] += eps;
-    x_npp[axis3] += eps;
+    Ref<MLPPVector> x_npp = x->duplicate_fast();
+    x_npp->element_get_ref(axis2) += eps;
+    x_npp->element_get_ref(axis3) += eps;

-    std::vector<real_t> x_pnp = x;
-    x_pnp[axis1] += eps;
-    x_pnp[axis3] += eps;
+    Ref<MLPPVector> x_pnp = x->duplicate_fast();
+    x_pnp->element_get_ref(axis1) += eps;
+    x_pnp->element_get_ref(axis3) += eps;

-    std::vector<real_t> x_nnp = x;
-    x_nnp[axis3] += eps;
+    Ref<MLPPVector> x_nnp = x->duplicate_fast();
+    x_nnp->element_get_ref(axis3) += eps;

-    std::vector<real_t> x_ppn = x;
-    x_ppn[axis1] += eps;
-    x_ppn[axis2] += eps;
+    Ref<MLPPVector> x_ppn = x->duplicate_fast();
+    x_ppn->element_get_ref(axis1) += eps;
+    x_ppn->element_get_ref(axis2) += eps;

-    std::vector<real_t> x_npn = x;
-    x_npn[axis2] += eps;
+    Ref<MLPPVector> x_npn = x->duplicate_fast();
+    x_npn->element_get_ref(axis2) += eps;

-    std::vector<real_t> x_pnn = x;
-    x_pnn[axis1] += eps;
+    Ref<MLPPVector> x_pnn = x->duplicate_fast();
+    x_pnn->element_get_ref(axis1) += eps;

    real_t thirdAxis = function(x_ppp) - function(x_npp) - function(x_pnp) + function(x_nnp);
    real_t noThirdAxis = function(x_ppn) - function(x_npn) - function(x_pnn) + function(x);

    return (thirdAxis - noThirdAxis) / (eps * eps * eps);
}

-real_t MLPPNumericalAnalysis::newtonRaphsonMethod(real_t (*function)(real_t), real_t x_0, real_t epoch_num) {
+real_t MLPPNumericalAnalysis::newton_raphson_method(real_t (*function)(real_t), real_t x_0, real_t epoch_num) {
    real_t x = x_0;
    for (int i = 0; i < epoch_num; i++) {
-        x -= function(x) / numDiff(function, x);
+        x -= function(x) / num_diffr(function, x);
    }
    return x;
}
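newton_raphson_method above, and halley_method just below it, iterate the standard root-refinement updates, with the derivatives supplied by the forward-difference helpers:

\[ x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} \;\text{(Newton-Raphson)}, \qquad x_{n+1} = x_n - \frac{2 f(x_n) f'(x_n)}{2 f'(x_n)^2 - f(x_n) f''(x_n)} \;\text{(Halley)} \]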
-real_t MLPPNumericalAnalysis::halleyMethod(real_t (*function)(real_t), real_t x_0, real_t epoch_num) {
+real_t MLPPNumericalAnalysis::halley_method(real_t (*function)(real_t), real_t x_0, real_t epoch_num) {
    real_t x = x_0;
    for (int i = 0; i < epoch_num; i++) {
-        x -= ((2 * function(x) * numDiff(function, x)) / (2 * numDiff(function, x) * numDiff(function, x) - function(x) * numDiff_2(function, x)));
+        x -= ((2 * function(x) * num_diffr(function, x)) / (2 * num_diffr(function, x) * num_diffr(function, x) - function(x) * num_diff_2r(function, x)));
    }
    return x;
}

-real_t MLPPNumericalAnalysis::invQuadraticInterpolation(real_t (*function)(real_t), std::vector<real_t> x_0, int epoch_num) {
+real_t MLPPNumericalAnalysis::inv_quadratic_interpolation(real_t (*function)(real_t), const Ref<MLPPVector> &x_0, int epoch_num) {
    real_t x = 0;
-    std::vector<real_t> currentThree = x_0;
+    Ref<MLPPVector> ct = x_0->duplicate_fast();
+    MLPPVector &current_three = *(ct.ptr());
+
    for (int i = 0; i < epoch_num; i++) {
-        real_t t1 = ((function(currentThree[1]) * function(currentThree[2])) / ((function(currentThree[0]) - function(currentThree[1])) * (function(currentThree[0]) - function(currentThree[2])))) * currentThree[0];
-        real_t t2 = ((function(currentThree[0]) * function(currentThree[2])) / ((function(currentThree[1]) - function(currentThree[0])) * (function(currentThree[1]) - function(currentThree[2])))) * currentThree[1];
-        real_t t3 = ((function(currentThree[0]) * function(currentThree[1])) / ((function(currentThree[2]) - function(currentThree[0])) * (function(currentThree[2]) - function(currentThree[1])))) * currentThree[2];
+        real_t t1 = ((function(current_three[1]) * function(current_three[2])) / ((function(current_three[0]) - function(current_three[1])) * (function(current_three[0]) - function(current_three[2])))) * current_three[0];
+        real_t t2 = ((function(current_three[0]) * function(current_three[2])) / ((function(current_three[1]) - function(current_three[0])) * (function(current_three[1]) - function(current_three[2])))) * current_three[1];
+        real_t t3 = ((function(current_three[0]) * function(current_three[1])) / ((function(current_three[2]) - function(current_three[0])) * (function(current_three[2]) - function(current_three[1])))) * current_three[2];
        x = t1 + t2 + t3;

-        currentThree.erase(currentThree.begin());
-        currentThree.push_back(x);
+        current_three.remove(0);
+        current_three.push_back(x);
    }

    return x;
}

-real_t MLPPNumericalAnalysis::eulerianMethod(real_t (*derivative)(real_t), std::vector<real_t> q_0, real_t p, real_t h) {
-    int max_epoch = static_cast<int>((p - q_0[0]) / h);
-    real_t x = q_0[0];
-    real_t y = q_0[1];
+real_t MLPPNumericalAnalysis::eulerian_methodr(real_t (*derivative)(real_t), real_t q_0, real_t q_1, real_t p, real_t h) {
+    int max_epoch = static_cast<int>((p - q_0) / h);
+    real_t x = q_0;
+    real_t y = q_1;
    for (int i = 0; i < max_epoch; i++) {
        y = y + h * derivative(x);
        x += h;
@@ -153,18 +153,25 @@ real_t MLPPNumericalAnalysis::eulerianMethod(real_t (*derivative)(real_t), std::
    return y;
}

-real_t MLPPNumericalAnalysis::eulerianMethod(real_t (*derivative)(std::vector<real_t>), std::vector<real_t> q_0, real_t p, real_t h) {
-    int max_epoch = static_cast<int>((p - q_0[0]) / h);
-    real_t x = q_0[0];
-    real_t y = q_0[1];
+real_t MLPPNumericalAnalysis::eulerian_methodv(real_t (*derivative)(const Ref<MLPPVector> &), real_t q_0, real_t q_1, real_t p, real_t h) {
+    int max_epoch = static_cast<int>((p - q_0) / h);
+
+    Ref<MLPPVector> v;
+    v.instance();
+    v->resize(2);
+
+    real_t x = q_0;
+    real_t y = q_1;
    for (int i = 0; i < max_epoch; i++) {
-        y = y + h * derivative({ x, y });
+        v->element_set(0, x);
+        v->element_set(1, y);
+
+        y = y + h * derivative(v);
        x += h;
    }
    return y;
}
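Both Euler integrators above step the explicit update below from (x, y) = (q_0, q_1) until x reaches p; the only difference is whether the supplied derivative callback takes a scalar or an MLPPVector packed with (x, y):

\[ y_{n+1} = y_n + h \, y'(x_n, y_n), \qquad x_{n+1} = x_n + h \]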
-real_t MLPPNumericalAnalysis::growthMethod(real_t C, real_t k, real_t t) {
+real_t MLPPNumericalAnalysis::growth_method(real_t C, real_t k, real_t t) {
    //dP/dt = kP
    //dP/P = kdt
    //integral(1/P)dP = integral(k) dt
@@ -173,73 +180,123 @@ real_t MLPPNumericalAnalysis::growthMethod(real_t C, real_t k, real_t t) {
    //|P| = e^(C_initial) * e^(kt)
    //P = +/- e^(C_initial) * e^(kt)
    //P = C * e^(kt)
-    // auto growthFunction = [&C, &k](real_t t) { return C * exp(k * t); };
-    return C * std::exp(k * t);
+    return C * Math::exp(k * t);
}

-std::vector<real_t> MLPPNumericalAnalysis::jacobian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
-    std::vector<real_t> jacobian;
-    jacobian.resize(x.size());
-    for (uint32_t i = 0; i < jacobian.size(); i++) {
-        jacobian[i] = numDiff(function, x, i); // Derivative w.r.t axis i evaluated at x. For all x_i.
+Ref<MLPPVector> MLPPNumericalAnalysis::jacobian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
+    Ref<MLPPVector> jacobian;
+    jacobian.instance();
+    jacobian->resize(x->size());
+
+    int jacobian_size = jacobian->size();
+
+    for (int i = 0; i < jacobian_size; ++i) {
+        jacobian->element_set(i, num_diffv(function, x, i)); // Derivative w.r.t axis i evaluated at x. For all x_i.
    }
+
    return jacobian;
}

-std::vector<std::vector<real_t>> MLPPNumericalAnalysis::hessian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
-    std::vector<std::vector<real_t>> hessian;
-    hessian.resize(x.size());
-    for (uint32_t i = 0; i < hessian.size(); i++) {
-        hessian[i].resize(x.size());
-    }
+Ref<MLPPMatrix> MLPPNumericalAnalysis::hessian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
+    Ref<MLPPMatrix> hessian;
+    hessian.instance();
+    hessian->resize(Size2i(x->size(), x->size()));

-    for (uint32_t i = 0; i < hessian.size(); i++) {
-        for (uint32_t j = 0; j < hessian[i].size(); j++) {
-            hessian[i][j] = numDiff_2(function, x, i, j);
+    Size2i hessian_size = hessian->size();
+
+    for (int i = 0; i < hessian_size.y; i++) {
+        for (int j = 0; j < hessian_size.x; j++) {
+            hessian->element_set(i, j, num_diff_2v(function, x, i, j));
        }
    }
    return hessian;
}

-std::vector<std::vector<std::vector<real_t>>> MLPPNumericalAnalysis::thirdOrderTensor(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
-    std::vector<std::vector<std::vector<real_t>>> tensor;
-    tensor.resize(x.size());
+Ref<MLPPTensor3> MLPPNumericalAnalysis::third_order_tensor(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
+    Ref<MLPPTensor3> tensor;
+    tensor.instance();
+    tensor->resize(Size3i(x->size(), x->size(), x->size()));

-    for (uint32_t i = 0; i < tensor.size(); i++) {
-        tensor[i].resize(x.size());
-        for (uint32_t j = 0; j < tensor[i].size(); j++) {
-            tensor[i][j].resize(x.size());
-        }
-    }
+    Size3i tensor_size = tensor->size();

-    for (uint32_t i = 0; i < tensor.size(); i++) { // O(n^3) time complexity :(
-        for (uint32_t j = 0; j < tensor[i].size(); j++) {
-            for (uint32_t k = 0; k < tensor[i][j].size(); k++)
-                tensor[i][j][k] = numDiff_3(function, x, i, j, k);
+    for (int i = 0; i < tensor_size.z; i++) { // O(n^3) time complexity :(
+        for (int j = 0; j < tensor_size.y; j++) {
+            for (int k = 0; k < tensor_size.x; k++)
+                tensor->element_set(i, j, k, num_diff_3v(function, x, i, j, k));
        }
    }

    return tensor;
}

-real_t MLPPNumericalAnalysis::constantApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c) {
+Vector<Ref<MLPPMatrix>> MLPPNumericalAnalysis::third_order_tensorvt(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
+    int x_size = x->size();
+
+    Vector<Ref<MLPPMatrix>> tensor;
+    tensor.resize(x_size);
+
+    for (int i = 0; i < tensor.size(); i++) {
+        Ref<MLPPMatrix> m;
+        m.instance();
+        m->resize(Size2i(x_size, x_size));
+
+        tensor.write[i] = m;
+    }
+
+    for (int i = 0; i < tensor.size(); i++) { // O(n^3) time complexity :(
+        Ref<MLPPMatrix> m = tensor[i];
+
+        for (int j = 0; j < x_size; j++) {
+            for (int k = 0; k < x_size; k++) {
+                m->element_set(j, k, num_diff_3v(function, x, i, j, k));
+            }
+        }
+    }
+
+    return tensor;
+}
+
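The entries filled in by jacobian, hessian, and the two third-order builders above are simply the corresponding forward-difference partial derivatives of the scalar-valued callback:

\[ J_i \approx \frac{\partial f}{\partial x_i}(x), \qquad H_{ij} \approx \frac{\partial^2 f}{\partial x_i \partial x_j}(x), \qquad T_{ijk} \approx \frac{\partial^3 f}{\partial x_i \partial x_j \partial x_k}(x) \]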
+real_t MLPPNumericalAnalysis::constant_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c) {
    return function(c);
}

-real_t MLPPNumericalAnalysis::linearApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
+real_t MLPPNumericalAnalysis::linear_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x) {
    MLPPLinAlg alg;
-    return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0];
+
+    Ref<MLPPVector> j = jacobian(function, c);
+    Ref<MLPPMatrix> mj;
+    mj.instance();
+    mj->row_add_mlpp_vector(j);
+
+    Ref<MLPPVector> xsc = x->subn(c);
+    Ref<MLPPMatrix> mxsc;
+    mxsc.instance();
+    mxsc->row_add_mlpp_vector(xsc);
+
+    Ref<MLPPMatrix> m = mj->transposen()->multn(mxsc);
+
+    return constant_approximationv(function, c) + m->element_get(0, 0);
}

-real_t MLPPNumericalAnalysis::quadraticApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
+real_t MLPPNumericalAnalysis::quadratic_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x) {
    MLPPLinAlg alg;
-    return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0];
+
+    Ref<MLPPMatrix> h = hessian(function, c);
+
+    Ref<MLPPVector> xsc = x->subn(c);
+    Ref<MLPPMatrix> mxsc;
+    mxsc.instance();
+    mxsc->row_add_mlpp_vector(xsc);
+
+    Ref<MLPPMatrix> r = mxsc->multn(h->multn(mxsc->transposen()));
+
+    return linear_approximationv(function, c, x) + 0.5 * r->element_get(0, 0);
}

-real_t MLPPNumericalAnalysis::cubicApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
+real_t MLPPNumericalAnalysis::cubic_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x) {
    //Not completely sure as the literature seldom discusses the third order taylor approximation,
    //in particular for multivariate cases, but ostensibly, the matrix/tensor/vector multiplies
    //should look something like this:
@@ -247,35 +304,45 @@ real_t MLPPNumericalAnalysis::cubicApproximation(real_t (*function)(std::vector<
    //(N x N x N) (N x 1) [tensor vector mult] => (N x N x 1) => (N x N)
    //Perform remaining multiplies as done for the 2nd order approximation.
    //Result is a scalar.
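Spelled out, the term the code below assembles is the standard third-order multivariate Taylor correction (with the same caveat as the comment above); note also that (1 / 6) is integer division in C++, so the cubic correction as written contributes 0:

\[ f(x) \approx f(c) + J(c)^{\top}(x-c) + \tfrac{1}{2}(x-c)^{\top} H(c) (x-c) + \tfrac{1}{6} \sum_{i,j,k} T_{ijk}(c)\,(x-c)_i (x-c)_j (x-c)_k \]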
-
-    MLPPLinAlg alg;
-    std::vector<std::vector<real_t>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c));
-    real_t resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0];
-    return quadraticApproximation(function, c, x) + (1 / 6) * resultScalar;
+    MLPPLinAlg alg;
+
+    Ref<MLPPVector> xsc = x->subn(c);
+    Ref<MLPPMatrix> mxsc;
+    mxsc.instance();
+    mxsc->row_add_mlpp_vector(xsc);
+
+    Ref<MLPPMatrix> result_mat = third_order_tensor(function, c)->tensor_vec_mult(xsc);
+    real_t result_scalar = mxsc->multn(result_mat->multn(mxsc->transposen()))->element_get(0, 0);
+
+    return quadratic_approximationv(function, c, x) + (1 / 6) * result_scalar;
}

-real_t MLPPNumericalAnalysis::laplacian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
-    std::vector<std::vector<real_t>> hessian_matrix = hessian(function, x);
+real_t MLPPNumericalAnalysis::laplacian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
+    Ref<MLPPMatrix> hessian_matrix = hessian(function, x);

    real_t laplacian = 0;
-    for (uint32_t i = 0; i < hessian_matrix.size(); i++) {
-        laplacian += hessian_matrix[i][i]; // homogeneous 2nd derivs w.r.t i, then i
+
+    Size2i hessian_matrix_size = hessian_matrix->size();
+
+    for (int i = 0; i < hessian_matrix_size.y; i++) {
+        laplacian += hessian_matrix->element_get(i, i); // homogeneous 2nd derivs w.r.t i, then i
    }
    return laplacian;
}

-std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
+String MLPPNumericalAnalysis::second_partial_derivative_test(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x) {
    MLPPLinAlg alg;
-    std::vector<std::vector<real_t>> hessianMatrix = hessian(function, x);
-
+    Ref<MLPPMatrix> hessian_matrix = hessian(function, x);
+
+    Size2i hessian_matrix_size = hessian_matrix->size();
+
    // The reason we do this is because the 2nd partial derivative test is less conclusive for functions of variables greater than
    // 2, and the calculations specific to the bivariate case are less computationally intensive.
-
-    if (x.size() == 2) {
-        real_t det = alg.det(hessianMatrix, hessianMatrix.size());
-        real_t secondDerivative = numDiff_2(function, x, 0, 0);
+
+    if (hessian_matrix_size.y == 2) {
+        real_t det = hessian_matrix->det();
+        real_t secondDerivative = num_diff_2v(function, x, 0, 0);
        if (secondDerivative > 0 && det > 0) {
            return "min";
        } else if (secondDerivative < 0 && det > 0) {
@@ -286,18 +353,17 @@ std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(real_t (*function
            return "test was inconclusive";
        }
    } else {
-        if (alg.positiveDefiniteChecker(hessianMatrix)) {
+        if (alg.positive_definite_checker(hessian_matrix)) {
            return "min";
-        } else if (alg.negativeDefiniteChecker(hessianMatrix)) {
+        } else if (alg.negative_definite_checker(hessian_matrix)) {
            return "max";
-        } else if (!alg.zeroEigenvalue(hessianMatrix)) {
+        } else if (!alg.zero_eigenvalue(hessian_matrix)) {
            return "saddle";
        } else {
            return "test was inconclusive";
        }
    }
}
-*/

void MLPPNumericalAnalysis::_bind_methods() {
}

diff --git a/mlpp/numerical_analysis/numerical_analysis.h b/mlpp/numerical_analysis/numerical_analysis.h
index ffe6a06..e170800 100644
--- a/mlpp/numerical_analysis/numerical_analysis.h
+++ b/mlpp/numerical_analysis/numerical_analysis.h
@@ -11,8 +11,12 @@

#include "core/object/reference.h"

-#include
-#include
+#include "core/containers/vector.h"
+#include "core/string/ustring.h"
+
+class MLPPVector;
+class MLPPMatrix;
+class MLPPTensor3;

class MLPPNumericalAnalysis : public Reference {
    GDCLASS(MLPPNumericalAnalysis, Reference);
@@ -22,42 +26,42 @@ public:
    as an analytical method for calculating derivatives will most likely be used in the future.
    */

-    /*
-    real_t numDiff(real_t (*function)(real_t), real_t x);
-    real_t numDiff_2(real_t (*function)(real_t), real_t x);
-    real_t numDiff_3(real_t (*function)(real_t), real_t x);
-    real_t constantApproximation(real_t (*function)(real_t), real_t c);
-    real_t linearApproximation(real_t (*function)(real_t), real_t c, real_t x);
-    real_t quadraticApproximation(real_t (*function)(real_t), real_t c, real_t x);
-    real_t cubicApproximation(real_t (*function)(real_t), real_t c, real_t x);
+    real_t num_diffr(real_t (*function)(real_t), real_t x);
+    real_t num_diff_2r(real_t (*function)(real_t), real_t x);
+    real_t num_diff_3r(real_t (*function)(real_t), real_t x);

-    real_t numDiff(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis);
-    real_t numDiff_2(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis1, int axis2);
-    real_t numDiff_3(real_t (*function)(std::vector<real_t>), std::vector<real_t> x, int axis1, int axis2, int axis3);
+    real_t constant_approximationr(real_t (*function)(real_t), real_t c);
+    real_t linear_approximationr(real_t (*function)(real_t), real_t c, real_t x);
+    real_t quadratic_approximationr(real_t (*function)(real_t), real_t c, real_t x);
+    real_t cubic_approximationr(real_t (*function)(real_t), real_t c, real_t x);

-    real_t newtonRaphsonMethod(real_t (*function)(real_t), real_t x_0, real_t epoch_num);
-    real_t halleyMethod(real_t (*function)(real_t), real_t x_0, real_t epoch_num);
-    real_t invQuadraticInterpolation(real_t (*function)(real_t), std::vector<real_t> x_0, int epoch_num);
+    real_t num_diffv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &, int axis);
+    real_t num_diff_2v(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &, int axis1, int axis2);
+    real_t num_diff_3v(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &, int axis1, int axis2, int axis3);

-    real_t eulerianMethod(real_t (*derivative)(real_t), std::vector<real_t> q_0, real_t p, real_t h); // Euler's method for solving differential equations.
-    real_t eulerianMethod(real_t (*derivative)(std::vector<real_t>), std::vector<real_t> q_0, real_t p, real_t h); // Euler's method for solving differential equations.
+    real_t newton_raphson_method(real_t (*function)(real_t), real_t x_0, real_t epoch_num);
+    real_t halley_method(real_t (*function)(real_t), real_t x_0, real_t epoch_num);
+    real_t inv_quadratic_interpolation(real_t (*function)(real_t), const Ref<MLPPVector> &x_0, int epoch_num);

-    real_t growthMethod(real_t C, real_t k, real_t t); // General growth-based differential equations can be solved by separation of variables.
+    real_t eulerian_methodr(real_t (*derivative)(real_t), real_t q_0, real_t q_1, real_t p, real_t h); // Euler's method for solving differential equations.
+    real_t eulerian_methodv(real_t (*derivative)(const Ref<MLPPVector> &), real_t q_0, real_t q_1, real_t p, real_t h); // Euler's method for solving differential equations.

-    std::vector<real_t> jacobian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x); // Indeed, for functions with scalar outputs the Jacobians will be vectors.
-    std::vector<std::vector<real_t>> hessian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x);
-    std::vector<std::vector<std::vector<real_t>>> thirdOrderTensor(real_t (*function)(std::vector<real_t>), std::vector<real_t> x);
+    real_t growth_method(real_t C, real_t k, real_t t); // General growth-based differential equations can be solved by separation of variables.

-    real_t constantApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c);
-    real_t linearApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x);
-    real_t quadraticApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x);
-    real_t cubicApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x);
+    Ref<MLPPVector> jacobian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x); // Indeed, for functions with scalar outputs the Jacobians will be vectors.
+    Ref<MLPPMatrix> hessian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x);
+    Ref<MLPPTensor3> third_order_tensor(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x);
+    Vector<Ref<MLPPMatrix>> third_order_tensorvt(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x);

-    real_t laplacian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x); // laplacian
+    real_t constant_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c);
+    real_t linear_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x);
+    real_t quadratic_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x);
+    real_t cubic_approximationv(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &c, const Ref<MLPPVector> &x);

-    std::string secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x);
-    */
+    real_t laplacian(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x); // laplacian
+
+    String second_partial_derivative_test(real_t (*function)(const Ref<MLPPVector> &), const Ref<MLPPVector> &x);

protected:
    static void _bind_methods();

diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index 0e05314..b96c559 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -1206,7 +1206,9 @@ real_t f_prime(real_t x) {
    return 2 * x;
}

-real_t f_prime_2var(std::vector<real_t> x) {
+real_t f_prime_2var(const Ref<MLPPVector> &p_x) {
+    const real_t *x = p_x->ptr();
+
    return 2 * x[0] + x[1];
}
/*
@@ -1227,7 +1229,9 @@ real_t f_prime_2var(std::vector<real_t> x) {
    ∂^2f/∂x∂y = 2
*/
-real_t f_mv(std::vector<real_t> x) {
+real_t f_mv(const Ref<MLPPVector> &p_x) {
+    const real_t *x = p_x->ptr();
+
    return x[0] * x[0] * x[0] + x[0] + x[1] * x[1] * x[1] * x[0] + x[2] * x[2] * x[1];
}

/*
@@ -1259,79 +1263,266 @@ real_t f_mv(std::vector<real_t> x) {
*/
void MLPPTests::test_numerical_analysis() {
-    /*
    MLPPLinAlg alg;
-    MLPPConvolutionsOld conv;
+    MLPPConvolutions conv;

    // Checks for numerical analysis class.
-    MLPPNumericalAnalysisOld numAn;
+    MLPPNumericalAnalysis num_an;

-    std::cout << numAn.quadraticApproximation(f, 0, 1) << std::endl;
+    /*
+    //1
+    PLOG_MSG("num_an.quadratic_approximationr(f, 0, 1)");
+    PLOG_MSG(String::num(num_an.quadratic_approximationr(f, 0, 1)));

-    std::cout << numAn.cubicApproximation(f, 0, 1.001) << std::endl;
+    //1.001
+    PLOG_MSG("num_an.cubic_approximationr(f, 0, 1.001)");
+    PLOG_MSG(String::num(num_an.cubic_approximationr(f, 0, 1.001)));

-    std::cout << f(1.001) << std::endl;
+    //0.842011
+    PLOG_MSG("f(1.001)");
+    PLOG_MSG(String::num(f(1.001)));

-    std::cout << numAn.quadraticApproximation(f_mv, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;
-    std::cout << numAn.numDiff(&f, 1) << std::endl;
-    std::cout << numAn.newtonRaphsonMethod(&f, 1, 1000) << std::endl;
-    std::cout << numAn.invQuadraticInterpolation(&f, { 100, 2, 1.5 }, 10) << std::endl;
+    Ref<MLPPVector> v30;
+    v30.instance();
+    v30->resize(3);
+    v30->fill(0);

-    std::cout << numAn.numDiff(&f_mv, { 1, 1 }, 1) << std::endl; // Derivative w.r.t. x.
-    alg.printVector(numAn.jacobian(&f_mv, { 1, 1 }));
+    Ref<MLPPVector> v31;
+    v31.instance();
+    v31->resize(3);
+    v31->fill(1);

-    std::cout << numAn.numDiff_2(&f, 2) << std::endl;
+    //1.00001
+    PLOG_MSG("num_an.quadratic_approximationv(f_mv, v30, v31)");
+    PLOG_MSG(String::num(num_an.quadratic_approximationv(f_mv, v30, v31)));

-    std::cout << numAn.numDiff_3(&f, 2) << std::endl;
-    std::cout << numAn.numDiff_2(&f_mv, { 2, 2, 500 }, 2, 2) << std::endl;
-    std::cout << numAn.numDiff_3(&f_mv, { 2, 1000, 130 }, 0, 0, 0) << std::endl;
+    const real_t iqi_arr[] = { 100, 2, 1.5 };
+    Ref<MLPPVector> iqi(memnew(MLPPVector(iqi_arr, 3)));

-    alg.printTensor(numAn.thirdOrderTensor(&f_mv, { 1, 1, 1 }));
-    std::cout << "Our Hessian." << std::endl;
-    alg.printMatrix(numAn.hessian(&f_mv, { 2, 2, 500 }));
+    //0
+    PLOG_MSG("num_an.num_diffr(&f, 1)");
+    PLOG_MSG(String::num(num_an.num_diffr(&f, 1)));

-    std::cout << numAn.laplacian(f_mv, { 1, 1, 1 }) << std::endl;
+    //-nan
+    //nan
+    PLOG_MSG("num_an.newton_raphson_method(&f, 1, 1000)");
+    PLOG_MSG(String::num(num_an.newton_raphson_method(&f, 1, 1000)));

-    std::vector<std::vector<std::vector<real_t>>> tensor;
-    tensor.push_back({ { 1, 2 }, { 1, 2 }, { 1, 2 } });
-    tensor.push_back({ { 1, 2 }, { 1, 2 }, { 1, 2 } });
+    //2.17152e+07
+    //21715200
+    PLOG_MSG("num_an.inv_quadratic_interpolation(&f, iqi, 10)");
+    PLOG_MSG(String::num(num_an.inv_quadratic_interpolation(&f, iqi, 10)));

-    alg.printTensor(tensor);
-    alg.printMatrix(alg.tensor_vec_mult(tensor, { 1, 2 }));
+    Ref<MLPPVector> v21;
+    v21.instance();
+    v21->resize(2);
+    v21->fill(1);

-    std::cout << numAn.cubicApproximation(f_mv, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;
-    std::cout << numAn.eulerianMethod(f_prime, { 1, 1 }, 1.5, 0.000001) << std::endl;
-    std::cout << numAn.eulerianMethod(f_prime_2var, { 2, 3 }, 2.5, 0.00000001) << std::endl;
+    //0
+    PLOG_MSG("num_an.num_diffv(&f_mv, v21, 1)");
+    PLOG_MSG(String::num(num_an.num_diffv(&f_mv, v21, 1))); // Derivative w.r.t. x.

-    std::vector<std::vector<real_t>> A = {
+    //[MLPPVector: 0 0 ]
+    PLOG_MSG("num_an.jacobian(&f_mv, v21)");
+    PLOG_MSG(num_an.jacobian(&f_mv, v21)->to_string());
+
+    //596.046509
+    PLOG_MSG("num_an.num_diff_2r(&f, 2)");
+    PLOG_MSG(String::num(num_an.num_diff_2r(&f, 2)));
+
+    //-119209304
+    PLOG_MSG("num_an.num_diff_3r(&f, 2)");
+    PLOG_MSG(String::num(num_an.num_diff_3r(&f, 2)));
+
+    const real_t nd2v_arr[] = { 100, 2, 1.5 };
+    Ref<MLPPVector> nd2v(memnew(MLPPVector(nd2v_arr, 3)));
+
+    //0
+    PLOG_MSG("num_an.num_diff_2v(&f_mv, nd2v, 2, 2)");
+    PLOG_MSG(String::num(num_an.num_diff_2v(&f_mv, nd2v, 2, 2)));
+
+    const real_t nd3v_arr[] = { 2, 1000, 130 };
+    Ref<MLPPVector> nd3v(memnew(MLPPVector(nd3v_arr, 3)));
+
+    //128000015514730496
+    PLOG_MSG("num_an.num_diff_3v(&f_mv, nd3v, 0, 0, 0)");
+    PLOG_MSG(String::num(num_an.num_diff_3v(&f_mv, nd3v, 0, 0, 0)));
+    */
+
+    Ref<MLPPVector> v31t;
+    v31t.instance();
+    v31t->resize(3);
+    v31t->fill(1);
+
+    /*
+    [MLPPTensor3:
+    [ [ 0 0 0 ]
+      [ 0 0 0 ]
+      [ 0 0 0 ]
+    ],
+    [ [ 0 0 0 ]
+      [ 0 0 0 ]
+      [ 0 0 0 ]
+    ],
+    [ [ 0 0 0 ]
+      [ 0 0 0 ]
+      [ 0 0 0 ]
+    ],
+    ]
+    */
+    PLOG_MSG("num_an.third_order_tensor(&f_mv, v31t)");
+    PLOG_MSG(num_an.third_order_tensor(&f_mv, v31t)->to_string());
+
+    const real_t hess_arr[] = { 2, 2, 500 };
+    Ref<MLPPVector> hess(memnew(MLPPVector(hess_arr, 3)));
+
+    /*
+    [MLPPMatrix:
+    [ 0 0 0 ]
+    [ 0 0 0 ]
+    [ 0 0 0 ]
+    ]
+    */
+    PLOG_MSG("Our Hessian.");
+    PLOG_MSG(num_an.hessian(&f_mv, hess)->to_string());
+
+    Ref<MLPPVector> v31l;
+    v31l.instance();
+    v31l->resize(3);
+    v31l->fill(1);
+
+    //0
+    PLOG_MSG("num_an.laplacian(f_mv, v31l)");
+    PLOG_MSG(String::num(num_an.laplacian(f_mv, v31l)));
+
+    std::vector<std::vector<std::vector<real_t>>> tensor_arr;
+    tensor_arr.push_back({ { 1, 2 }, { 1, 2 }, { 1, 2 } });
+    tensor_arr.push_back({ { 1, 2 }, { 1, 2 }, { 1, 2 } });
+
+    Ref<MLPPTensor3> tensor;
+    tensor.instance();
+    tensor->set_from_std_vectors(tensor_arr);
+
+    /*
+    [MLPPTensor3:
+    [ [ 1 2 ]
+      [ 1 2 ]
+      [ 1 2 ]
+    ],
+    [ [ 1 2 ]
+      [ 1 2 ]
+      [ 1 2 ]
+    ],
+    ]
+    */
+    PLOG_MSG("tensor->to_string()");
+    PLOG_MSG(tensor->to_string());
+
+    const real_t tvm_arr[] = { 1, 2 };
+    Ref<MLPPVector> tvm(memnew(MLPPVector(tvm_arr, 2)));
+
+    /*
+    [MLPPMatrix:
+    [ 5 5 5 ]
+    [ 5 5 5 ]
+    ]
+    */
+    PLOG_MSG("tensor->tensor_vec_mult(tvm)");
+    PLOG_MSG(tensor->tensor_vec_mult(tvm)->to_string());
+
+    Ref<MLPPVector> v30;
+    v30.instance();
+    v30->resize(3);
+    v30->fill(0);
+
+    Ref<MLPPVector> v31;
+    v31.instance();
+    v31->resize(3);
+    v31->fill(1);
+
+    //1.00001
+    PLOG_MSG("num_an.cubic_approximationv(f_mv, v30, v31)");
+    PLOG_MSG(String::num(num_an.cubic_approximationv(f_mv, v30, v31)));
+
+    //2.236699
+    PLOG_MSG("num_an.eulerian_methodv(f_prime, 1, 1, 1.5, 0.000001)");
+    PLOG_MSG(String::num(num_an.eulerian_methodr(f_prime, 1, 1, 1.5, 0.000001)));
+
+    //3
+    PLOG_MSG("num_an.eulerian_methodv(f_prime_2var, 2, 3, 2.5, 0.00000001)");
+    PLOG_MSG(String::num(num_an.eulerian_methodv(f_prime_2var, 2, 3, 2.5, 0.00000001)));
+
+    std::vector<std::vector<real_t>> A_arr = {
        { 1, 0, 0, 0 },
        { 0, 0, 0, 0 },
        { 0, 0, 0, 0 },
        { 0, 0, 0, 1 }
    };

-    alg.printMatrix(conv.dx(A));
-    alg.printMatrix(conv.dy(A));
+    Ref<MLPPMatrix> A(memnew(MLPPMatrix(A_arr)));

-    alg.printMatrix(conv.grad_orientation(A));
+    /*
+    [MLPPMatrix:
+    [ 0 -1 0 -0 ]
+    [ 0 0 0 -0 ]
+    [ 0 0 0 -0 ]
+    [ 0 0 1 -0 ]
+    ]
+    */
+    PLOG_MSG("conv.dx(A)");
+    PLOG_MSG(conv.dx(A)->to_string());

-    std::vector> h = conv.harris_corner_detection(A);
+    /*
+    0 0 0 0
+    1 0 0 0
+    0 0 0 -1
+    0 0 0 0
+    */
+    PLOG_MSG("conv.dy(A)");
+    PLOG_MSG(conv.dy(A)->to_string());

-    for (uint32_t i = 0; i < h.size(); i++) {
-        for (uint32_t j = 0; j < h[i].size(); j++) {
-            std::cout << h[i][j] << " ";
+    /*
+    0 3.14159 0 0
+    1.5708 0 0 0
+    0 0 0 -1.5708
+    0 0 0 0
+    */
+    PLOG_MSG("conv.grad_orientation(A)");
+    PLOG_MSG(conv.grad_orientation(A)->to_string());
+
+    /*
+    C C E N
+    C C C E
+    E C C C
+    N E C C
+    */
+    PLOG_MSG("conv.harris_corner_detection(A)");
+    Vector> h = conv.harris_corner_detection(A);
+
+    String h_res;
+
+    for (int i = 0; i < h.size(); i++) {
+        for (int j = 0; j < h[i].size(); j++) {
+            h_res += h[i][j];
+            h_res += " ";
        }
-        std::cout << std::endl;
+        h_res += "\n";
    } // Harris detector works. Life is good!

-    std::vector<real_t> a = { 3, 4, 4 };
-    std::vector<real_t> b = { 4, 4, 4 };
-    alg.printVector(alg.cross(a, b));
-    */
+    PLOG_MSG(h_res);
+
+    std::vector<real_t> a_arr = { 3, 4, 4 };
+    Ref<MLPPVector> a(memnew(MLPPVector(a_arr)));
+    std::vector<real_t> b_arr = { 4, 4, 4 };
+    Ref<MLPPVector> b(memnew(MLPPVector(b_arr)));
+
+    //[MLPPVector: 0 4 -4 ]
+    PLOG_MSG("a->cross(b)");
+    PLOG_MSG(a->cross(b)->to_string());
}

void MLPPTests::test_support_vector_classification_kernel(bool ui) {
    MLPPLinAlg alg;

diff --git a/test/mlpp_tests_old.cpp b/test/mlpp_tests_old.cpp
index 6fec46f..e5a5273 100644
--- a/test/mlpp_tests_old.cpp
+++ b/test/mlpp_tests_old.cpp
@@ -541,6 +541,7 @@ void MLPPTestsOld::test_numerical_analysis() {
    // Checks for numerical analysis class.
    MLPPNumericalAnalysisOld numAn;

+    /*
    std::cout << numAn.quadraticApproximation(f_old, 0, 1) << std::endl;

    std::cout << numAn.cubicApproximation(f_old, 0, 1.001) << std::endl;
@@ -549,10 +550,12 @@ void MLPPTestsOld::test_numerical_analysis() {

    std::cout << numAn.quadraticApproximation(f_mv_old, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;

+
    std::cout << numAn.numDiff(&f_old, 1) << std::endl;
    std::cout << numAn.newtonRaphsonMethod(&f_old, 1, 1000) << std::endl;
    std::cout << numAn.invQuadraticInterpolation(&f_old, { 100, 2, 1.5 }, 10) << std::endl;

+
    std::cout << numAn.numDiff(&f_mv_old, { 1, 1 }, 1) << std::endl; // Derivative w.r.t. x.
    alg.printVector(numAn.jacobian(&f_mv_old, { 1, 1 }));

@@ -564,6 +567,8 @@ void MLPPTestsOld::test_numerical_analysis() {
    std::cout << numAn.numDiff_2(&f_mv_old, { 2, 2, 500 }, 2, 2) << std::endl;
    std::cout << numAn.numDiff_3(&f_mv_old, { 2, 1000, 130 }, 0, 0, 0) << std::endl;

+
+
    alg.printTensor(numAn.thirdOrderTensor(&f_mv_old, { 1, 1, 1 }));
    std::cout << "Our Hessian." << std::endl;
    alg.printMatrix(numAn.hessian(&f_mv_old, { 2, 2, 500 }));
@@ -577,11 +582,12 @@ void MLPPTestsOld::test_numerical_analysis() {
    alg.printTensor(tensor);
    alg.printMatrix(alg.tensor_vec_mult(tensor, { 1, 2 }));

+
    std::cout << numAn.cubicApproximation(f_mv_old, { 0, 0, 0 }, { 1, 1, 1 }) << std::endl;
    std::cout << numAn.eulerianMethod(f_prime_old, { 1, 1 }, 1.5, 0.000001) << std::endl;
    std::cout << numAn.eulerianMethod(f_prime_2var_old, { 2, 3 }, 2.5, 0.00000001) << std::endl;
-
+    */
    std::vector<std::vector<real_t>> A = {
        { 1, 0, 0, 0 },
        { 0, 0, 0, 0 },
@@ -589,8 +595,9 @@ void MLPPTestsOld::test_numerical_analysis() {
        { 0, 0, 0, 1 }
    };

-    alg.printMatrix(conv.dx(A));
-    alg.printMatrix(conv.dy(A));
+    //alg.printMatrix(conv.dx(A));
+    //alg.printMatrix(conv.dy(A));
+
    alg.printMatrix(conv.grad_orientation(A));
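As a quick orientation, here is a minimal sketch of how the reworked interface is called, mirroring the patterns exercised in mlpp_tests.cpp above; the callback name my_scalar_field and the include paths are illustrative only and not part of the patch:

#include "mlpp/lin_alg/mlpp_matrix.h"
#include "mlpp/lin_alg/mlpp_vector.h"
#include "mlpp/numerical_analysis/numerical_analysis.h"

// Illustrative scalar field f(x, y, z) = x^2 + y*z; not part of the patch.
real_t my_scalar_field(const Ref<MLPPVector> &p_x) {
    const real_t *x = p_x->ptr();
    return x[0] * x[0] + x[1] * x[2];
}

void numerical_analysis_example() {
    MLPPNumericalAnalysis num_an;

    // Evaluation point x = (1, 1, 1), built the same way the tests build their vectors.
    Ref<MLPPVector> x;
    x.instance();
    x->resize(3);
    x->fill(1);

    // First and second derivatives of the scalar field at x.
    Ref<MLPPVector> j = num_an.jacobian(&my_scalar_field, x);
    Ref<MLPPMatrix> h = num_an.hessian(&my_scalar_field, x);

    PLOG_MSG(j->to_string());
    PLOG_MSG(h->to_string());
}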