From 719556e9bc47551f60b0aeed6530b54459a61410 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Thu, 28 Dec 2023 23:26:14 +0100
Subject: [PATCH] Reworked more methods.

---
 mlpp/lin_alg/lin_alg.cpp     |  81 ++++++++++------
 mlpp/lin_alg/lin_alg.h       |  14 ++-
 mlpp/lin_alg/mlpp_matrix.cpp |  39 ++++++--
 mlpp/lin_alg/mlpp_matrix.h   |   9 +-
 mlpp/lin_alg/mlpp_vector.cpp |  17 ++--
 mlpp/lin_alg/mlpp_vector.h   |   5 +-
 test/mlpp_tests.cpp          | 173 +++++++++++++++++++++++++++++------
 test/mlpp_tests_old.cpp      |   7 +-
 8 files changed, 252 insertions(+), 93 deletions(-)

diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index 910616e..9c14435 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -869,45 +869,68 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref<MLPPMatrix> &A) {
 	return res;
 }
 
-/*
-std::vector<real_t> MLPPLinAlg::vectorProjection(std::vector<real_t> a, std::vector<real_t> b) {
-	real_t product = dot(a, b) / dot(a, a);
-	return scalarMultiply(product, a); // Projection of vector a onto b. Denotated as proj_a(b).
+Ref<MLPPVector> MLPPLinAlg::vector_projection(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+	real_t product = a->dot(b) / a->dot(a);
+
+	return a->scalar_multiplyn(product); // Projection of b onto a. Denoted as proj_a(b).
 }
-*/
 
-/*
-std::vector<std::vector<real_t>> MLPPLinAlg::gramSchmidtProcess(std::vector<std::vector<real_t>> A) {
-	A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use.
-	std::vector<std::vector<real_t>> B;
-	B.resize(A.size());
-	for (uint32_t i = 0; i < B.size(); i++) {
-		B[i].resize(A[0].size());
-	}
+Ref<MLPPMatrix> MLPPLinAlg::gram_schmidt_process(const Ref<MLPPMatrix> &p_A) {
+	Ref<MLPPMatrix> A = p_A->transposen();
+	Size2i a_size = A->size();
+
+	Ref<MLPPMatrix> B;
+	B.instance();
+	B->resize(a_size);
+	B->fill(0);
+
+	Ref<MLPPVector> b_i_row_tmp;
+	b_i_row_tmp.instance();
+	b_i_row_tmp->resize(a_size.x);
+
+	A->row_get_into_mlpp_vector(0, b_i_row_tmp);
+	b_i_row_tmp->scalar_multiply((real_t)1 / b_i_row_tmp->norm_2());
+	B->row_set_mlpp_vector(0, b_i_row_tmp);
+
+	Ref<MLPPVector> a_i_row_tmp;
+	a_i_row_tmp.instance();
+	a_i_row_tmp->resize(a_size.x);
+
+	Ref<MLPPVector> b_j_row_tmp;
+	b_j_row_tmp.instance();
+	b_j_row_tmp->resize(a_size.x);
+
+	for (int i = 1; i < a_size.y; ++i) {
+		A->row_get_into_mlpp_vector(i, b_i_row_tmp);
+		B->row_set_mlpp_vector(i, b_i_row_tmp);
 
-	B[0] = A[0]; // We set a_1 = b_1 as an initial condition.
-	B[0] = scalarMultiply(1 / norm_2(B[0]), B[0]);
-	for (uint32_t i = 1; i < B.size(); i++) {
-		B[i] = A[i];
 		for (int j = i - 1; j >= 0; j--) {
-			B[i] = subtraction(B[i], vectorProjection(B[j], A[i]));
-		}
-		B[i] = scalarMultiply(1 / norm_2(B[i]), B[i]); // Very simply multiply all elements of vec B[i] by 1/||B[i]||_2
-	}
-	return transpose(B); // We re-transpose the marix.
-}
-*/
+			A->row_get_into_mlpp_vector(i, a_i_row_tmp);
+			B->row_get_into_mlpp_vector(j, b_j_row_tmp);
+			B->row_get_into_mlpp_vector(i, b_i_row_tmp);
 
-/*
-MLPPLinAlg::QRDResult MLPPLinAlg::qrd(std::vector<std::vector<real_t>> A) {
+			b_i_row_tmp->sub(vector_projection(b_j_row_tmp, a_i_row_tmp));
+
+			B->row_set_mlpp_vector(i, b_i_row_tmp);
+		}
+
+		// Very simply multiply all elements of vec B[i] by 1/||B[i]||_2
+		B->row_get_into_mlpp_vector(i, b_i_row_tmp);
+		b_i_row_tmp->scalar_multiply((real_t)1 / b_i_row_tmp->norm_2());
+		B->row_set_mlpp_vector(i, b_i_row_tmp);
+	}
+
+	return B->transposen(); // We re-transpose the matrix.
+}
+
+MLPPLinAlg::QRDResult MLPPLinAlg::qrd(const Ref<MLPPMatrix> &A) {
 	QRDResult res;
 
-	res.Q = gramSchmidtProcess(A);
-	res.R = matmult(transpose(res.Q), A);
+	res.Q = gram_schmidt_process(A);
+	res.R = res.Q->transposen()->multn(A);
 
 	return res;
 }
-*/
 
 /*
 MLPPLinAlg::CholeskyResult MLPPLinAlg::cholesky(std::vector<std::vector<real_t>> A) {
diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h
index e71308d..3a5d245 100644
--- a/mlpp/lin_alg/lin_alg.h
+++ b/mlpp/lin_alg/lin_alg.h
@@ -16,7 +16,7 @@
 
 #include "core/object/reference.h"
 
-#else 
+#else
 
 #include "core/defs.h"
 #include "core/math_funcs.h"
@@ -110,18 +110,16 @@ public:
 
 	SVDResult svd(const Ref<MLPPMatrix> &A);
 
-	//std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
+	Ref<MLPPVector> vector_projection(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
 
-	//std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
+	Ref<MLPPMatrix> gram_schmidt_process(const Ref<MLPPMatrix> &A);
 
-	/*
 	struct QRDResult {
-		std::vector<std::vector<real_t>> Q;
-		std::vector<std::vector<real_t>> R;
+		Ref<MLPPMatrix> Q;
+		Ref<MLPPMatrix> R;
 	};
-	*/
 
-	//QRDResult qrd(std::vector<std::vector<real_t>> A);
+	QRDResult qrd(const Ref<MLPPMatrix> &A);
 
 	/*
 	struct CholeskyResult {
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index 75f9669..0c878fd 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -1475,6 +1475,27 @@ void MLPPMatrix::cbrtb(const Ref<MLPPMatrix> &A) {
 	exponentiateb(A, real_t(1) / real_t(3));
 }
 
+Ref<MLPPMatrix> MLPPMatrix::matrix_powern(const int n) const {
+	if (n == 0) {
+		return identity_mat(_size.y);
+	}
+
+	Ref<MLPPMatrix> A = Ref<MLPPMatrix>(this);
+	Ref<MLPPMatrix> B = identity_mat(_size.y);
+
+	if (n < 0) {
+		A = inverse();
+	}
+
+	int absn = ABS(n);
+
+	for (int i = 0; i < absn; i++) {
+		B->mult(A);
+	}
+
+	return B;
+}
+
 /*
 std::vector<std::vector<real_t>> MLPPMatrix::matrixPower(std::vector<std::vector<real_t>> A, int n) {
 	std::vector<std::vector<real_t>> B = identity(A.size());
@@ -1580,15 +1601,17 @@ real_t MLPPMatrix::detb(const Ref<MLPPMatrix> &A, int d) const {
 	return deter;
 }
 
-/*
-real_t MLPPMatrix::trace(std::vector<std::vector<real_t>> A) {
+real_t MLPPMatrix::trace() const {
 	real_t trace = 0;
-	for (uint32_t i = 0; i < A.size(); i++) {
-		trace += A[i][i];
+
+	int sm = MIN(_size.x, _size.y);
+
+	for (int i = 0; i < sm; ++i) {
+		trace += element_get(i, i);
 	}
+
 	return trace;
 }
-*/
 
 Ref<MLPPMatrix> MLPPMatrix::cofactor(int n, int i, int j) const {
 	Ref<MLPPMatrix> cof;
@@ -2663,11 +2686,11 @@ void MLPPMatrix::flatteno(Ref<MLPPVector> out) const {
 	}
 }
 
-/*
-std::vector<real_t> MLPPMatrix::solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b) {
-	return mat_vec_mult(inverse(A), b);
+Ref<MLPPVector> MLPPMatrix::solve(const Ref<MLPPVector> &b) const {
+	return inverse()->mult_vec(b);
 }
 
+/*
 bool MLPPMatrix::positiveDefiniteChecker(std::vector<std::vector<real_t>> A) {
 	auto eig_result = eig(A);
 	auto eigenvectors = std::get<0>(eig_result);
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index b890a18..614b012 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -209,7 +209,7 @@ public:
 	Ref<MLPPMatrix> cbrtn() const;
 	void cbrtb(const Ref<MLPPMatrix> &A);
 
-	//std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
+	Ref<MLPPMatrix> matrix_powern(const int n) const;
 
 	void abs();
 	Ref<MLPPMatrix> absn() const;
@@ -218,7 +218,7 @@ public:
 	real_t det(int d = -1) const;
 	real_t detb(const Ref<MLPPMatrix> &A, int d) const;
 
-	//real_t trace(std::vector<std::vector<real_t>> A);
+	real_t trace() const;
 
 	Ref<MLPPMatrix> cofactor(int n, int i, int j) const;
 	void cofactoro(int n, int i, int j, Ref<MLPPMatrix> out) const;
@@ -322,9 +322,10 @@ public:
 	Ref<MLPPVector> flatten() const;
 	void flatteno(Ref<MLPPVector> out) const;
 
-	/*
-	std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
+	Ref<MLPPVector> solve(const Ref<MLPPVector> &b) const;
+
+	/*
 	bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);
 
 	bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);
diff --git a/mlpp/lin_alg/mlpp_vector.cpp b/mlpp/lin_alg/mlpp_vector.cpp
index fd9e2de..1625967 100644
--- a/mlpp/lin_alg/mlpp_vector.cpp
+++ b/mlpp/lin_alg/mlpp_vector.cpp
@@ -1283,17 +1283,16 @@ real_t MLPPVector::euclidean_distance_squared(const Ref<MLPPVector> &b) const {
 	return dist;
 }
 
-/*
-real_t MLPPVector::norm_2(std::vector<std::vector<real_t>> A) {
-	real_t sum = 0;
-	for (uint32_t i = 0; i < A.size(); i++) {
-		for (uint32_t j = 0; j < A[i].size(); j++) {
-			sum += A[i][j] * A[i][j];
-		}
+real_t MLPPVector::norm_2() const {
+	const real_t *a_ptr = ptr();
+
+	real_t n_sq = 0;
+	for (int i = 0; i < _size; ++i) {
+		n_sq += a_ptr[i] * a_ptr[i];
 	}
-	return Math::sqrt(sum);
+
+	return Math::sqrt(n_sq);
 }
-*/
 
 real_t MLPPVector::norm_sq() const {
 	const real_t *a_ptr = ptr();
diff --git a/mlpp/lin_alg/mlpp_vector.h b/mlpp/lin_alg/mlpp_vector.h
index 0b2f64a..15086b4 100644
--- a/mlpp/lin_alg/mlpp_vector.h
+++ b/mlpp/lin_alg/mlpp_vector.h
@@ -226,10 +226,7 @@ public:
 	real_t euclidean_distance(const Ref<MLPPVector> &b) const;
 	real_t euclidean_distance_squared(const Ref<MLPPVector> &b) const;
 
-	/*
-	real_t norm_2(std::vector<real_t> a);
-	*/
-
+	real_t norm_2() const;
 	real_t norm_sq() const;
 
 	real_t sum_elements() const;
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index f8c00f9..949839b 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -1006,51 +1006,170 @@ void MLPPTests::test_outlier_finder(bool ui) {
 	PLOG_MSG(Variant(outlier_finder.model_test(input_set)));
 }
 
 void MLPPTests::test_new_math_functions() {
-	/*
 	MLPPLinAlg alg;
-	MLPPActivationOld avn;
+	MLPPActivation avn;
 	MLPPData data;
 
+	PLOG_MSG("logit:");
+
 	// Testing new Functions
 	real_t z_s = 0.001;
-	std::cout << avn.logit(z_s) << std::endl;
-	std::cout << avn.logit(z_s, true) << std::endl;
-
-	std::vector<real_t> z_v = { 0.001 };
-	alg.printVector(avn.logit(z_v));
-	alg.printVector(avn.logit(z_v, true));
+	//-6.906755
+	PLOG_MSG(String::num(avn.logit_normr(z_s)));
+	//1001.000916
+	PLOG_MSG(String::num(avn.logit_derivr(z_s)));
 
-	std::vector<std::vector<real_t>> Z_m = { { 0.001 } };
-	alg.printMatrix(avn.logit(Z_m));
-	alg.printMatrix(avn.logit(Z_m, true));
+	std::vector<real_t> z_v_sv = { 0.001 };
 
-	std::cout << alg.trace({ { 1, 2 }, { 3, 4 } }) << std::endl;
-	alg.printMatrix(alg.pinverse({ { 1, 2 }, { 3, 4 } }));
-	alg.printMatrix(alg.diag({ 1, 2, 3, 4, 5 }));
-	alg.printMatrix(alg.kronecker_product({ { 1, 2, 3, 4, 5 } }, { { 6, 7, 8, 9, 10 } }));
-	alg.printMatrix(alg.matrixPower({ { 5, 5 }, { 5, 5 } }, 2));
-	alg.printVector(alg.solve({ { 1, 1 }, { 1.5, 4.0 } }, { 2200, 5050 }));
+	Ref<MLPPVector> z_v;
+	z_v.instance();
+	z_v->set_from_std_vector(z_v_sv);
+
+	//[MLPPVector: -6.906755 ]
+	PLOG_MSG(avn.logit_normv(z_v)->to_string());
+	//[MLPPVector: 1001.000916 ]
+	PLOG_MSG(avn.logit_derivv(z_v)->to_string());
+
+	std::vector<std::vector<real_t>> Z_m_sv = { { 0.001 } };
+
+	Ref<MLPPMatrix> Z_m;
+	Z_m.instance();
+	Z_m->set_from_std_vectors(Z_m_sv);
+
+	//[MLPPMatrix:
+	//[ -6.906755 ]
+	//]
+	PLOG_MSG(avn.logit_normm(Z_m)->to_string());
+	//[MLPPMatrix:
+	//[ 1001.000916 ]
+	//]
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
+
+	const real_t trace_arr[] = {
+		1, 2, //
+		3, 4 //
+	};
+
+	Ref<MLPPMatrix> trace_mat(memnew(MLPPMatrix(trace_arr, 2, 2)));
+	//5
+	PLOG_MSG(String::num(trace_mat->trace()));
+
+	const real_t pinverse_arr[] = {
+		1, 2, //
+		3, 4 //
+	};
+
+	Ref<MLPPMatrix> pinverse_mat(memnew(MLPPMatrix(pinverse_arr, 2, 2)));
+	//[MLPPMatrix:
+	//[ -2 1.5 ]
+	//[ 1 -0.5 ]
+	//]
+	PLOG_MSG(pinverse_mat->pinverse()->to_string());
+
+	const real_t diag_arr[] = {
+		1, 2, 3, 4, 5
+	};
+
+	Ref<MLPPVector> diag_vec(memnew(MLPPVector(diag_arr, 5)));
+	//[MLPPMatrix:
+	// [ 1 0 0 0 0 ]
+	// [ 0 2 0 0 0 ]
+	// [ 0 0 3 0 0 ]
+	// [ 0 0 0 4 0 ]
+	// [ 0 0 0 0 5 ]
+	//]
+	PLOG_MSG(alg.diagnm(diag_vec)->to_string());
+
+	const real_t kronecker_product1_arr[] = {
+		1, 2, 3, 4, 5, //
+	};
+
+	const real_t kronecker_product2_arr[] = {
+		6, 7, 8, 9, 10 //
+	};
+
+	Ref<MLPPMatrix> kronecker_product1_mat(memnew(MLPPMatrix(kronecker_product1_arr, 1, 5)));
+	Ref<MLPPMatrix> kronecker_product2_mat(memnew(MLPPMatrix(kronecker_product2_arr, 1, 5)));
+	//[MLPPMatrix:
+	// [ 6 7 8 9 10 12 14 16 18 20 18 21 24 27 30 24 28 32 36 40 30 35 40 45 50 ]
+	//]
+	PLOG_MSG(kronecker_product1_mat->kronecker_productn(kronecker_product2_mat)->to_string());
+
+	const real_t power_arr[] = {
+		5, 5, //
+		5, 5 //
+	};
+
+	Ref<MLPPMatrix> power_mat(memnew(MLPPMatrix(power_arr, 2, 2)));
+	//[MLPPMatrix:
+	// [ 50 50 ]
+	// [ 50 50 ]
+	//]
+	PLOG_MSG(power_mat->matrix_powern(2)->to_string());
+
+	const real_t solve1_arr[] = {
+		1, 1, //
+		1.5, 4.0 //
+	};
+
+	const real_t solve2_arr[] = {
+		2200, 5050
+	};
+
+	Ref<MLPPMatrix> solve_mat(memnew(MLPPMatrix(solve1_arr, 2, 2)));
+	Ref<MLPPVector> solve_vec(memnew(MLPPVector(solve2_arr, 2)));
+	//[MLPPVector: 1500 700 ]
+	PLOG_MSG(solve_mat->solve(solve_vec)->to_string());
 
 	std::vector<std::vector<real_t>> matrixOfCubes = { { 1, 2, 64, 27 } };
 	std::vector<real_t> vectorOfCubes = { 1, 2, 64, 27 };
-	alg.printMatrix(alg.cbrt(matrixOfCubes));
-	alg.printVector(alg.cbrt(vectorOfCubes));
-	std::cout << alg.max({ { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } }) << std::endl;
-	std::cout << alg.min({ { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } }) << std::endl;
+
+	Ref<MLPPMatrix> matrix_of_cubes(memnew(MLPPMatrix(matrixOfCubes)));
+	Ref<MLPPVector> vector_of_cubes(memnew(MLPPVector(vectorOfCubes)));
+	PLOG_MSG(matrix_of_cubes->cbrtn()->to_string());
+	PLOG_MSG(vector_of_cubes->cbrtn()->to_string());
+
+	//std::vector<std::vector<real_t>> min_max_svec = { { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } };
+	//Ref<MLPPMatrix> min_max(memnew(MLPPMatrix(min_max_svec)));
 
 	//std::vector<real_t> chicken;
 	//data.getImage("../../Data/apple.jpeg", chicken);
 	//alg.printVector(chicken);
 
-	std::vector<std::vector<real_t>> P = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
-	alg.printMatrix(P);
+	std::vector<std::vector<real_t>> Pvec = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
+	Ref<MLPPMatrix> P(memnew(MLPPMatrix(Pvec)));
 
-	alg.printMatrix(alg.gramSchmidtProcess(P));
+	PLOG_MSG(P->to_string());
 
-	//MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
-	//alg.printMatrix(qrd_result.Q);
-	//alg.printMatrix(qrd_result.R);
-	*/
+	//[MLPPMatrix:
+	// [ 0.857143 -0.394286 -0.331429 ]
+	// [ 0.428571 0.902857 0.034286 ]
+	// [ -0.285714 0.171429 -0.942857 ]
+	//]
+	PLOG_MSG(alg.gram_schmidt_process(P)->to_string());
+
+	MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
+
+	//[MLPPMatrix:
+	// [ 0.857143 -0.394286 -0.331429 ]
+	// [ 0.428571 0.902857 0.034286 ]
+	// [ -0.285714 0.171429 -0.942857 ]
+	//]
+	PLOG_MSG(qrd_result.Q->to_string());
+	//[MLPPMatrix:
+	// [ 14.000001 21 -14.000003 ]
+	// [ -0 175 -70 ]
+	// [ 0.000001 0.000029 34.999989 ]
+	//]
+	PLOG_MSG(qrd_result.R->to_string());
 }
 
 void MLPPTests::test_positive_definiteness_checker() {
 	/*
diff --git a/test/mlpp_tests_old.cpp b/test/mlpp_tests_old.cpp
index 57173c4..6fec46f 100644
--- a/test/mlpp_tests_old.cpp
+++ b/test/mlpp_tests_old.cpp
@@ -442,12 +442,11 @@ void MLPPTestsOld::test_new_math_functions() {
 	std::vector<std::vector<real_t>> P = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
 	alg.printMatrix(P);
 
-	alg.printMatrix(alg.gramSchmidtProcess(P));
-
-	//MLPPLinAlgOld::QRDResult qrd_result = alg.qrd(P); // It works!
-	//alg.printMatrix(qrd_result.Q);
-	//alg.printMatrix(qrd_result.R);
+	MLPPLinAlgOld::QRDResult qrd_result = alg.qrd(P); // It works!
+	alg.printMatrix(qrd_result.Q);
+	alg.printMatrix(qrd_result.R);
 }
 
 void MLPPTestsOld::test_positive_definiteness_checker() {
 	//MLPPStat stat;
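
Note on verifying the new QR path: a minimal sanity-check sketch, assuming only the API introduced above (gram_schmidt_process, qrd, transposen, multn, to_string) and the PLOG_MSG macro already used in test/mlpp_tests.cpp; the helper name qrd_sanity_check_sketch is hypothetical. Because Q produced by Gram-Schmidt has orthonormal columns and R = Q^T * P, Q^T * Q should print an approximate identity matrix and Q * R should reproduce P up to floating-point error.

// Hypothetical helper, sketched against the reworked MLPPLinAlg / MLPPMatrix API.
void qrd_sanity_check_sketch() {
	MLPPLinAlg alg;

	std::vector<std::vector<real_t>> Pvec = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
	Ref<MLPPMatrix> P(memnew(MLPPMatrix(Pvec)));

	MLPPLinAlg::QRDResult qrd_result = alg.qrd(P);

	// Q^T * Q: expected to print (approximately) the 3x3 identity matrix.
	PLOG_MSG(qrd_result.Q->transposen()->multn(qrd_result.Q)->to_string());

	// Q * R: expected to reproduce P up to rounding error.
	PLOG_MSG(qrd_result.Q->multn(qrd_result.R)->to_string());
}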
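A similar sketch for the new MLPPMatrix entry points solve(), trace() and matrix_powern(), assuming the constructors and methods exercised in the updated tests (MLPPMatrix(const real_t *, int, int), mult_vec, inverse, String::num); the helper name solve_sanity_check_sketch is hypothetical. Substituting x = solve(b) back into A * x should reproduce b, and matrix_powern(-1) should agree with inverse().

// Hypothetical helper, sketched against the reworked MLPPMatrix API.
void solve_sanity_check_sketch() {
	const real_t a_arr[] = {
		1, 1, //
		1.5, 4.0 //
	};
	const real_t b_arr[] = { 2200, 5050 };

	Ref<MLPPMatrix> A(memnew(MLPPMatrix(a_arr, 2, 2)));
	Ref<MLPPVector> b(memnew(MLPPVector(b_arr, 2)));

	// x = A^-1 * b; expected [ 1500 700 ].
	Ref<MLPPVector> x = A->solve(b);
	PLOG_MSG(x->to_string());
	// A * x should give back b; expected [ 2200 5050 ].
	PLOG_MSG(A->mult_vec(x)->to_string());

	// trace(A) = 1 + 4.0 = 5; matrix_powern(-1) should match inverse().
	PLOG_MSG(String::num(A->trace()));
	PLOG_MSG(A->matrix_powern(-1)->to_string());
	PLOG_MSG(A->inverse()->to_string());
}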