From cfea6f9d833f4026df5a2a75b48bf342cf4f9b0d Mon Sep 17 00:00:00 2001
From: Relintai
Date: Mon, 24 Apr 2023 08:24:59 +0200
Subject: [PATCH] Added the matrix manipulation methods to MLPPMatrix.

---
 mlpp/lin_alg/lin_alg.cpp     |   58 +-
 mlpp/lin_alg/lin_alg.h       |    4 +-
 mlpp/lin_alg/mlpp_matrix.cpp | 1065 ++++++++++++++++++++++++++++++++++
 mlpp/lin_alg/mlpp_matrix.h   |  115 ++++
 4 files changed, 1211 insertions(+), 31 deletions(-)

diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index 8805297..7e622cb 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -950,35 +950,6 @@ real_t MLPPLinAlg::sum_elements(std::vector<std::vector<real_t>> A) {
 }
 */
 
-Ref<MLPPVector> MLPPLinAlg::flattenmnv(const Vector<Ref<MLPPVector>> &A) {
-	Ref<MLPPVector> a;
-	a.instance();
-
-	int vsize = 0;
-	for (int i = 0; i < A.size(); ++i) {
-		vsize += A[i]->size();
-	}
-
-	a->resize(vsize);
-
-	int a_index = 0;
-	real_t *a_ptr = a->ptrw();
-
-	for (int i = 0; i < A.size(); ++i) {
-		const Ref<MLPPVector> &r = A[i];
-
-		int r_size = r->size();
-		const real_t *r_ptr = r->ptr();
-
-		for (int j = 0; j < r_size; ++j) {
-			a_ptr[a_index] = r_ptr[j];
-			++a_index;
-		}
-	}
-
-	return a;
-}
-
 Ref<MLPPVector> MLPPLinAlg::flattenvvnv(const Ref<MLPPMatrix> &A) {
 	int data_size = A->data_size();
 
@@ -1053,6 +1024,35 @@ bool MLPPLinAlg::zeroEigenvalue(std::vector<std::vector<real_t>> A) {
 }
 */
 
+Ref<MLPPVector> MLPPLinAlg::flattenmnv(const Vector<Ref<MLPPVector>> &A) {
+	Ref<MLPPVector> a;
+	a.instance();
+
+	int vsize = 0;
+	for (int i = 0; i < A.size(); ++i) {
+		vsize += A[i]->size();
+	}
+
+	a->resize(vsize);
+
+	int a_index = 0;
+	real_t *a_ptr = a->ptrw();
+
+	for (int i = 0; i < A.size(); ++i) {
+		const Ref<MLPPVector> &r = A[i];
+
+		int r_size = r->size();
+		const real_t *r_ptr = r->ptr();
+
+		for (int j = 0; j < r_size; ++j) {
+			a_ptr[a_index] = r_ptr[j];
+			++a_index;
+		}
+	}
+
+	return a;
+}
+
 Ref<MLPPMatrix> MLPPLinAlg::outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
 	Ref<MLPPMatrix> C;
 	C.instance();
diff --git a/mlpp/lin_alg/lin_alg.h b/mlpp/lin_alg/lin_alg.h
index c3d7fb6..1dc879e 100644
--- a/mlpp/lin_alg/lin_alg.h
+++ b/mlpp/lin_alg/lin_alg.h
@@ -125,7 +125,6 @@ public:
 
 	//real_t sum_elements(std::vector<std::vector<real_t>> A);
 
-	Ref<MLPPVector> flattenmnv(const Vector<Ref<MLPPVector>> &A);
 	Ref<MLPPVector> flattenvvnv(const Ref<MLPPMatrix> &A);
 
 	/*
@@ -140,6 +139,8 @@ public:
 
 	// VECTOR FUNCTIONS
 
+	Ref<MLPPVector> flattenmnv(const Vector<Ref<MLPPVector>> &A);
+
 	Ref<MLPPVector> outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); // This multiplies a, bT
 	Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
@@ -200,7 +201,6 @@ public:
 
 	real_t norm_sqv(const Ref<MLPPVector> &a);
 
-	real_t sum_elementsv(const Ref<MLPPVector> &a);
 	//real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index 78e750f..d43cfd2 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -1,6 +1,1071 @@
 #include "mlpp_matrix.h"
 
+#include "../stat/stat.h"
+#include <random>
+
+Ref<MLPPVector> MLPPMatrix::scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a) {
+	ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPVector>());
+
+	Ref<MLPPVector> out;
+	out.instance();
+
+	int size = a->size();
+
+	out->resize(size);
+
+	const real_t *a_ptr = a->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < size; ++i) {
+		out_ptr[i] = a_ptr[i] * scalar;
+	}
+
+	return out;
+}
+
+Ref<MLPPVector> MLPPMatrix::flattenmnv(const Vector<Ref<MLPPVector>> &A) {
+	Ref<MLPPVector> a;
+	a.instance();
+
+	int vsize = 0;
+	for (int i = 0; i < A.size(); ++i) {
+		vsize += A[i]->size();
+	}
+
+	a->resize(vsize);
+
+	int a_index = 0;
+	real_t *a_ptr = a->ptrw();
+
+	for (int i = 0; i < A.size(); ++i) {
+		const Ref<MLPPVector> &r = A[i];
+
+		int r_size = r->size();
+		const real_t *r_ptr = r->ptr();
+
+		for (int j = 0; j < r_size; ++j) {
+			a_ptr[a_index] = r_ptr[j];
+			++a_index;
+		}
+	}
+
+	return a;
+}
+
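+// Illustrative note: flattenmnv concatenates the given row vectors in order, so
+// { [1, 2], [3, 4] } flattens to [1, 2, 3, 4]. kronecker_productnm further down
+// relies on this to assemble each output row from scaled copies of a row of B.
+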
+/*
+std::vector<std::vector<real_t>> MLPPMatrix::gramMatrix(std::vector<std::vector<real_t>> A) {
+	return matmult(transpose(A), A); // AtA
+}
+*/
+
+/*
+bool MLPPMatrix::linearIndependenceChecker(std::vector<std::vector<real_t>> A) {
+	if (det(gramMatrix(A), A.size()) == 0) {
+		return false;
+	}
+	return true;
+}
+*/
+
+Ref<MLPPMatrix> MLPPMatrix::gaussian_noise(int n, int m) {
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::normal_distribution<real_t> distribution(0, 1); // Standard normal distribution. Mean of 0, std of 1.
+
+	Ref<MLPPMatrix> A;
+	A.instance();
+	A->resize(Size2i(m, n));
+
+	int a_data_size = A->data_size();
+	real_t *a_ptr = A->ptrw();
+
+	for (int i = 0; i < a_data_size; ++i) {
+		a_ptr[i] = distribution(generator);
+	}
+
+	return A;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::additionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	int data_size = A->data_size();
+
+	for (int i = 0; i < data_size; ++i) {
+		c_ptr[i] = a_ptr[i] + b_ptr[i];
+	}
+
+	return C;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::subtractionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	int data_size = A->data_size();
+
+	for (int i = 0; i < data_size; ++i) {
+		c_ptr[i] = a_ptr[i] - b_ptr[i];
+	}
+
+	return C;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::matmultnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+
+	Size2i a_size = A->size();
+	Size2i b_size = B->size();
+
+	ERR_FAIL_COND_V(a_size.x != b_size.y, Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(Size2i(b_size.x, a_size.y));
+	C->fill(0);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	for (int i = 0; i < a_size.y; i++) {
+		for (int k = 0; k < b_size.y; k++) {
+			int ind_i_k = A->calculate_index(i, k);
+
+			for (int j = 0; j < b_size.x; j++) {
+				int ind_i_j = C->calculate_index(i, j);
+				int ind_k_j = B->calculate_index(k, j);
+
+				c_ptr[ind_i_j] += a_ptr[ind_i_k] * b_ptr[ind_k_j];
+
+				//C->set_element(i, j, C->get_element(i, j) + A->get_element(i, k) * B->get_element(k, j));
+			}
+		}
+	}
+
+	return C;
+}
+
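+// Note on matmultnm: the i-k-j loop order walks B and C row by row in the
+// innermost loop, which is sequential for these row-major buffers and therefore
+// more cache-friendly than the textbook i-j-k ordering.
+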
+Ref<MLPPMatrix> MLPPMatrix::hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	for (int i = 0; i < a_size.y; i++) {
+		for (int j = 0; j < a_size.x; j++) {
+			int ind_i_j = A->calculate_index(i, j);
+			c_ptr[ind_i_j] = a_ptr[ind_i_j] * b_ptr[ind_i_j];
+		}
+	}
+
+	return C;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	// [1,1,1,1]   [1,2,3,4,5]
+	// [1,1,1,1]   [1,2,3,4,5]
+	//             [1,2,3,4,5]
+
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+	// [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5]
+
+	// Resulting matrix: A.size() * B.size()
+	//                   A[0].size() * B[0].size()
+
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	Size2i b_size = B->size();
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(Size2i(b_size.x * a_size.x, b_size.y * a_size.y));
+
+	const real_t *a_ptr = A->ptr();
+
+	Ref<MLPPVector> row_tmp;
+	row_tmp.instance();
+	row_tmp->resize(b_size.x);
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int j = 0; j < b_size.y; ++j) {
+			B->get_row_into_mlpp_vector(j, row_tmp);
+
+			Vector<Ref<MLPPVector>> row;
+			for (int k = 0; k < a_size.x; ++k) {
+				row.push_back(scalar_multiplynv(a_ptr[A->calculate_index(i, k)], row_tmp));
+			}
+
+			Ref<MLPPVector> flattened_row = flattenmnv(row);
+
+			C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+		}
+	}
+
+	return C;
+}
+
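+// Hedged usage sketch (illustrative sizes only): for A of size (n, m) and B of
+// size (p, q), kronecker_productnm returns an (n * p, m * q) matrix; output row
+// i * p + j is the concatenation A(i, 0) * B_row(j), ..., A(i, m - 1) * B_row(j).
+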
+Ref<MLPPMatrix> MLPPMatrix::element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	for (int i = 0; i < a_size.y; i++) {
+		for (int j = 0; j < a_size.x; j++) {
+			int ind_i_j = A->calculate_index(i, j);
+			c_ptr[ind_i_j] = a_ptr[ind_i_j] / b_ptr[ind_i_j];
+		}
+	}
+
+	return C;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::transposenm(const Ref<MLPPMatrix> &A) {
+	Size2i a_size = A->size();
+
+	Ref<MLPPMatrix> AT;
+	AT.instance();
+	AT->resize(Size2i(a_size.y, a_size.x));
+
+	const real_t *a_ptr = A->ptr();
+	real_t *at_ptr = AT->ptrw();
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int j = 0; j < a_size.x; ++j) {
+			at_ptr[AT->calculate_index(j, i)] = a_ptr[A->calculate_index(i, j)];
+		}
+	}
+
+	return AT;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A) {
+	Ref<MLPPMatrix> AN = A->duplicate();
+	Size2i a_size = AN->size();
+	real_t *an_ptr = AN->ptrw();
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int j = 0; j < a_size.x; ++j) {
+			an_ptr[AN->calculate_index(i, j)] *= scalar;
+		}
+	}
+
+	return AN;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::scalar_addnm(real_t scalar, const Ref<MLPPMatrix> &A) {
+	Ref<MLPPMatrix> AN = A->duplicate();
+	Size2i a_size = AN->size();
+	real_t *an_ptr = AN->ptrw();
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int j = 0; j < a_size.x; ++j) {
+			an_ptr[AN->calculate_index(i, j)] += scalar;
+		}
+	}
+
+	return AN;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::lognm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::log(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::log10nm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::log10(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::expnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::exp(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::erfnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::erf(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::exponentiatenm(const Ref<MLPPMatrix> &A, real_t p) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::pow(a_ptr[i], p);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::sqrtnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::sqrt(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::cbrtnm(const Ref<MLPPMatrix> &A) {
+	return exponentiatenm(A, real_t(1) / real_t(3));
+}
+
+/*
+std::vector<std::vector<real_t>> MLPPMatrix::matrixPower(std::vector<std::vector<real_t>> A, int n) {
+	std::vector<std::vector<real_t>> B = identity(A.size());
+	if (n == 0) {
+		return identity(A.size());
+	} else if (n < 0) {
+		A = inverse(A);
+	}
+	for (int i = 0; i < std::abs(n); i++) {
+		B = matmult(B, A);
+	}
+	return B;
+}
+*/
+
+Ref<MLPPMatrix> MLPPMatrix::absnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = ABS(a_ptr[i]);
+	}
+
+	return out;
+}
+
+real_t MLPPMatrix::detm(const Ref<MLPPMatrix> &A, int d) {
+	ERR_FAIL_COND_V(!A.is_valid(), 0);
+
+	real_t deter = 0;
+	Ref<MLPPMatrix> B;
+	B.instance();
+	B->resize(Size2i(d, d));
+	B->fill(0);
+
+	/* This is the base case in which the input is a 2x2 square matrix.
+	Recursion is performed unless and until we reach this base case,
+	such that we receive a scalar as the result. */
+	if (d == 2) {
+		return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0);
+	} else {
+		for (int i = 0; i < d; i++) {
+			int sub_i = 0;
+			for (int j = 1; j < d; j++) {
+				int sub_j = 0;
+				for (int k = 0; k < d; k++) {
+					if (k == i) {
+						continue;
+					}
+
+					B->set_element(sub_i, sub_j, A->get_element(j, k));
+					sub_j++;
+				}
+				sub_i++;
+			}
+
+			deter += Math::pow(static_cast<real_t>(-1), static_cast<real_t>(i)) * A->get_element(0, i) * detm(B, d - 1);
+		}
+	}
+
+	return deter;
+}
+
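+// Note on detm: this is a Laplace (cofactor) expansion along the first row, so
+// its cost grows factorially with d. That is fine for the small matrices used
+// here; for large d an LU-based determinant would be the usual choice.
+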
+/*
+real_t MLPPMatrix::trace(std::vector<std::vector<real_t>> A) {
+	real_t trace = 0;
+	for (uint32_t i = 0; i < A.size(); i++) {
+		trace += A[i][i];
+	}
+	return trace;
+}
+*/
+
+Ref<MLPPMatrix> MLPPMatrix::cofactornm(const Ref<MLPPMatrix> &A, int n, int i, int j) {
+	Ref<MLPPMatrix> cof;
+	cof.instance();
+	cof->resize(A->size());
+
+	int sub_i = 0;
+	int sub_j = 0;
+
+	for (int row = 0; row < n; row++) {
+		for (int col = 0; col < n; col++) {
+			if (row != i && col != j) {
+				cof->set_element(sub_i, sub_j++, A->get_element(row, col));
+
+				if (sub_j == n - 1) {
+					sub_j = 0;
+					sub_i++;
+				}
+			}
+		}
+	}
+
+	return cof;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::adjointnm(const Ref<MLPPMatrix> &A) {
+	Ref<MLPPMatrix> adj;
+
+	ERR_FAIL_COND_V(!A.is_valid(), adj);
+
+	Size2i a_size = A->size();
+
+	ERR_FAIL_COND_V(a_size.x != a_size.y, adj);
+
+	// Resizing the initial adjoint matrix.
+	adj.instance();
+	adj->resize(a_size);
+
+	// Checking for the case where the given N x N matrix is a scalar.
+	if (a_size.y == 1) {
+		adj->set_element(0, 0, 1);
+		return adj;
+	}
+
+	if (a_size.y == 2) {
+		adj->set_element(0, 0, A->get_element(1, 1));
+		adj->set_element(1, 1, A->get_element(0, 0));
+
+		adj->set_element(0, 1, -A->get_element(0, 1));
+		adj->set_element(1, 0, -A->get_element(1, 0));
+
+		return adj;
+	}
+
+	for (int i = 0; i < a_size.y; i++) {
+		for (int j = 0; j < a_size.x; j++) {
+			Ref<MLPPMatrix> cof = cofactornm(A, a_size.y, i, j);
+			// 1 if even, -1 if odd.
+			int sign = (i + j) % 2 == 0 ? 1 : -1;
+			adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1));
+		}
+	}
+	return adj;
+}
+
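+// Reminder of the identity used next: A^-1 = adj(A) / det(A), which is why
+// inversenm simply scales the adjoint by 1 / det. It assumes det(A) != 0.
+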
+Ref<MLPPMatrix> MLPPMatrix::inversenm(const Ref<MLPPMatrix> &A) {
+	return scalar_multiplynm(1 / detm(A, int(A->size().y)), adjointnm(A));
+}
+
+Ref<MLPPMatrix> MLPPMatrix::pinversenm(const Ref<MLPPMatrix> &A) {
+	return matmultnm(inversenm(matmultnm(transposenm(A), A)), transposenm(A));
+}
+
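+// pinversenm computes the left Moore-Penrose pseudoinverse (A^T A)^-1 A^T,
+// which is well defined only when A has full column rank (A^T A invertible).
+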
+Ref<MLPPMatrix> MLPPMatrix::zeromatnm(int n, int m) {
+	Ref<MLPPMatrix> mat;
+	mat.instance();
+
+	mat->resize(Size2i(m, n));
+	mat->fill(0);
+
+	return mat;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::onematnm(int n, int m) {
+	Ref<MLPPMatrix> mat;
+	mat.instance();
+
+	mat->resize(Size2i(m, n));
+	mat->fill(1);
+
+	return mat;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::fullnm(int n, int m, int k) {
+	Ref<MLPPMatrix> mat;
+	mat.instance();
+
+	mat->resize(Size2i(m, n));
+	mat->fill(k);
+
+	return mat;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::sinnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::sin(a_ptr[i]);
+	}
+
+	return out;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::cosnm(const Ref<MLPPMatrix> &A) {
+	ERR_FAIL_COND_V(!A.is_valid(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> out;
+	out.instance();
+
+	int data_size = A->data_size();
+	out->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	real_t *out_ptr = out->ptrw();
+
+	for (int i = 0; i < data_size; ++i) {
+		out_ptr[i] = Math::cos(a_ptr[i]);
+	}
+
+	return out;
+}
+
+/*
+std::vector<std::vector<real_t>> MLPPMatrix::rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis) {
+	std::vector<std::vector<real_t>> rotationMatrix = { { Math::cos(theta), -Math::sin(theta) }, { Math::sin(theta), Math::cos(theta) } };
+	if (axis == 0) {
+		rotationMatrix = { { 1, 0, 0 }, { 0, Math::cos(theta), -Math::sin(theta) }, { 0, Math::sin(theta), Math::cos(theta) } };
+	} else if (axis == 1) {
+		rotationMatrix = { { Math::cos(theta), 0, Math::sin(theta) }, { 0, 1, 0 }, { -Math::sin(theta), 0, Math::cos(theta) } };
+	} else if (axis == 2) {
+		rotationMatrix = { { Math::cos(theta), -Math::sin(theta), 0 }, { Math::sin(theta), Math::cos(theta), 0 }, { 0, 0, 1 } };
+	}
+
+	return matmult(A, rotationMatrix);
+}
+*/
+
+Ref<MLPPMatrix> MLPPMatrix::maxnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(A->size());
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	int size = A->data_size();
+
+	for (int i = 0; i < size; i++) {
+		c_ptr[i] = MAX(a_ptr[i], b_ptr[i]);
+	}
+
+	return C;
+}
+
+/*
+real_t MLPPMatrix::max(std::vector<std::vector<real_t>> A) {
+	return max(flatten(A));
+}
+
+real_t MLPPMatrix::min(std::vector<std::vector<real_t>> A) {
+	return min(flatten(A));
+}
+
+std::vector<std::vector<real_t>> MLPPMatrix::round(std::vector<std::vector<real_t>> A) {
+	std::vector<std::vector<real_t>> B;
+	B.resize(A.size());
+	for (uint32_t i = 0; i < B.size(); i++) {
+		B[i].resize(A[0].size());
+	}
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[i].size(); j++) {
+			B[i][j] = Math::round(A[i][j]);
+		}
+	}
+	return B;
+}
+*/
+
+/*
+real_t MLPPMatrix::norm_2(std::vector<std::vector<real_t>> A) {
+	real_t sum = 0;
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[i].size(); j++) {
+			sum += A[i][j] * A[i][j];
+		}
+	}
+	return Math::sqrt(sum);
+}
+*/
+
+Ref<MLPPMatrix> MLPPMatrix::identitym(int d) {
+	Ref<MLPPMatrix> identity_mat;
+	identity_mat.instance();
+	identity_mat->resize(Size2i(d, d));
+	identity_mat->fill(0);
+
+	real_t *im_ptr = identity_mat->ptrw();
+
+	for (int i = 0; i < d; i++) {
+		im_ptr[identity_mat->calculate_index(i, i)] = 1;
+	}
+
+	return identity_mat;
+}
+
+Ref<MLPPMatrix> MLPPMatrix::covnm(const Ref<MLPPMatrix> &A) {
+	MLPPStat stat;
+
+	Ref<MLPPMatrix> cov_mat;
+	cov_mat.instance();
+
+	Size2i a_size = A->size();
+
+	cov_mat->resize(a_size);
+
+	Ref<MLPPVector> a_i_row_tmp;
+	a_i_row_tmp.instance();
+	a_i_row_tmp->resize(a_size.x);
+
+	Ref<MLPPVector> a_j_row_tmp;
+	a_j_row_tmp.instance();
+	a_j_row_tmp->resize(a_size.x);
+
+	for (int i = 0; i < a_size.y; ++i) {
+		A->get_row_into_mlpp_vector(i, a_i_row_tmp);
+
+		for (int j = 0; j < a_size.x; ++j) {
+			A->get_row_into_mlpp_vector(j, a_j_row_tmp);
+
+			cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
+		}
+	}
+
+	return cov_mat;
+}
+
+MLPPMatrix::EigenResult MLPPMatrix::eigen(Ref<MLPPMatrix> A) {
+	EigenResult res;
+
+	ERR_FAIL_COND_V(!A.is_valid(), res);
+
+	/*
+	A (the entered parameter) will in most use cases be X'X, XX', etc., and must be
+	symmetric. That simply means that 1) A' = A and 2) A is a square matrix. This
+	function computes the eigenvalues of a matrix using Jacobi's method.
+	*/
+
+	bool diagonal = true; // Perform the iterative Jacobi algorithm unless and until we reach a diagonal matrix which yields us the eigenvals.
+
+	HashMap<int, int> val_to_vec;
+	Ref<MLPPMatrix> a_new;
+	Ref<MLPPMatrix> eigenvectors = identitym(A->size().y);
+	Size2i a_size = A->size();
+
+	do {
+		real_t a_ij = A->get_element(0, 1);
+		int sub_i = 0;
+		int sub_j = 1;
+		for (int i = 0; i < a_size.y; ++i) {
+			for (int j = 0; j < a_size.x; ++j) {
+				real_t ca_ij = A->get_element(i, j);
+				real_t abs_ca_ij = ABS(ca_ij);
+
+				if (i != j && abs_ca_ij > a_ij) {
+					a_ij = ca_ij;
+					sub_i = i;
+					sub_j = j;
+				} else if (i != j && abs_ca_ij == a_ij) {
+					if (i < sub_i) {
+						a_ij = ca_ij;
+						sub_i = i;
+						sub_j = j;
+					}
+				}
+			}
+		}
+
+		real_t a_ii = A->get_element(sub_i, sub_i);
+		real_t a_jj = A->get_element(sub_j, sub_j);
+		//real_t a_ji = A->get_element(sub_j, sub_i);
+		real_t theta;
+
+		if (a_ii == a_jj) {
+			theta = M_PI / 4;
+		} else {
+			theta = 0.5 * Math::atan(2 * a_ij / (a_ii - a_jj));
+		}
+
+		Ref<MLPPMatrix> P = identitym(A->size().y);
+		P->set_element(sub_i, sub_j, -Math::sin(theta));
+		P->set_element(sub_i, sub_i, Math::cos(theta));
+		P->set_element(sub_j, sub_j, Math::cos(theta));
+		P->set_element(sub_j, sub_i, Math::sin(theta));
+
+		a_new = matmultnm(matmultnm(inversenm(P), A), P);
+
+		Size2i a_new_size = a_new->size();
+
+		for (int i = 0; i < a_new_size.y; ++i) {
+			for (int j = 0; j < a_new_size.x; ++j) {
+				if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
+					a_new->set_element(i, j, 0);
+				}
+			}
+		}
+
+		bool non_zero = false;
+		for (int i = 0; i < a_new_size.y; ++i) {
+			for (int j = 0; j < a_new_size.x; ++j) {
+				if (i != j && !Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
+					non_zero = true;
+				}
+			}
+		}
+
+		if (non_zero) {
+			diagonal = false;
+		} else {
+			diagonal = true;
+		}
+
+		if (a_new->is_equal_approx(A)) {
+			diagonal = true;
+			for (int i = 0; i < a_new_size.y; ++i) {
+				for (int j = 0; j < a_new_size.x; ++j) {
+					if (i != j) {
+						a_new->set_element(i, j, 0);
+					}
+				}
+			}
+		}
+
+		eigenvectors = matmultnm(eigenvectors, P);
+		A = a_new;
+	} while (!diagonal);
+
+	Ref<MLPPMatrix> a_new_prior = a_new->duplicate();
+
+	Size2i a_new_size = a_new->size();
+
+	// Bubble Sort. Should change this later.
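+	// The eigenvalues sit on the diagonal of a_new; sort them in descending
+	// order, then use val_to_vec below to map each sorted position back to its
+	// original column so the eigenvector columns can be permuted to match.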
+	for (int i = 0; i < a_new_size.y - 1; ++i) {
+		for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
+			if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
+				real_t temp = a_new->get_element(j + 1, j + 1);
+				a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
+				a_new->set_element(j, j, temp);
+			}
+		}
+	}
+
+	for (int i = 0; i < a_new_size.y; ++i) {
+		for (int j = 0; j < a_new_size.x; ++j) {
+			if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
+				val_to_vec[i] = j;
+			}
+		}
+	}
+
+	Ref<MLPPMatrix> eigen_temp = eigenvectors->duplicate();
+
+	Size2i eigenvectors_size = eigenvectors->size();
+
+	for (int i = 0; i < eigenvectors_size.y; ++i) {
+		for (int j = 0; j < eigenvectors_size.x; ++j) {
+			eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
+		}
+	}
+
+	res.eigen_vectors = eigenvectors;
+	res.eigen_values = a_new;
+
+	return res;
+}
+
+MLPPMatrix::SVDResult MLPPMatrix::svd(const Ref<MLPPMatrix> &A) {
+	SVDResult res;
+
+	ERR_FAIL_COND_V(!A.is_valid(), res);
+
+	Size2i a_size = A->size();
+
+	EigenResult left_eigen = eigen(matmultnm(A, transposenm(A)));
+	EigenResult right_eigen = eigen(matmultnm(transposenm(A), A));
+
+	Ref<MLPPMatrix> singularvals = sqrtnm(left_eigen.eigen_values);
+	Ref<MLPPMatrix> sigma = zeromatnm(a_size.y, a_size.x);
+
+	Size2i singularvals_size = singularvals->size();
+
+	for (int i = 0; i < singularvals_size.y; ++i) {
+		for (int j = 0; j < singularvals_size.x; ++j) {
+			sigma->set_element(i, j, singularvals->get_element(i, j));
+		}
+	}
+
+	res.U = left_eigen.eigen_vectors;
+	res.S = sigma;
+	res.Vt = right_eigen.eigen_vectors;
+
+	return res;
+}
+
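+// Shape note for svd: for an (n, m) input this yields U (n, n), S (n, m) with
+// the singular values on its main diagonal, and Vt (m, m), built from the eigen
+// decompositions of A * A^T and A^T * A. Since the two eigen decompositions are
+// computed independently, A ~= U * S * Vt only up to column ordering and signs.
+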
+/*
+std::vector<real_t> MLPPMatrix::vectorProjection(std::vector<real_t> a, std::vector<real_t> b) {
+	real_t product = dot(a, b) / dot(a, a);
+	return scalarMultiply(product, a); // Projection of vector b onto a. Denoted as proj_a(b).
+}
+*/
+
+/*
+std::vector<std::vector<real_t>> MLPPMatrix::gramSchmidtProcess(std::vector<std::vector<real_t>> A) {
+	A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use.
+	std::vector<std::vector<real_t>> B;
+	B.resize(A.size());
+	for (uint32_t i = 0; i < B.size(); i++) {
+		B[i].resize(A[0].size());
+	}
+
+	B[0] = A[0]; // We set a_1 = b_1 as an initial condition.
+	B[0] = scalarMultiply(1 / norm_2(B[0]), B[0]);
+	for (uint32_t i = 1; i < B.size(); i++) {
+		B[i] = A[i];
+		for (int j = i - 1; j >= 0; j--) {
+			B[i] = subtraction(B[i], vectorProjection(B[j], A[i]));
+		}
+		B[i] = scalarMultiply(1 / norm_2(B[i]), B[i]); // Very simply, multiply all elements of vec B[i] by 1 / ||B[i]||_2.
+	}
+	return transpose(B); // We re-transpose the matrix.
+}
+*/
+
+/*
+MLPPMatrix::QRDResult MLPPMatrix::qrd(std::vector<std::vector<real_t>> A) {
+	QRDResult res;
+
+	res.Q = gramSchmidtProcess(A);
+	res.R = matmult(transpose(res.Q), A);
+
+	return res;
+}
+*/
+
+/*
+MLPPMatrix::CholeskyResult MLPPMatrix::cholesky(std::vector<std::vector<real_t>> A) {
+	std::vector<std::vector<real_t>> L = zeromat(A.size(), A[0].size());
+	for (uint32_t j = 0; j < L.size(); j++) { // Matrices entered must be square. No problem here.
+		for (uint32_t i = j; i < L.size(); i++) {
+			if (i == j) {
+				real_t sum = 0;
+				for (uint32_t k = 0; k < j; k++) {
+					sum += L[i][k] * L[i][k];
+				}
+				L[i][j] = Math::sqrt(A[i][j] - sum);
+			} else { // That is, i != j.
+				real_t sum = 0;
+				for (uint32_t k = 0; k < j; k++) {
+					sum += L[i][k] * L[j][k];
+				}
+				L[i][j] = (A[i][j] - sum) / L[j][j];
+			}
+		}
+	}
+
+	CholeskyResult res;
+	res.L = L;
+	res.Lt = transpose(L); // Indeed, L.T is our upper triangular matrix.
+
+	return res;
+}
+*/
+
+/*
+real_t MLPPMatrix::sum_elements(std::vector<std::vector<real_t>> A) {
+	real_t sum = 0;
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[i].size(); j++) {
+			sum += A[i][j];
+		}
+	}
+	return sum;
+}
+*/
+
+Ref<MLPPVector> MLPPMatrix::flattenvvnv(const Ref<MLPPMatrix> &A) {
+	int data_size = A->data_size();
+
+	Ref<MLPPVector> res;
+	res.instance();
+	res->resize(data_size);
+
+	real_t *res_ptr = res->ptrw();
+	const real_t *a_ptr = A->ptr();
+
+	for (int i = 0; i < data_size; ++i) {
+		res_ptr[i] = a_ptr[i];
+	}
+
+	return res;
+}
+
+/*
+std::vector<real_t> MLPPMatrix::solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b) {
+	return mat_vec_mult(inverse(A), b);
+}
+
+bool MLPPMatrix::positiveDefiniteChecker(std::vector<std::vector<real_t>> A) {
+	auto eig_result = eig(A);
+	auto eigenvectors = std::get<0>(eig_result);
+	auto eigenvals = std::get<1>(eig_result);
+
+	std::vector<real_t> eigenvals_vec;
+	for (uint32_t i = 0; i < eigenvals.size(); i++) {
+		eigenvals_vec.push_back(eigenvals[i][i]);
+	}
+	for (uint32_t i = 0; i < eigenvals_vec.size(); i++) {
+		if (eigenvals_vec[i] <= 0) { // Simply check to ensure all eigenvalues are positive.
+			return false;
+		}
+	}
+	return true;
+}
+
+bool MLPPMatrix::negativeDefiniteChecker(std::vector<std::vector<real_t>> A) {
+	auto eig_result = eig(A);
+	auto eigenvectors = std::get<0>(eig_result);
+	auto eigenvals = std::get<1>(eig_result);
+
+	std::vector<real_t> eigenvals_vec;
+	for (uint32_t i = 0; i < eigenvals.size(); i++) {
+		eigenvals_vec.push_back(eigenvals[i][i]);
+	}
+	for (uint32_t i = 0; i < eigenvals_vec.size(); i++) {
+		if (eigenvals_vec[i] >= 0) { // Simply check to ensure all eigenvalues are negative.
+			return false;
+		}
+	}
+	return true;
+}
+
+bool MLPPMatrix::zeroEigenvalue(std::vector<std::vector<real_t>> A) {
+	auto eig_result = eig(A);
+	auto eigenvectors = std::get<0>(eig_result);
+	auto eigenvals = std::get<1>(eig_result);
+
+	std::vector<real_t> eigenvals_vec;
+	for (uint32_t i = 0; i < eigenvals.size(); i++) {
+		eigenvals_vec.push_back(eigenvals[i][i]);
+	}
+	for (uint32_t i = 0; i < eigenvals_vec.size(); i++) {
+		if (eigenvals_vec[i] == 0) {
+			return true;
+		}
+	}
+	return false;
+}
+*/
+
 String MLPPMatrix::to_string() {
 	String str;
 
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index d6dcc56..56acfb6 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -583,6 +583,121 @@ public:
 		}
 	}
 
+	//TODO remove these
+	Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
+	Ref<MLPPVector> flattenmnv(const Vector<Ref<MLPPVector>> &A);
+
+	//std::vector<std::vector<real_t>> gramMatrix(std::vector<std::vector<real_t>> A);
+	//bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> gaussian_noise(int n, int m);
+
+	Ref<MLPPMatrix> additionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> subtractionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> matmultnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+
+	Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+
+	Ref<MLPPMatrix> transposenm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> scalar_addnm(real_t scalar, const Ref<MLPPMatrix> &A);
+
+	Ref<MLPPMatrix> lognm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> log10nm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> expnm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> erfnm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> exponentiatenm(const Ref<MLPPMatrix> &A, real_t p);
+	Ref<MLPPMatrix> sqrtnm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> cbrtnm(const Ref<MLPPMatrix> &A);
+
+	//std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
+
+	Ref<MLPPMatrix> absnm(const Ref<MLPPMatrix> &A);
+
+	real_t detm(const Ref<MLPPMatrix> &A, int d);
+
+	//real_t trace(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> cofactornm(const Ref<MLPPMatrix> &A, int n, int i, int j);
+	Ref<MLPPMatrix> adjointnm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> inversenm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> pinversenm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> zeromatnm(int n, int m);
+	Ref<MLPPMatrix> onematnm(int n, int m);
+	Ref<MLPPMatrix> fullnm(int n, int m, int k);
+
+	Ref<MLPPMatrix> sinnm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> cosnm(const Ref<MLPPMatrix> &A);
+
+	//std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
+
+	Ref<MLPPMatrix> maxnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+
+	//real_t max(std::vector<std::vector<real_t>> A);
+	//real_t min(std::vector<std::vector<real_t>> A);
+
+	//std::vector<std::vector<real_t>> round(std::vector<std::vector<real_t>> A);
+
+	//real_t norm_2(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> identitym(int d);
+
+	Ref<MLPPMatrix> covnm(const Ref<MLPPMatrix> &A);
+
+	struct EigenResult {
+		Ref<MLPPMatrix> eigen_vectors;
+		Ref<MLPPMatrix> eigen_values;
+	};
+
+	EigenResult eigen(Ref<MLPPMatrix> A);
+
+	struct SVDResult {
+		Ref<MLPPMatrix> U;
+		Ref<MLPPMatrix> S;
+		Ref<MLPPMatrix> Vt;
+	};
+
+	SVDResult svd(const Ref<MLPPMatrix> &A);
+
+	//std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
+
+	//std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
+
+	/*
+	struct QRDResult {
+		std::vector<std::vector<real_t>> Q;
+		std::vector<std::vector<real_t>> R;
+	};
+	*/
+
+	//QRDResult qrd(std::vector<std::vector<real_t>> A);
+
+	/*
+	struct CholeskyResult {
+		std::vector<std::vector<real_t>> L;
+		std::vector<std::vector<real_t>> Lt;
+	};
+
+	CholeskyResult cholesky(std::vector<std::vector<real_t>> A);
+	*/
+
+	//real_t sum_elements(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPVector> flattenvvnv(const Ref<MLPPMatrix> &A);
+
+	/*
+	std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
+
+	bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);
+
+	bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);
+
+	bool zeroEigenvalue(std::vector<std::vector<real_t>> A);
+	*/
+
 	_FORCE_INLINE_ bool is_equal_approx(const Ref<MLPPMatrix> &p_with, real_t tolerance = static_cast<real_t>(CMP_EPSILON)) const {
 		ERR_FAIL_COND_V(!p_with.is_valid(), false);