diff --git a/SCsub b/SCsub
index 205710a..87e3ac1 100644
--- a/SCsub
+++ b/SCsub
@@ -81,6 +81,7 @@ sources = [
     "mlpp/utilities/utilities_old.cpp",
     "mlpp/transforms/transforms_old.cpp",
     "mlpp/stat/stat_old.cpp",
+    "mlpp/lin_alg/lin_alg_old.cpp",
 
     "test/mlpp_tests.cpp",
 ]
diff --git a/mlpp/lin_alg/lin_alg_old.cpp b/mlpp/lin_alg/lin_alg_old.cpp
new file mode 100644
index 0000000..db25d21
--- /dev/null
+++ b/mlpp/lin_alg/lin_alg_old.cpp
@@ -0,0 +1,2808 @@
+//
+//  LinAlg.cpp
+//
+//  Created by Marc Melikyan on 1/8/21.
+//
+
+#include "lin_alg_old.h"
+
+#include "core/math/math_funcs.h"
+
+#include "../stat/stat.h"
+
+#include <cmath>
+#include <iostream>
+#include <map>
+#include <random>
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::gramMatrix(std::vector<std::vector<real_t>> A) {
+	return matmult(transpose(A), A); // AtA
+}
+
+bool MLPPLinAlgOld::linearIndependenceChecker(std::vector<std::vector<real_t>> A) {
+	if (det(gramMatrix(A), A.size()) == 0) {
+		return false;
+	}
+	return true;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::gaussianNoise(int n, int m) {
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+
+	std::vector<std::vector<real_t>> A;
+	A.resize(n);
+	for (int i = 0; i < n; i++) {
+		A[i].resize(m);
+		for (int j = 0; j < m; j++) {
+			std::normal_distribution<real_t> distribution(0, 1); // Standard normal distribution. Mean of 0, std of 1.
+			A[i][j] = distribution(generator);
+		}
+	}
+	return A;
+}
+
+Ref<MLPPMatrix> MLPPLinAlgOld::gaussian_noise(int n, int m) {
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::normal_distribution<real_t> distribution(0, 1); // Standard normal distribution. Mean of 0, std of 1.
+
+	Ref<MLPPMatrix> A;
+	A.instance();
+	A->resize(Size2i(m, n));
+
+	int a_data_size = A->data_size();
+	real_t *a_ptr = A->ptrw();
+
+	for (int i = 0; i < a_data_size; ++i) {
+		a_ptr[i] = distribution(generator);
+	}
+
+	return A;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B) {
+	std::vector<std::vector<real_t>> C;
+	C.resize(A.size());
+	for (uint32_t i = 0; i < C.size(); i++) {
+		C[i].resize(A[0].size());
+	}
+
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[0].size(); j++) {
+			C[i][j] = A[i][j] + B[i][j];
+		}
+	}
+	return C;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::subtraction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B) {
+	std::vector<std::vector<real_t>> C;
+	C.resize(A.size());
+	for (uint32_t i = 0; i < C.size(); i++) {
+		C[i].resize(A[0].size());
+	}
+
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[0].size(); j++) {
+			C[i][j] = A[i][j] - B[i][j];
+		}
+	}
+	return C;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::matmult(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B) {
+	std::vector<std::vector<real_t>> C;
+	C.resize(A.size());
+	for (uint32_t i = 0; i < C.size(); i++) {
+		C[i].resize(B[0].size());
+	}
+
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t k = 0; k < B.size(); k++) {
+			for (uint32_t j = 0; j < B[0].size(); j++) {
+				C[i][j] += A[i][k] * B[k][j];
+			}
+		}
+	}
+	return C;
+}
+
+Ref<MLPPMatrix> MLPPLinAlgOld::additionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = C->ptrw();
+
+	int data_size = A->data_size();
+
+	for (int i = 0; i < data_size; ++i) {
+		c_ptr[i] = a_ptr[i] + b_ptr[i];
+	}
+
+	return C;
+}
+Ref<MLPPMatrix> MLPPLinAlgOld::subtractionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref<MLPPMatrix>());
+	Size2i a_size = A->size();
+	ERR_FAIL_COND_V(a_size != B->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> C;
+	C.instance();
+	C->resize(a_size);
+
+ const real_t *a_ptr = A->ptr(); + const real_t *b_ptr = B->ptr(); + real_t *c_ptr = C->ptrw(); + + int data_size = A->data_size(); + + for (int i = 0; i < data_size; ++i) { + c_ptr[i] = a_ptr[i] - b_ptr[i]; + } + + return C; +} +Ref MLPPLinAlgOld::matmultm(const Ref &A, const Ref &B) { + ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref()); + + Size2i a_size = A->size(); + Size2i b_size = B->size(); + + ERR_FAIL_COND_V(a_size.x != b_size.y, Ref()); + + Ref C; + C.instance(); + C->resize(Size2i(b_size.x, a_size.y)); + C->fill(0); + + const real_t *a_ptr = A->ptr(); + const real_t *b_ptr = B->ptr(); + real_t *c_ptr = C->ptrw(); + + for (int i = 0; i < a_size.y; i++) { + for (int k = 0; k < b_size.y; k++) { + int ind_i_k = A->calculate_index(i, k); + + for (int j = 0; j < b_size.x; j++) { + int ind_i_j = C->calculate_index(i, j); + int ind_k_j = B->calculate_index(k, j); + + c_ptr[ind_i_j] += a_ptr[ind_i_k] * b_ptr[ind_k_j]; + + //C->set_element(i, j, C->get_element(i, j) + A->get_element(i, k) * B->get_element(k, j + } + } + } + + return C; +} + +std::vector> MLPPLinAlgOld::hadamard_product(std::vector> A, std::vector> B) { + std::vector> C; + C.resize(A.size()); + for (uint32_t i = 0; i < C.size(); i++) { + C[i].resize(A[0].size()); + } + + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[0].size(); j++) { + C[i][j] = A[i][j] * B[i][j]; + } + } + return C; +} + +std::vector> MLPPLinAlgOld::kronecker_product(std::vector> A, std::vector> B) { + std::vector> C; + + // [1,1,1,1] [1,2,3,4,5] + // [1,1,1,1] [1,2,3,4,5] + // [1,2,3,4,5] + + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + + // Resulting matrix: A.size() * B.size() + // A[0].size() * B[0].size() + + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < B.size(); j++) { + std::vector> row; + for (uint32_t k = 0; k < A[0].size(); k++) { + row.push_back(scalarMultiply(A[i][k], B[j])); + } + C.push_back(flatten(row)); + } + } + return C; +} + +std::vector> MLPPLinAlgOld::elementWiseDivision(std::vector> A, std::vector> B) { + std::vector> C; + C.resize(A.size()); + for (uint32_t i = 0; i < C.size(); i++) { + C[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + C[i][j] = A[i][j] / B[i][j]; + } + } + return C; +} + +Ref MLPPLinAlgOld::hadamard_productm(const Ref &A, const Ref &B) { + ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref()); + Size2i a_size = A->size(); + ERR_FAIL_COND_V(a_size != B->size(), Ref()); + + Ref C; + C.instance(); + C->resize(a_size); + + const real_t *a_ptr = A->ptr(); + const real_t *b_ptr = B->ptr(); + real_t *c_ptr = C->ptrw(); + + for (int i = 0; i < a_size.y; i++) { + for (int j = 0; j < a_size.x; j++) { + int ind_i_j = A->calculate_index(i, j); + c_ptr[ind_i_j] = a_ptr[ind_i_j] * b_ptr[ind_i_j]; + } + } + + return C; +} +Ref MLPPLinAlgOld::kronecker_productm(const Ref &A, const Ref &B) { + // [1,1,1,1] [1,2,3,4,5] + // [1,1,1,1] [1,2,3,4,5] + // [1,2,3,4,5] + + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + // 
[1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] [1,2,3,4,5] + + // Resulting matrix: A.size() * B.size() + // A[0].size() * B[0].size() + + ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref()); + Size2i a_size = A->size(); + Size2i b_size = B->size(); + + Ref C; + C.instance(); + C->resize(Size2i(b_size.x * a_size.x, b_size.y * a_size.y)); + + const real_t *a_ptr = A->ptr(); + + Ref row_tmp; + row_tmp.instance(); + row_tmp->resize(b_size.x); + + for (int i = 0; i < a_size.y; ++i) { + for (int j = 0; j < b_size.y; ++j) { + B->get_row_into_mlpp_vector(j, row_tmp); + + Vector> row; + for (int k = 0; k < a_size.x; ++k) { + row.push_back(scalar_multiplynv(a_ptr[A->calculate_index(i, k)], row_tmp)); + } + + Ref flattened_row = flattenv(row); + + C->set_row_mlpp_vector(i * b_size.y + j, flattened_row); + } + } + + return C; +} +Ref MLPPLinAlgOld::element_wise_divisionm(const Ref &A, const Ref &B) { + ERR_FAIL_COND_V(!A.is_valid() || !B.is_valid(), Ref()); + Size2i a_size = A->size(); + ERR_FAIL_COND_V(a_size != B->size(), Ref()); + + Ref C; + C.instance(); + C->resize(a_size); + + const real_t *a_ptr = A->ptr(); + const real_t *b_ptr = B->ptr(); + real_t *c_ptr = C->ptrw(); + + for (int i = 0; i < a_size.y; i++) { + for (int j = 0; j < a_size.x; j++) { + int ind_i_j = A->calculate_index(i, j); + c_ptr[ind_i_j] = a_ptr[ind_i_j] / b_ptr[ind_i_j]; + } + } + + return C; +} + +std::vector> MLPPLinAlgOld::transpose(std::vector> A) { + std::vector> AT; + AT.resize(A[0].size()); + for (uint32_t i = 0; i < AT.size(); i++) { + AT[i].resize(A.size()); + } + + for (uint32_t i = 0; i < A[0].size(); i++) { + for (uint32_t j = 0; j < A.size(); j++) { + AT[i][j] = A[j][i]; + } + } + return AT; +} + +std::vector> MLPPLinAlgOld::scalarMultiply(real_t scalar, std::vector> A) { + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + A[i][j] *= scalar; + } + } + return A; +} + +std::vector> MLPPLinAlgOld::scalarAdd(real_t scalar, std::vector> A) { + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + A[i][j] += scalar; + } + } + return A; +} + +Ref MLPPLinAlgOld::transposem(const Ref &A) { + Size2i a_size = A->size(); + + Ref AT; + AT.instance(); + AT->resize(Size2i(a_size.y, a_size.x)); + + const real_t *a_ptr = A->ptr(); + real_t *at_ptr = AT->ptrw(); + + for (int i = 0; i < a_size.y; ++i) { + for (int j = 0; j < a_size.x; ++j) { + at_ptr[AT->calculate_index(j, i)] = a_ptr[A->calculate_index(i, j)]; + } + } + + return AT; +} +Ref MLPPLinAlgOld::scalar_multiplym(real_t scalar, const Ref &A) { + Ref AN = A->duplicate(); + Size2i a_size = AN->size(); + real_t *an_ptr = AN->ptrw(); + + for (int i = 0; i < a_size.y; ++i) { + for (int j = 0; j < a_size.x; ++j) { + an_ptr[AN->calculate_index(i, j)] *= scalar; + } + } + + return AN; +} + +Ref MLPPLinAlgOld::scalar_addm(real_t scalar, const Ref &A) { + Ref AN = A->duplicate(); + Size2i a_size = AN->size(); + real_t *an_ptr = AN->ptrw(); + + for (int i = 0; i < a_size.y; ++i) { + for (int j = 0; j < a_size.x; ++j) { + an_ptr[AN->calculate_index(i, j)] += scalar; + } + } + + return AN; +} + +std::vector> MLPPLinAlgOld::log(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::log(A[i][j]); + } + } + return B; +} + +std::vector> MLPPLinAlgOld::log10(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 
0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::log10(A[i][j]); + } + } + return B; +} + +std::vector> MLPPLinAlgOld::exp(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::exp(A[i][j]); + } + } + return B; +} + +std::vector> MLPPLinAlgOld::erf(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::erf(A[i][j]); + } + } + return B; +} + +std::vector> MLPPLinAlgOld::exponentiate(std::vector> A, real_t p) { + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + A[i][j] = std::pow(A[i][j], p); + } + } + return A; +} + +std::vector> MLPPLinAlgOld::sqrt(std::vector> A) { + return exponentiate(A, 0.5); +} + +std::vector> MLPPLinAlgOld::cbrt(std::vector> A) { + return exponentiate(A, real_t(1) / real_t(3)); +} + +Ref MLPPLinAlgOld::logm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::log(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::log10m(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = std::log10(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::expm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::exp(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::erfm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = std::erf(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::exponentiatem(const Ref &A, real_t p) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::pow(a_ptr[i], p); + } + + return out; +} +Ref MLPPLinAlgOld::sqrtm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::sqrt(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::cbrtm(const Ref &A) { + return exponentiatem(A, real_t(1) / real_t(3)); +} + +std::vector> MLPPLinAlgOld::matrixPower(std::vector> A, int n) { + std::vector> B = identity(A.size()); + if (n 
== 0) { + return identity(A.size()); + } else if (n < 0) { + A = inverse(A); + } + for (int i = 0; i < std::abs(n); i++) { + B = matmult(B, A); + } + return B; +} + +std::vector> MLPPLinAlgOld::abs(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < B.size(); i++) { + for (uint32_t j = 0; j < B[i].size(); j++) { + B[i][j] = std::abs(A[i][j]); + } + } + return B; +} + +Ref MLPPLinAlgOld::absm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = ABS(a_ptr[i]); + } + + return out; +} + +real_t MLPPLinAlgOld::det(std::vector> A, int d) { + real_t deter = 0; + std::vector> B; + B.resize(d); + for (int i = 0; i < d; i++) { + B[i].resize(d); + } + + /* This is the base case in which the input is a 2x2 square matrix. + Recursion is performed unless and until we reach this base case, + such that we recieve a scalar as the result. */ + if (d == 2) { + return A[0][0] * A[1][1] - A[0][1] * A[1][0]; + } + + else { + for (int i = 0; i < d; i++) { + int sub_i = 0; + for (int j = 1; j < d; j++) { + int sub_j = 0; + for (int k = 0; k < d; k++) { + if (k == i) { + continue; + } + B[sub_i][sub_j] = A[j][k]; + sub_j++; + } + sub_i++; + } + deter += std::pow(-1, i) * A[0][i] * det(B, d - 1); + } + } + return deter; +} + +real_t MLPPLinAlgOld::detm(const Ref &A, int d) { + ERR_FAIL_COND_V(!A.is_valid(), 0); + + real_t deter = 0; + Ref B; + B.instance(); + B->resize(Size2i(d, d)); + B->fill(0); + + /* This is the base case in which the input is a 2x2 square matrix. + Recursion is performed unless and until we reach this base case, + such that we recieve a scalar as the result. 
*/ + if (d == 2) { + return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0); + } else { + for (int i = 0; i < d; i++) { + int sub_i = 0; + for (int j = 1; j < d; j++) { + int sub_j = 0; + for (int k = 0; k < d; k++) { + if (k == i) { + continue; + } + + B->set_element(sub_i, sub_j, A->get_element(j, k)); + sub_j++; + } + sub_i++; + } + + deter += Math::pow(static_cast(-1), static_cast(i)) * A->get_element(0, i) * detm(B, d - 1); + } + } + + return deter; +} + +real_t MLPPLinAlgOld::trace(std::vector> A) { + real_t trace = 0; + for (uint32_t i = 0; i < A.size(); i++) { + trace += A[i][i]; + } + return trace; +} + +std::vector> MLPPLinAlgOld::cofactor(std::vector> A, int n, int i, int j) { + std::vector> cof; + cof.resize(A.size()); + for (uint32_t ii = 0; ii < cof.size(); ii++) { + cof[ii].resize(A.size()); + } + int sub_i = 0, sub_j = 0; + + for (int row = 0; row < n; row++) { + for (int col = 0; col < n; col++) { + if (row != i && col != j) { + cof[sub_i][sub_j++] = A[row][col]; + + if (sub_j == n - 1) { + sub_j = 0; + sub_i++; + } + } + } + } + return cof; +} + +std::vector> MLPPLinAlgOld::adjoint(std::vector> A) { + //Resizing the initial adjoint matrix + std::vector> adj; + adj.resize(A.size()); + for (uint32_t i = 0; i < adj.size(); i++) { + adj[i].resize(A.size()); + } + + // Checking for the case where the given N x N matrix is a scalar + if (A.size() == 1) { + adj[0][0] = 1; + return adj; + } + + if (A.size() == 2) { + adj[0][0] = A[1][1]; + adj[1][1] = A[0][0]; + + adj[0][1] = -A[0][1]; + adj[1][0] = -A[1][0]; + return adj; + } + + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A.size(); j++) { + std::vector> cof = cofactor(A, int(A.size()), i, j); + // 1 if even, -1 if odd + int sign = (i + j) % 2 == 0 ? 1 : -1; + adj[j][i] = sign * det(cof, int(A.size()) - 1); + } + } + return adj; +} + +// The inverse can be computed as (1 / determinant(A)) * adjoint(A) +std::vector> MLPPLinAlgOld::inverse(std::vector> A) { + return scalarMultiply(1 / det(A, int(A.size())), adjoint(A)); +} + +// This is simply the Moore-Penrose least squares approximation of the inverse. +std::vector> MLPPLinAlgOld::pinverse(std::vector> A) { + return matmult(inverse(matmult(transpose(A), A)), transpose(A)); +} + +Ref MLPPLinAlgOld::cofactorm(const Ref &A, int n, int i, int j) { + Ref cof; + cof.instance(); + cof->resize(A->size()); + + int sub_i = 0; + int sub_j = 0; + + for (int row = 0; row < n; row++) { + for (int col = 0; col < n; col++) { + if (row != i && col != j) { + cof->set_element(sub_i, sub_j++, A->get_element(row, col)); + + if (sub_j == n - 1) { + sub_j = 0; + sub_i++; + } + } + } + } + + return cof; +} +Ref MLPPLinAlgOld::adjointm(const Ref &A) { + Ref adj; + + ERR_FAIL_COND_V(!A.is_valid(), adj); + + Size2i a_size = A->size(); + + ERR_FAIL_COND_V(a_size.x != a_size.y, adj); + + //Resizing the initial adjoint matrix + + adj.instance(); + adj->resize(a_size); + + // Checking for the case where the given N x N matrix is a scalar + if (a_size.y == 1) { + adj->set_element(0, 0, 1); + return adj; + } + + if (a_size.y == 2) { + adj->set_element(0, 0, A->get_element(1, 1)); + adj->set_element(1, 1, A->get_element(0, 0)); + + adj->set_element(0, 1, -A->get_element(0, 1)); + adj->set_element(1, 0, -A->get_element(1, 0)); + + return adj; + } + + for (int i = 0; i < a_size.y; i++) { + for (int j = 0; j < a_size.x; j++) { + Ref cof = cofactorm(A, a_size.y, i, j); + // 1 if even, -1 if odd + int sign = (i + j) % 2 == 0 ? 
1 : -1; + adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1)); + } + } + return adj; +} +Ref MLPPLinAlgOld::inversem(const Ref &A) { + return scalar_multiplym(1 / detm(A, int(A->size().y)), adjointm(A)); +} +Ref MLPPLinAlgOld::pinversem(const Ref &A) { + return matmultm(inversem(matmultm(transposem(A), A)), transposem(A)); +} + +std::vector> MLPPLinAlgOld::zeromat(int n, int m) { + std::vector> zeromat; + zeromat.resize(n); + for (uint32_t i = 0; i < zeromat.size(); i++) { + zeromat[i].resize(m); + } + return zeromat; +} + +std::vector> MLPPLinAlgOld::onemat(int n, int m) { + return full(n, m, 1); +} + +Ref MLPPLinAlgOld::zeromatm(int n, int m) { + Ref mat; + mat.instance(); + + mat->resize(Size2i(m, n)); + mat->fill(0); + + return mat; +} +Ref MLPPLinAlgOld::onematm(int n, int m) { + Ref mat; + mat.instance(); + + mat->resize(Size2i(m, n)); + mat->fill(1); + + return mat; +} +Ref MLPPLinAlgOld::fullm(int n, int m, int k) { + Ref mat; + mat.instance(); + + mat->resize(Size2i(m, n)); + mat->fill(k); + + return mat; +} + +std::vector> MLPPLinAlgOld::full(int n, int m, int k) { + std::vector> full; + full.resize(n); + for (uint32_t i = 0; i < full.size(); i++) { + full[i].resize(m); + } + for (uint32_t i = 0; i < full.size(); i++) { + for (uint32_t j = 0; j < full[i].size(); j++) { + full[i][j] = k; + } + } + return full; +} + +std::vector> MLPPLinAlgOld::sin(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::sin(A[i][j]); + } + } + return B; +} + +std::vector> MLPPLinAlgOld::cos(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::cos(A[i][j]); + } + } + return B; +} + +Ref MLPPLinAlgOld::sinm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::sin(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::cosm(const Ref &A) { + ERR_FAIL_COND_V(!A.is_valid(), Ref()); + + Ref out; + out.instance(); + + int data_size = A->data_size(); + out->resize(A->size()); + + const real_t *a_ptr = A->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < data_size; ++i) { + out_ptr[i] = Math::cos(a_ptr[i]); + } + + return out; +} + +std::vector MLPPLinAlgOld::max(std::vector a, std::vector b) { + std::vector c; + c.resize(a.size()); + for (uint32_t i = 0; i < c.size(); i++) { + if (a[i] >= b[i]) { + c[i] = a[i]; + } else { + c[i] = b[i]; + } + } + return c; +} + +real_t MLPPLinAlgOld::max(std::vector> A) { + return max(flatten(A)); +} + +real_t MLPPLinAlgOld::min(std::vector> A) { + return min(flatten(A)); +} + +std::vector> MLPPLinAlgOld::round(std::vector> A) { + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + B[i][j] = std::round(A[i][j]); + } + } + return B; +} + +real_t MLPPLinAlgOld::norm_2(std::vector> A) { + real_t sum = 0; + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + sum += A[i][j] * 
A[i][j]; + } + } + return std::sqrt(sum); +} + +std::vector> MLPPLinAlgOld::identity(real_t d) { + std::vector> identityMat; + identityMat.resize(d); + for (uint32_t i = 0; i < identityMat.size(); i++) { + identityMat[i].resize(d); + } + for (uint32_t i = 0; i < identityMat.size(); i++) { + for (uint32_t j = 0; j < identityMat.size(); j++) { + if (i == j) { + identityMat[i][j] = 1; + } else { + identityMat[i][j] = 0; + } + } + } + return identityMat; +} + +Ref MLPPLinAlgOld::identitym(int d) { + Ref identity_mat; + identity_mat.instance(); + identity_mat->resize(Size2i(d, d)); + identity_mat->fill(0); + + real_t *im_ptr = identity_mat->ptrw(); + + for (int i = 0; i < d; i++) { + im_ptr[identity_mat->calculate_index(i, i)] = 1; + } + + return identity_mat; +} + +std::vector> MLPPLinAlgOld::cov(std::vector> A) { + MLPPStat stat; + std::vector> covMat; + covMat.resize(A.size()); + for (uint32_t i = 0; i < covMat.size(); i++) { + covMat[i].resize(A.size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A.size(); j++) { + covMat[i][j] = stat.covariance(A[i], A[j]); + } + } + return covMat; +} + +Ref MLPPLinAlgOld::covm(const Ref &A) { + MLPPStat stat; + + Ref cov_mat; + cov_mat.instance(); + + Size2i a_size = A->size(); + + cov_mat->resize(a_size); + + Ref a_i_row_tmp; + a_i_row_tmp.instance(); + a_i_row_tmp->resize(a_size.x); + + Ref a_j_row_tmp; + a_j_row_tmp.instance(); + a_j_row_tmp->resize(a_size.x); + + for (int i = 0; i < a_size.y; ++i) { + A->get_row_into_mlpp_vector(i, a_i_row_tmp); + + for (int j = 0; j < a_size.x; ++j) { + A->get_row_into_mlpp_vector(j, a_j_row_tmp); + + cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp)); + } + } + + return cov_mat; +} + +std::tuple>, std::vector>> MLPPLinAlgOld::eig(std::vector> A) { + /* + A (the entered parameter) in most use cases will be X'X, XX', etc. and must be symmetric. + That simply means that 1) X' = X and 2) X is a square matrix. This function that computes the + eigenvalues of a matrix is utilizing Jacobi's method. + */ + + real_t diagonal = true; // Perform the iterative Jacobi algorithm unless and until we reach a diagonal matrix which yields us the eigenvals. 
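+	// Jacobi step, as implemented below: locate the off-diagonal entry a_ij with the
+	// largest absolute value, build a Givens rotation P from
+	//   theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj))   (theta = pi / 4 when a_ii == a_jj),
+	// and update A <- P^-1 * A * P. Iterating drives the off-diagonal entries toward
+	// zero, so the diagonal of the final A holds the eigenvalues and the accumulated
+	// product of the P matrices holds the corresponding eigenvectors.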
+ + std::map val_to_vec; + std::vector> a_new; + std::vector> eigenvectors = identity(A.size()); + do { + real_t a_ij = A[0][1]; + real_t sub_i = 0; + real_t sub_j = 1; + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + if (i != j && std::abs(A[i][j]) > a_ij) { + a_ij = A[i][j]; + sub_i = i; + sub_j = j; + } else if (i != j && std::abs(A[i][j]) == a_ij) { + if (i < sub_i) { + a_ij = A[i][j]; + sub_i = i; + sub_j = j; + } + } + } + } + + real_t a_ii = A[sub_i][sub_i]; + real_t a_jj = A[sub_j][sub_j]; + //real_t a_ji = A[sub_j][sub_i]; + real_t theta; + + if (a_ii == a_jj) { + theta = M_PI / 4; + } else { + theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj)); + } + + std::vector> P = identity(A.size()); + P[sub_i][sub_j] = -std::sin(theta); + P[sub_i][sub_i] = std::cos(theta); + P[sub_j][sub_j] = std::cos(theta); + P[sub_j][sub_i] = std::sin(theta); + + a_new = matmult(matmult(inverse(P), A), P); + + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j && std::round(a_new[i][j]) == 0) { + a_new[i][j] = 0; + } + } + } + + bool non_zero = false; + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j && std::round(a_new[i][j]) != 0) { + non_zero = true; + } + } + } + + if (non_zero) { + diagonal = false; + } else { + diagonal = true; + } + + if (a_new == A) { + diagonal = true; + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j) { + a_new[i][j] = 0; + } + } + } + } + + eigenvectors = matmult(eigenvectors, P); + A = a_new; + + } while (!diagonal); + + std::vector> a_new_prior = a_new; + + // Bubble Sort. Should change this later. + for (uint32_t i = 0; i < a_new.size() - 1; i++) { + for (uint32_t j = 0; j < a_new.size() - 1 - i; j++) { + if (a_new[j][j] < a_new[j + 1][j + 1]) { + real_t temp = a_new[j + 1][j + 1]; + a_new[j + 1][j + 1] = a_new[j][j]; + a_new[j][j] = temp; + } + } + } + + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new.size(); j++) { + if (a_new[i][i] == a_new_prior[j][j]) { + val_to_vec[i] = j; + } + } + } + + std::vector> eigen_temp = eigenvectors; + for (uint32_t i = 0; i < eigenvectors.size(); i++) { + for (uint32_t j = 0; j < eigenvectors[i].size(); j++) { + eigenvectors[i][j] = eigen_temp[i][val_to_vec[j]]; + } + } + return { eigenvectors, a_new }; +} + +MLPPLinAlgOld::EigenResultOld MLPPLinAlgOld::eigen_old(std::vector> A) { + /* + A (the entered parameter) in most use cases will be X'X, XX', etc. and must be symmetric. + That simply means that 1) X' = X and 2) X is a square matrix. This function that computes the + eigenvalues of a matrix is utilizing Jacobi's method. + */ + + real_t diagonal = true; // Perform the iterative Jacobi algorithm unless and until we reach a diagonal matrix which yields us the eigenvals. 
+ + std::map val_to_vec; + std::vector> a_new; + std::vector> eigenvectors = identity(A.size()); + do { + real_t a_ij = A[0][1]; + real_t sub_i = 0; + real_t sub_j = 1; + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + if (i != j && std::abs(A[i][j]) > a_ij) { + a_ij = A[i][j]; + sub_i = i; + sub_j = j; + } else if (i != j && std::abs(A[i][j]) == a_ij) { + if (i < sub_i) { + a_ij = A[i][j]; + sub_i = i; + sub_j = j; + } + } + } + } + + real_t a_ii = A[sub_i][sub_i]; + real_t a_jj = A[sub_j][sub_j]; + //real_t a_ji = A[sub_j][sub_i]; + real_t theta; + + if (a_ii == a_jj) { + theta = M_PI / 4; + } else { + theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj)); + } + + std::vector> P = identity(A.size()); + P[sub_i][sub_j] = -std::sin(theta); + P[sub_i][sub_i] = std::cos(theta); + P[sub_j][sub_j] = std::cos(theta); + P[sub_j][sub_i] = std::sin(theta); + + a_new = matmult(matmult(inverse(P), A), P); + + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j && std::round(a_new[i][j]) == 0) { + a_new[i][j] = 0; + } + } + } + + bool non_zero = false; + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j && std::round(a_new[i][j]) != 0) { + non_zero = true; + } + } + } + + if (non_zero) { + diagonal = false; + } else { + diagonal = true; + } + + if (a_new == A) { + diagonal = true; + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new[i].size(); j++) { + if (i != j) { + a_new[i][j] = 0; + } + } + } + } + + eigenvectors = matmult(eigenvectors, P); + A = a_new; + + } while (!diagonal); + + std::vector> a_new_prior = a_new; + + // Bubble Sort. Should change this later. + for (uint32_t i = 0; i < a_new.size() - 1; i++) { + for (uint32_t j = 0; j < a_new.size() - 1 - i; j++) { + if (a_new[j][j] < a_new[j + 1][j + 1]) { + real_t temp = a_new[j + 1][j + 1]; + a_new[j + 1][j + 1] = a_new[j][j]; + a_new[j][j] = temp; + } + } + } + + for (uint32_t i = 0; i < a_new.size(); i++) { + for (uint32_t j = 0; j < a_new.size(); j++) { + if (a_new[i][i] == a_new_prior[j][j]) { + val_to_vec[i] = j; + } + } + } + + std::vector> eigen_temp = eigenvectors; + for (uint32_t i = 0; i < eigenvectors.size(); i++) { + for (uint32_t j = 0; j < eigenvectors[i].size(); j++) { + eigenvectors[i][j] = eigen_temp[i][val_to_vec[j]]; + } + } + + EigenResultOld res; + res.eigen_vectors = eigenvectors; + res.eigen_values = a_new; + + return res; +} + +MLPPLinAlgOld::EigenResult MLPPLinAlgOld::eigen(Ref A) { + EigenResult res; + + ERR_FAIL_COND_V(!A.is_valid(), res); + + /* + A (the entered parameter) in most use cases will be X'X, XX', etc. and must be symmetric. + That simply means that 1) X' = X and 2) X is a square matrix. This function that computes the + eigenvalues of a matrix is utilizing Jacobi's method. + */ + + real_t diagonal = true; // Perform the iterative Jacobi algorithm unless and until we reach a diagonal matrix which yields us the eigenvals. 
+ + HashMap val_to_vec; + Ref a_new; + Ref eigenvectors = identitym(A->size().y); + Size2i a_size = A->size(); + + do { + real_t a_ij = A->get_element(0, 1); + real_t sub_i = 0; + real_t sub_j = 1; + for (int i = 0; i < a_size.y; ++i) { + for (int j = 0; j < a_size.x; ++j) { + real_t ca_ij = A->get_element(i, j); + real_t abs_ca_ij = ABS(ca_ij); + + if (i != j && abs_ca_ij > a_ij) { + a_ij = ca_ij; + sub_i = i; + sub_j = j; + } else if (i != j && abs_ca_ij == a_ij) { + if (i < sub_i) { + a_ij = ca_ij; + sub_i = i; + sub_j = j; + } + } + } + } + + real_t a_ii = A->get_element(sub_i, sub_i); + real_t a_jj = A->get_element(sub_j, sub_j); + //real_t a_ji = A->get_element(sub_j, sub_i); + real_t theta; + + if (a_ii == a_jj) { + theta = M_PI / 4; + } else { + theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj)); + } + + Ref P = identitym(A->size().y); + P->set_element(sub_i, sub_j, -Math::sin(theta)); + P->set_element(sub_i, sub_i, Math::cos(theta)); + P->set_element(sub_j, sub_j, Math::cos(theta)); + P->set_element(sub_j, sub_i, Math::sin(theta)); + + a_new = matmultm(matmultm(inversem(P), A), P); + + Size2i a_new_size = a_new->size(); + + for (int i = 0; i < a_new_size.y; ++i) { + for (int j = 0; j < a_new_size.x; ++j) { + if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) { + a_new->set_element(i, j, 0); + } + } + } + + bool non_zero = false; + for (int i = 0; i < a_new_size.y; ++i) { + for (int j = 0; j < a_new_size.x; ++j) { + if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) { + non_zero = true; + } + } + } + + if (non_zero) { + diagonal = false; + } else { + diagonal = true; + } + + if (a_new->is_equal_approx(A)) { + diagonal = true; + for (int i = 0; i < a_new_size.y; ++i) { + for (int j = 0; j < a_new_size.x; ++j) { + if (i != j) { + a_new->set_element(i, j, 0); + } + } + } + } + + eigenvectors = matmultm(eigenvectors, P); + A = a_new; + + } while (!diagonal); + + Ref a_new_prior = a_new->duplicate(); + + Size2i a_new_size = a_new->size(); + + // Bubble Sort. Should change this later. 
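+	// The bubble sort below orders the eigenvalues along the diagonal of a_new in
+	// descending order; val_to_vec records where each sorted eigenvalue came from so
+	// the eigenvector columns can be permuted to stay aligned with their eigenvalues.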
+ for (int i = 0; i < a_new_size.y - 1; ++i) { + for (int j = 0; j < a_new_size.x - 1 - i; ++j) { + if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) { + real_t temp = a_new->get_element(j + 1, j + 1); + a_new->set_element(j + 1, j + 1, a_new->get_element(j, j)); + a_new->set_element(j, j, temp); + } + } + } + + for (int i = 0; i < a_new_size.y; ++i) { + for (int j = 0; j < a_new_size.x; ++j) { + if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) { + val_to_vec[i] = j; + } + } + } + + Ref eigen_temp = eigenvectors->duplicate(); + + Size2i eigenvectors_size = eigenvectors->size(); + + for (int i = 0; i < eigenvectors_size.y; ++i) { + for (int j = 0; j < eigenvectors_size.x; ++j) { + eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j])); + } + } + + res.eigen_vectors = eigenvectors; + res.eigen_values = a_new; + + return res; +} + +MLPPLinAlgOld::SVDResultOld MLPPLinAlgOld::SVD(std::vector> A) { + EigenResultOld left_eigen = eigen_old(matmult(A, transpose(A))); + EigenResultOld right_eigen = eigen_old(matmult(transpose(A), A)); + + std::vector> singularvals = sqrt(left_eigen.eigen_values); + std::vector> sigma = zeromat(A.size(), A[0].size()); + for (uint32_t i = 0; i < singularvals.size(); i++) { + for (uint32_t j = 0; j < singularvals[i].size(); j++) { + sigma[i][j] = singularvals[i][j]; + } + } + + SVDResultOld res; + res.U = left_eigen.eigen_vectors; + res.S = sigma; + res.Vt = right_eigen.eigen_vectors; + + return res; +} + +MLPPLinAlgOld::SVDResult MLPPLinAlgOld::svd(const Ref &A) { + SVDResult res; + + ERR_FAIL_COND_V(!A.is_valid(), res); + + Size2i a_size = A->size(); + + EigenResult left_eigen = eigen(matmultm(A, transposem(A))); + EigenResult right_eigen = eigen(matmultm(transposem(A), A)); + + Ref singularvals = sqrtm(left_eigen.eigen_values); + Ref sigma = zeromatm(a_size.y, a_size.x); + + Size2i singularvals_size = singularvals->size(); + + for (int i = 0; i < singularvals_size.y; ++i) { + for (int j = 0; j < singularvals_size.x; ++j) { + sigma->set_element(i, j, singularvals->get_element(i, j)); + } + } + + res.U = left_eigen.eigen_vectors; + res.S = sigma; + res.Vt = right_eigen.eigen_vectors; + + return res; +} + +std::vector MLPPLinAlgOld::vectorProjection(std::vector a, std::vector b) { + real_t product = dot(a, b) / dot(a, a); + return scalarMultiply(product, a); // Projection of vector a onto b. Denotated as proj_a(b). +} + +std::vector> MLPPLinAlgOld::gramSchmidtProcess(std::vector> A) { + A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use. + std::vector> B; + B.resize(A.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i].resize(A[0].size()); + } + + B[0] = A[0]; // We set a_1 = b_1 as an initial condition. + B[0] = scalarMultiply(1 / norm_2(B[0]), B[0]); + for (uint32_t i = 1; i < B.size(); i++) { + B[i] = A[i]; + for (int j = i - 1; j >= 0; j--) { + B[i] = subtraction(B[i], vectorProjection(B[j], A[i])); + } + B[i] = scalarMultiply(1 / norm_2(B[i]), B[i]); // Very simply multiply all elements of vec B[i] by 1/||B[i]||_2 + } + return transpose(B); // We re-transpose the marix. 
+} + +std::tuple>, std::vector>> MLPPLinAlgOld::QRD(std::vector> A) { + std::vector> Q = gramSchmidtProcess(A); + std::vector> R = matmult(transpose(Q), A); + return { Q, R }; +} + +MLPPLinAlgOld::QRDResult MLPPLinAlgOld::qrd(std::vector> A) { + QRDResult res; + + res.Q = gramSchmidtProcess(A); + res.R = matmult(transpose(res.Q), A); + + return res; +} + +std::tuple>, std::vector>> MLPPLinAlgOld::chol(std::vector> A) { + std::vector> L = zeromat(A.size(), A[0].size()); + for (uint32_t j = 0; j < L.size(); j++) { // Matrices entered must be square. No problem here. + for (uint32_t i = j; i < L.size(); i++) { + if (i == j) { + real_t sum = 0; + for (uint32_t k = 0; k < j; k++) { + sum += L[i][k] * L[i][k]; + } + L[i][j] = std::sqrt(A[i][j] - sum); + } else { // That is, i!=j + real_t sum = 0; + for (uint32_t k = 0; k < j; k++) { + sum += L[i][k] * L[j][k]; + } + L[i][j] = (A[i][j] - sum) / L[j][j]; + } + } + } + return { L, transpose(L) }; // Indeed, L.T is our upper triangular matrix. +} + +MLPPLinAlgOld::CholeskyResult MLPPLinAlgOld::cholesky(std::vector> A) { + std::vector> L = zeromat(A.size(), A[0].size()); + for (uint32_t j = 0; j < L.size(); j++) { // Matrices entered must be square. No problem here. + for (uint32_t i = j; i < L.size(); i++) { + if (i == j) { + real_t sum = 0; + for (uint32_t k = 0; k < j; k++) { + sum += L[i][k] * L[i][k]; + } + L[i][j] = std::sqrt(A[i][j] - sum); + } else { // That is, i!=j + real_t sum = 0; + for (uint32_t k = 0; k < j; k++) { + sum += L[i][k] * L[j][k]; + } + L[i][j] = (A[i][j] - sum) / L[j][j]; + } + } + } + + CholeskyResult res; + res.L = L; + res.Lt = transpose(L); // Indeed, L.T is our upper triangular matrix. + + return res; +} + +real_t MLPPLinAlgOld::sum_elements(std::vector> A) { + real_t sum = 0; + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + sum += A[i][j]; + } + } + return sum; +} + +std::vector MLPPLinAlgOld::flatten(std::vector> A) { + std::vector a; + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + a.push_back(A[i][j]); + } + } + return a; +} + +Ref MLPPLinAlgOld::flattenv(const Vector> &A) { + Ref a; + a.instance(); + + int vsize = 0; + for (int i = 0; i < A.size(); ++i) { + vsize += A[i]->size(); + } + + a->resize(vsize); + + int a_index = 0; + real_t *a_ptr = a->ptrw(); + + for (int i = 0; i < A.size(); ++i) { + const Ref &r = A[i]; + + int r_size = r->size(); + const real_t *r_ptr = r->ptr(); + + for (int j = 0; j < r_size; ++j) { + a_ptr[a_index] = r_ptr[j]; + ++a_index; + } + } + + return a; +} + +std::vector MLPPLinAlgOld::solve(std::vector> A, std::vector b) { + return mat_vec_mult(inverse(A), b); +} + +bool MLPPLinAlgOld::positiveDefiniteChecker(std::vector> A) { + auto eig_result = eig(A); + auto eigenvectors = std::get<0>(eig_result); + auto eigenvals = std::get<1>(eig_result); + + std::vector eigenvals_vec; + for (uint32_t i = 0; i < eigenvals.size(); i++) { + eigenvals_vec.push_back(eigenvals[i][i]); + } + for (uint32_t i = 0; i < eigenvals_vec.size(); i++) { + if (eigenvals_vec[i] <= 0) { // Simply check to ensure all eigenvalues are positive. 
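+			// A non-positive eigenvalue means x^T A x <= 0 for its eigenvector x,
+			// so A cannot be positive definite.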
+ return false; + } + } + return true; +} + +bool MLPPLinAlgOld::negativeDefiniteChecker(std::vector> A) { + auto eig_result = eig(A); + auto eigenvectors = std::get<0>(eig_result); + auto eigenvals = std::get<1>(eig_result); + + std::vector eigenvals_vec; + for (uint32_t i = 0; i < eigenvals.size(); i++) { + eigenvals_vec.push_back(eigenvals[i][i]); + } + for (uint32_t i = 0; i < eigenvals_vec.size(); i++) { + if (eigenvals_vec[i] >= 0) { // Simply check to ensure all eigenvalues are negative. + return false; + } + } + return true; +} + +bool MLPPLinAlgOld::zeroEigenvalue(std::vector> A) { + auto eig_result = eig(A); + auto eigenvectors = std::get<0>(eig_result); + auto eigenvals = std::get<1>(eig_result); + + std::vector eigenvals_vec; + for (uint32_t i = 0; i < eigenvals.size(); i++) { + eigenvals_vec.push_back(eigenvals[i][i]); + } + for (uint32_t i = 0; i < eigenvals_vec.size(); i++) { + if (eigenvals_vec[i] == 0) { + return true; + } + } + return false; +} + +void MLPPLinAlgOld::printMatrix(std::vector> A) { + for (uint32_t i = 0; i < A.size(); i++) { + for (uint32_t j = 0; j < A[i].size(); j++) { + std::cout << A[i][j] << " "; + } + std::cout << std::endl; + } +} + +std::vector> MLPPLinAlgOld::outerProduct(std::vector a, std::vector b) { + std::vector> C; + C.resize(a.size()); + for (uint32_t i = 0; i < C.size(); i++) { + C[i] = scalarMultiply(a[i], b); + } + return C; +} + +Ref MLPPLinAlgOld::outer_product(const Ref &a, const Ref &b) { + Ref C; + C.instance(); + Size2i size = Size2i(b->size(), a->size()); + C->resize(size); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + + for (int i = 0; i < size.y; ++i) { + real_t curr_a = a_ptr[i]; + + for (int j = 0; j < size.x; ++j) { + C->set_element(i, j, curr_a * b_ptr[j]); + } + } + + return C; +} + +std::vector MLPPLinAlgOld::hadamard_product(std::vector a, std::vector b) { + std::vector c; + c.resize(a.size()); + + for (uint32_t i = 0; i < a.size(); i++) { + c[i] = a[i] * b[i]; + } + + return c; +} + +Ref MLPPLinAlgOld::hadamard_productnv(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + + ERR_FAIL_COND_V(size != b->size(), Ref()); + + out->resize(size); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] * b_ptr[i]; + } + + return out; +} +void MLPPLinAlgOld::hadamard_productv(const Ref &a, const Ref &b, Ref out) { + ERR_FAIL_COND(!a.is_valid() || !b.is_valid() || !out.is_valid()); + + int size = a->size(); + + ERR_FAIL_COND(size != b->size()); + + if (unlikely(out->size() != size)) { + out->resize(size); + } + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] * b_ptr[i]; + } +} + +std::vector MLPPLinAlgOld::elementWiseDivision(std::vector a, std::vector b) { + std::vector c; + c.resize(a.size()); + + for (uint32_t i = 0; i < a.size(); i++) { + c[i] = a[i] / b[i]; + } + return c; +} + +Ref MLPPLinAlgOld::element_wise_division(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + + ERR_FAIL_COND_V(size != b->size(), Ref()); + + out->resize(size); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] 
/ b_ptr[i]; + } + + return out; +} + +std::vector MLPPLinAlgOld::scalarMultiply(real_t scalar, std::vector a) { + for (uint32_t i = 0; i < a.size(); i++) { + a[i] *= scalar; + } + return a; +} + +Ref MLPPLinAlgOld::scalar_multiplynv(real_t scalar, const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] * scalar; + } + + return out; +} +void MLPPLinAlgOld::scalar_multiplyv(real_t scalar, const Ref &a, Ref out) { + ERR_FAIL_COND(!a.is_valid() || !out.is_valid()); + + int size = a->size(); + + if (unlikely(out->size() != size)) { + out->resize(size); + } + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] * scalar; + } +} + +std::vector MLPPLinAlgOld::scalarAdd(real_t scalar, std::vector a) { + for (uint32_t i = 0; i < a.size(); i++) { + a[i] += scalar; + } + return a; +} + +Ref MLPPLinAlgOld::scalar_addnv(real_t scalar, const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] + scalar; + } + + return out; +} +void MLPPLinAlgOld::scalar_addv(real_t scalar, const Ref &a, Ref out) { + ERR_FAIL_COND(!a.is_valid() || !out.is_valid()); + + int size = a->size(); + + if (unlikely(out->size() != size)) { + out->resize(size); + } + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] + scalar; + } +} + +std::vector MLPPLinAlgOld::addition(std::vector a, std::vector b) { + std::vector c; + c.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + c[i] = a[i] + b[i]; + } + return c; +} + +Ref MLPPLinAlgOld::additionnv(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref()); + + int size = a->size(); + + ERR_FAIL_COND_V(size != b->size(), Ref()); + + Ref out; + out.instance(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] + b_ptr[i]; + } + + return out; +} +void MLPPLinAlgOld::additionv(const Ref &a, const Ref &b, Ref out) { + ERR_FAIL_COND(!a.is_valid() || !b.is_valid() || !out.is_valid()); + + int size = a->size(); + + ERR_FAIL_COND(size != b->size()); + + if (unlikely(out->size() != size)) { + out->resize(size); + } + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] + b_ptr[i]; + } +} + +std::vector MLPPLinAlgOld::subtraction(std::vector a, std::vector b) { + std::vector c; + c.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + c[i] = a[i] - b[i]; + } + return c; +} + +Ref MLPPLinAlgOld::subtractionnv(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), Ref()); + + int size = a->size(); + + ERR_FAIL_COND_V(size != b->size(), Ref()); + + Ref out; + out.instance(); + + if (unlikely(size == 0)) { + return out; + } + + out->resize(size); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] - b_ptr[i]; + } + + 
return out; +} +void MLPPLinAlgOld::subtractionv(const Ref &a, const Ref &b, Ref out) { + ERR_FAIL_COND(!a.is_valid() || !b.is_valid() || !out.is_valid()); + + int size = a->size(); + + ERR_FAIL_COND(size != b->size()); + + if (unlikely(out->size() != size)) { + out->resize(size); + } + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = a_ptr[i] - b_ptr[i]; + } +} + +std::vector MLPPLinAlgOld::subtractMatrixRows(std::vector a, std::vector> B) { + for (uint32_t i = 0; i < B.size(); i++) { + a = subtraction(a, B[i]); + } + return a; +} + +Ref MLPPLinAlgOld::subtract_matrix_rows(const Ref &a, const Ref &B) { + Ref c = a->duplicate(); + + Size2i b_size = B->size(); + + ERR_FAIL_COND_V(b_size.x != c->size(), c); + + const real_t *b_ptr = B->ptr(); + real_t *c_ptr = c->ptrw(); + + for (int i = 0; i < b_size.y; ++i) { + for (int j = 0; j < b_size.x; ++j) { + c_ptr[j] -= b_ptr[B->calculate_index(i, j)]; + } + } + + return c; +} + +std::vector MLPPLinAlgOld::log(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::log(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::log10(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::log10(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::exp(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::exp(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::erf(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::erf(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::exponentiate(std::vector a, real_t p) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < b.size(); i++) { + b[i] = std::pow(a[i], p); + } + return b; +} + +std::vector MLPPLinAlgOld::sqrt(std::vector a) { + return exponentiate(a, 0.5); +} + +std::vector MLPPLinAlgOld::cbrt(std::vector a) { + return exponentiate(a, real_t(1) / real_t(3)); +} + +Ref MLPPLinAlgOld::logv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::log(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::log10v(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = std::log10(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::expv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::exp(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::erfv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = std::erf(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::exponentiatev(const Ref &a, real_t p) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref 
out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::pow(a_ptr[i], p); + } + + return out; +} +Ref MLPPLinAlgOld::sqrtv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::sqrt(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::cbrtv(const Ref &a) { + return exponentiatev(a, static_cast(1) / static_cast(3)); +} + +real_t MLPPLinAlgOld::dot(std::vector a, std::vector b) { + real_t c = 0; + for (uint32_t i = 0; i < a.size(); i++) { + c += a[i] * b[i]; + } + return c; +} + +real_t MLPPLinAlgOld::dotv(const Ref &a, const Ref &b) { + int a_size = a->size(); + + ERR_FAIL_COND_V(a_size != b->size(), 0); + + const real_t *a_ptr = a->ptr(); + const real_t *b_ptr = b->ptr(); + + real_t c = 0; + for (int i = 0; i < a_size; ++i) { + c += a_ptr[i] * b_ptr[i]; + } + return c; +} + +std::vector MLPPLinAlgOld::cross(std::vector a, std::vector b) { + // Cross products exist in R^7 also. Though, I will limit it to R^3 as Wolfram does this. + std::vector> mat = { onevec(3), a, b }; + + real_t det1 = det({ { a[1], a[2] }, { b[1], b[2] } }, 2); + real_t det2 = -det({ { a[0], a[2] }, { b[0], b[2] } }, 2); + real_t det3 = det({ { a[0], a[1] }, { b[0], b[1] } }, 2); + + return { det1, det2, det3 }; +} + +std::vector MLPPLinAlgOld::abs(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < b.size(); i++) { + b[i] = std::abs(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::zerovec(int n) { + std::vector zerovec; + zerovec.resize(n); + return zerovec; +} + +std::vector MLPPLinAlgOld::onevec(int n) { + return full(n, 1); +} + +std::vector> MLPPLinAlgOld::diag(std::vector a) { + std::vector> B = zeromat(a.size(), a.size()); + for (uint32_t i = 0; i < B.size(); i++) { + B[i][i] = a[i]; + } + return B; +} + +Ref MLPPLinAlgOld::diagm(const Ref &a) { + int a_size = a->size(); + + Ref B; + B.instance(); + + B->resize(Size2i(a_size, a_size)); + B->fill(0); + + const real_t *a_ptr = a->ptr(); + real_t *b_ptr = B->ptrw(); + + for (int i = 0; i < a_size; ++i) { + b_ptr[B->calculate_index(i, i)] = a_ptr[i]; + } + + return B; +} + +std::vector MLPPLinAlgOld::full(int n, int k) { + std::vector full; + full.resize(n); + for (uint32_t i = 0; i < full.size(); i++) { + full[i] = k; + } + return full; +} + +Ref MLPPLinAlgOld::absv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = ABS(a_ptr[i]); + } + + return out; +} + +Ref MLPPLinAlgOld::zerovecv(int n) { + Ref vec; + vec.instance(); + + vec->resize(n); + vec->fill(0); + + return vec; +} +Ref MLPPLinAlgOld::onevecv(int n) { + Ref vec; + vec.instance(); + + vec->resize(n); + vec->fill(1); + + return vec; +} +Ref MLPPLinAlgOld::fullv(int n, int k) { + Ref vec; + vec.instance(); + + vec->resize(n); + vec->fill(k); + + return vec; +} + +std::vector MLPPLinAlgOld::sin(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::sin(a[i]); + } + return b; +} + +std::vector MLPPLinAlgOld::cos(std::vector a) { + std::vector b; + b.resize(a.size()); + 
for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::cos(a[i]); + } + return b; +} + +Ref MLPPLinAlgOld::sinv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::sin(a_ptr[i]); + } + + return out; +} +Ref MLPPLinAlgOld::cosv(const Ref &a) { + ERR_FAIL_COND_V(!a.is_valid(), Ref()); + + Ref out; + out.instance(); + + int size = a->size(); + out->resize(size); + + const real_t *a_ptr = a->ptr(); + real_t *out_ptr = out->ptrw(); + + for (int i = 0; i < size; ++i) { + out_ptr[i] = Math::cos(a_ptr[i]); + } + + return out; +} + +std::vector> MLPPLinAlgOld::rotate(std::vector> A, real_t theta, int axis) { + std::vector> rotationMatrix = { { std::cos(theta), -std::sin(theta) }, { std::sin(theta), std::cos(theta) } }; + if (axis == 0) { + rotationMatrix = { { 1, 0, 0 }, { 0, std::cos(theta), -std::sin(theta) }, { 0, std::sin(theta), std::cos(theta) } }; + } else if (axis == 1) { + rotationMatrix = { { std::cos(theta), 0, std::sin(theta) }, { 0, 1, 0 }, { -std::sin(theta), 0, std::cos(theta) } }; + } else if (axis == 2) { + rotationMatrix = { { std::cos(theta), -std::sin(theta), 0 }, { std::sin(theta), std::cos(theta), 0 }, { 1, 0, 0 } }; + } + + return matmult(A, rotationMatrix); +} + +std::vector> MLPPLinAlgOld::max(std::vector> A, std::vector> B) { + std::vector> C; + C.resize(A.size()); + for (uint32_t i = 0; i < C.size(); i++) { + C[i].resize(A[0].size()); + } + for (uint32_t i = 0; i < A.size(); i++) { + C[i] = max(A[i], B[i]); + } + return C; +} + +real_t MLPPLinAlgOld::max(std::vector a) { + int max = a[0]; + for (uint32_t i = 0; i < a.size(); i++) { + if (a[i] > max) { + max = a[i]; + } + } + return max; +} + +real_t MLPPLinAlgOld::min(std::vector a) { + int min = a[0]; + for (uint32_t i = 0; i < a.size(); i++) { + if (a[i] < min) { + min = a[i]; + } + } + return min; +} + +std::vector MLPPLinAlgOld::round(std::vector a) { + std::vector b; + b.resize(a.size()); + for (uint32_t i = 0; i < a.size(); i++) { + b[i] = std::round(a[i]); + } + return b; +} + +// Multidimensional Euclidean Distance +real_t MLPPLinAlgOld::euclideanDistance(std::vector a, std::vector b) { + real_t dist = 0; + for (uint32_t i = 0; i < a.size(); i++) { + dist += (a[i] - b[i]) * (a[i] - b[i]); + } + return std::sqrt(dist); +} + +real_t MLPPLinAlgOld::euclidean_distance(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), 0); + + int a_size = a->size(); + + ERR_FAIL_COND_V(a_size != b->size(), 0); + + const real_t *aa = a->ptr(); + const real_t *ba = b->ptr(); + + real_t dist = 0; + + for (int i = 0; i < a_size; i++) { + dist += (aa[i] - ba[i]) * (aa[i] - ba[i]); + } + + return Math::sqrt(dist); +} +real_t MLPPLinAlgOld::euclidean_distance_squared(const Ref &a, const Ref &b) { + ERR_FAIL_COND_V(!a.is_valid() || !b.is_valid(), 0); + + int a_size = a->size(); + + ERR_FAIL_COND_V(a_size != b->size(), 0); + + const real_t *aa = a->ptr(); + const real_t *ba = b->ptr(); + + real_t dist = 0; + + for (int i = 0; i < a_size; i++) { + dist += (aa[i] - ba[i]) * (aa[i] - ba[i]); + } + + return dist; +} + +real_t MLPPLinAlgOld::norm_2(std::vector a) { + return std::sqrt(norm_sq(a)); +} + +real_t MLPPLinAlgOld::norm_sq(std::vector a) { + real_t n_sq = 0; + for (uint32_t i = 0; i < a.size(); i++) { + n_sq += a[i] * a[i]; + } + return n_sq; +} +real_t MLPPLinAlgOld::norm_sqv(const Ref &a) { + 
+
+real_t MLPPLinAlgOld::norm_2(std::vector<real_t> a) {
+	return std::sqrt(norm_sq(a));
+}
+
+real_t MLPPLinAlgOld::norm_sq(std::vector<real_t> a) {
+	real_t n_sq = 0;
+	for (uint32_t i = 0; i < a.size(); i++) {
+		n_sq += a[i] * a[i];
+	}
+	return n_sq;
+}
+real_t MLPPLinAlgOld::norm_sqv(const Ref<MLPPVector> &a) {
+	ERR_FAIL_COND_V(!a.is_valid(), 0);
+
+	int size = a->size();
+	const real_t *a_ptr = a->ptr();
+
+	real_t n_sq = 0;
+	for (int i = 0; i < size; ++i) {
+		n_sq += a_ptr[i] * a_ptr[i];
+	}
+	return n_sq;
+}
+
+real_t MLPPLinAlgOld::sum_elements(std::vector<real_t> a) {
+	real_t sum = 0;
+	for (uint32_t i = 0; i < a.size(); i++) {
+		sum += a[i];
+	}
+	return sum;
+}
+
+real_t MLPPLinAlgOld::sum_elementsv(const Ref<MLPPVector> &a) {
+	int a_size = a->size();
+
+	const real_t *a_ptr = a->ptr();
+
+	real_t sum = 0;
+	for (int i = 0; i < a_size; ++i) {
+		sum += a_ptr[i];
+	}
+	return sum;
+}
+
+real_t MLPPLinAlgOld::cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b) {
+	return dot(a, b) / (norm_2(a) * norm_2(b));
+}
+
+void MLPPLinAlgOld::printVector(std::vector<real_t> a) {
+	for (uint32_t i = 0; i < a.size(); i++) {
+		std::cout << a[i] << " ";
+	}
+	std::cout << std::endl;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::mat_vec_add(std::vector<std::vector<real_t>> A, std::vector<real_t> b) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[i].size(); j++) {
+			A[i][j] += b[j];
+		}
+	}
+	return A;
+}
+
+std::vector<real_t> MLPPLinAlgOld::mat_vec_mult(std::vector<std::vector<real_t>> A, std::vector<real_t> b) {
+	std::vector<real_t> c;
+	c.resize(A.size());
+
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t k = 0; k < b.size(); k++) {
+			c[i] += A[i][k] * b[k];
+		}
+	}
+	return c;
+}
+
+Ref<MLPPMatrix> MLPPLinAlgOld::mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
+	ERR_FAIL_COND_V(!A.is_valid() || !b.is_valid(), Ref<MLPPMatrix>());
+
+	Size2i a_size = A->size();
+
+	ERR_FAIL_COND_V(a_size.x != b->size(), Ref<MLPPMatrix>());
+
+	Ref<MLPPMatrix> ret;
+	ret.instance();
+	ret->resize(a_size);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = b->ptr();
+	real_t *ret_ptr = ret->ptrw();
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int j = 0; j < a_size.x; ++j) {
+			int mat_index = A->calculate_index(i, j);
+
+			ret_ptr[mat_index] = a_ptr[mat_index] + b_ptr[j];
+		}
+	}
+
+	return ret;
+}
+Ref<MLPPVector> MLPPLinAlgOld::mat_vec_multv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
+	ERR_FAIL_COND_V(!A.is_valid() || !b.is_valid(), Ref<MLPPVector>());
+
+	Size2i a_size = A->size();
+	int b_size = b->size();
+
+	ERR_FAIL_COND_V(a_size.x < b->size(), Ref<MLPPVector>());
+
+	Ref<MLPPVector> c;
+	c.instance();
+	c->resize(a_size.y);
+	c->fill(0);
+
+	const real_t *a_ptr = A->ptr();
+	const real_t *b_ptr = b->ptr();
+	real_t *c_ptr = c->ptrw();
+
+	for (int i = 0; i < a_size.y; ++i) {
+		for (int k = 0; k < b_size; ++k) {
+			int mat_index = A->calculate_index(i, k);
+
+			c_ptr[i] += a_ptr[mat_index] * b_ptr[k];
+		}
+	}
+
+	return c;
+}
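+
+// Shape note: mat_vec_multv(A, b) computes c[i] = sum_k A[i][k] * b[k] (the usual
+// matrix-vector product), while mat_vec_addv(A, b) adds b to every row of A.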
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = addition(A[i], B[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = elementWiseDivision(A[i], B[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::sqrt(std::vector<std::vector<std::vector<real_t>>> A) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = sqrt(A[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = exponentiate(A[i], p);
+	}
+	return A;
+}
+
+std::vector<std::vector<real_t>> MLPPLinAlgOld::tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b) {
+	std::vector<std::vector<real_t>> C;
+	C.resize(A.size());
+	for (uint32_t i = 0; i < C.size(); i++) {
+		C[i].resize(A[0].size());
+	}
+	for (uint32_t i = 0; i < C.size(); i++) {
+		for (uint32_t j = 0; j < C[i].size(); j++) {
+			C[i][j] = dot(A[i][j], b);
+		}
+	}
+	return C;
+}
+
+std::vector<real_t> MLPPLinAlgOld::flatten(std::vector<std::vector<std::vector<real_t>>> A) {
+	std::vector<real_t> c;
+	for (uint32_t i = 0; i < A.size(); i++) {
+		std::vector<real_t> flattenedVec = flatten(A[i]);
+		c.insert(c.end(), flattenedVec.begin(), flattenedVec.end());
+	}
+	return c;
+}
+
+void MLPPLinAlgOld::printTensor(std::vector<std::vector<std::vector<real_t>>> A) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		printMatrix(A[i]);
+		if (i != A.size() - 1) {
+			std::cout << std::endl;
+		}
+	}
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::scalarMultiply(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = scalarMultiply(scalar, A[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::scalarAdd(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = scalarAdd(scalar, A[i]);
+	}
+	return A;
+}
+
+Vector<Ref<MLPPMatrix>> MLPPLinAlgOld::scalar_multiply_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A) {
+	for (int i = 0; i < A.size(); i++) {
+		A.write[i] = scalar_multiplym(scalar, A[i]);
+	}
+	return A;
+}
+Vector<Ref<MLPPMatrix>> MLPPLinAlgOld::scalar_add_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A) {
+	for (int i = 0; i < A.size(); i++) {
+		A.write[i] = scalar_addm(scalar, A[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::resize(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
+	A.resize(B.size());
+	for (uint32_t i = 0; i < B.size(); i++) {
+		A[i].resize(B[i].size());
+		for (uint32_t j = 0; j < B[i].size(); j++) {
+			A[i][j].resize(B[i][j].size());
+		}
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = max(A[i], B[i]);
+	}
+	return A;
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::abs(std::vector<std::vector<std::vector<real_t>>> A) {
+	for (uint32_t i = 0; i < A.size(); i++) {
+		A[i] = abs(A[i]);
+	}
+	return A;
+}
+
+real_t MLPPLinAlgOld::norm_2(std::vector<std::vector<std::vector<real_t>>> A) {
+	real_t sum = 0;
+	for (uint32_t i = 0; i < A.size(); i++) {
+		for (uint32_t j = 0; j < A[i].size(); j++) {
+			for (uint32_t k = 0; k < A[i][j].size(); k++) {
+				sum += A[i][j][k] * A[i][j][k];
+			}
+		}
+	}
+	return std::sqrt(sum);
+}
+
+// Bad implementation. Change this later.
+std::vector<std::vector<std::vector<real_t>>> MLPPLinAlgOld::vector_wise_tensor_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<real_t>> B) {
+	std::vector<std::vector<std::vector<real_t>>> C;
+	C = resize(C, A);
+	for (uint32_t i = 0; i < A[0].size(); i++) {
+		for (uint32_t j = 0; j < A[0][i].size(); j++) {
+			std::vector<real_t> currentVector;
+			currentVector.resize(A.size());
+
+			for (uint32_t k = 0; k < C.size(); k++) {
+				currentVector[k] = A[k][i][j];
+			}
+
+			currentVector = mat_vec_mult(B, currentVector);
+
+			for (uint32_t k = 0; k < C.size(); k++) {
+				C[k][i][j] = currentVector[k];
+			}
+		}
+	}
+	return C;
+}
+
+void MLPPLinAlgOld::_bind_methods() {
+}
diff --git a/mlpp/lin_alg/lin_alg_old.h b/mlpp/lin_alg/lin_alg_old.h
new file mode 100644
index 0000000..d933a48
--- /dev/null
+++ b/mlpp/lin_alg/lin_alg_old.h
@@ -0,0 +1,337 @@
+
+#ifndef MLPP_LIN_ALG_OLD_H
+#define MLPP_LIN_ALG_OLD_H
+
+//
+// LinAlg.hpp
+//
+// Created by Marc Melikyan on 1/8/21.
+//
+
+//TODO Methods here should probably use error macros in a way where they get disabled in non-tools(?) (maybe release?) builds
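+
+// Convention used below: the std::vector-based overloads keep the original ML++ names,
+// while the Ref<MLPPVector> / Ref<MLPPMatrix> based ports use suffixed variants such as
+// additionnv()/additionm() or sqrtv()/sqrtm().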
+
+#include "core/math/math_defs.h"
+
+#include "core/object/reference.h"
+
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_vector.h"
+
+#include <tuple>
+#include <vector>
+
+class MLPPLinAlgOld : public Reference {
+	GDCLASS(MLPPLinAlgOld, Reference);
+
+public:
+	// MATRIX FUNCTIONS
+
+	std::vector<std::vector<real_t>> gramMatrix(std::vector<std::vector<real_t>> A);
+
+	bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
+
+	std::vector<std::vector<real_t>> gaussianNoise(int n, int m);
+	Ref<MLPPMatrix> gaussian_noise(int n, int m);
+
+	std::vector<std::vector<real_t>> addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+	std::vector<std::vector<real_t>> subtraction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+	std::vector<std::vector<real_t>> matmult(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+
+	Ref<MLPPMatrix> additionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> subtractionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> matmultm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+
+	std::vector<std::vector<real_t>> hadamard_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+	std::vector<std::vector<real_t>> kronecker_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+	std::vector<std::vector<real_t>> elementWiseDivision(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+
+	Ref<MLPPMatrix> hadamard_productm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> kronecker_productm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+	Ref<MLPPMatrix> element_wise_divisionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
+
+	std::vector<std::vector<real_t>> transpose(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> scalarMultiply(real_t scalar, std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> scalarAdd(real_t scalar, std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> transposem(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> scalar_multiplym(real_t scalar, const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> scalar_addm(real_t scalar, const Ref<MLPPMatrix> &A);
+
+	std::vector<std::vector<real_t>> log(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> log10(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> exp(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> erf(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> exponentiate(std::vector<std::vector<real_t>> A, real_t p);
+	std::vector<std::vector<real_t>> sqrt(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> cbrt(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> logm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> log10m(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> expm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> erfm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> exponentiatem(const Ref<MLPPMatrix> &A, real_t p);
+	Ref<MLPPMatrix> sqrtm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> cbrtm(const Ref<MLPPMatrix> &A);
+
+	std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
+
+	std::vector<std::vector<real_t>> abs(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> absm(const Ref<MLPPMatrix> &A);
+
+	real_t det(std::vector<std::vector<real_t>> A, int d);
+	real_t detm(const Ref<MLPPMatrix> &A, int d);
+
+	real_t trace(std::vector<std::vector<real_t>> A);
+
+	std::vector<std::vector<real_t>> cofactor(std::vector<std::vector<real_t>> A, int n, int i, int j);
+	std::vector<std::vector<real_t>> adjoint(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> inverse(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> pinverse(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> cofactorm(const Ref<MLPPMatrix> &A, int n, int i, int j);
+	Ref<MLPPMatrix> adjointm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> inversem(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> pinversem(const Ref<MLPPMatrix> &A);
+
+	std::vector<std::vector<real_t>> zeromat(int n, int m);
+	std::vector<std::vector<real_t>> onemat(int n, int m);
+	std::vector<std::vector<real_t>> full(int n, int m, int k);
+
+	Ref<MLPPMatrix> zeromatm(int n, int m);
+	Ref<MLPPMatrix> onematm(int n, int m);
+	Ref<MLPPMatrix> fullm(int n, int m, int k);
+
+	std::vector<std::vector<real_t>> sin(std::vector<std::vector<real_t>> A);
+	std::vector<std::vector<real_t>> cos(std::vector<std::vector<real_t>> A);
+
+	Ref<MLPPMatrix> sinm(const Ref<MLPPMatrix> &A);
+	Ref<MLPPMatrix> cosm(const Ref<MLPPMatrix> &A);
+
+	std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
+
+	std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
+	real_t max(std::vector<std::vector<real_t>> A);
+	real_t min(std::vector<std::vector<real_t>> A);
+
+	std::vector<std::vector<real_t>> round(std::vector<std::vector<real_t>> A);
+
+	real_t norm_2(std::vector<std::vector<real_t>> A);
+
+	std::vector<std::vector<real_t>> identity(real_t d);
+	Ref<MLPPMatrix> identitym(int d);
+
+	std::vector<std::vector<real_t>> cov(std::vector<std::vector<real_t>> A);
+	Ref<MLPPMatrix> covm(const Ref<MLPPMatrix> &A);
+
+	std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> eig(std::vector<std::vector<real_t>> A);
+
+	struct EigenResultOld {
+		std::vector<std::vector<real_t>> eigen_vectors;
+		std::vector<std::vector<real_t>> eigen_values;
+	};
+
+	EigenResultOld eigen_old(std::vector<std::vector<real_t>> A);
+
+	struct EigenResult {
+		Ref<MLPPMatrix> eigen_vectors;
+		Ref<MLPPMatrix> eigen_values;
+	};
+
+	EigenResult eigen(Ref<MLPPMatrix> A);
+
+	struct SVDResultOld {
+		std::vector<std::vector<real_t>> U;
+		std::vector<std::vector<real_t>> S;
+		std::vector<std::vector<real_t>> Vt;
+	};
+
+	SVDResultOld SVD(std::vector<std::vector<real_t>> A);
+
+	struct SVDResult {
+		Ref<MLPPMatrix> U;
+		Ref<MLPPMatrix> S;
+		Ref<MLPPMatrix> Vt;
+	};
+
+	SVDResult svd(const Ref<MLPPMatrix> &A);
+
+	std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
+
+	std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
+
+	std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> QRD(std::vector<std::vector<real_t>> A);
+
+	struct QRDResult {
+		std::vector<std::vector<real_t>> Q;
+		std::vector<std::vector<real_t>> R;
+	};
+
+	QRDResult qrd(std::vector<std::vector<real_t>> A);
+
+	std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> chol(std::vector<std::vector<real_t>> A);
+
+	struct CholeskyResult {
+		std::vector<std::vector<real_t>> L;
+		std::vector<std::vector<real_t>> Lt;
+	};
+
+	CholeskyResult cholesky(std::vector<std::vector<real_t>> A);
+
+	real_t sum_elements(std::vector<std::vector<real_t>> A);
+
+	std::vector<real_t> flatten(std::vector<std::vector<real_t>> A);
+	Ref<MLPPVector> flattenv(const Vector<Ref<MLPPVector>> &A);
+
+	std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
+
+	bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);
+
+	bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);
+
+	bool zeroEigenvalue(std::vector<std::vector<real_t>> A);
+
+	void printMatrix(std::vector<std::vector<real_t>> A);
+
+	// VECTOR FUNCTIONS
+
+	std::vector<std::vector<real_t>> outerProduct(std::vector<real_t> a, std::vector<real_t> b); // This multiplies a, bT
+	Ref<MLPPMatrix> outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); // This multiplies a, bT
+
+	std::vector<real_t> hadamard_product(std::vector<real_t> a, std::vector<real_t> b);
+	Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
+
+	std::vector<real_t> elementWiseDivision(std::vector<real_t> a, std::vector<real_t> b);
+	Ref<MLPPVector> element_wise_division(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+
+	std::vector<real_t> scalarMultiply(real_t scalar, std::vector<real_t> a);
+	Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
+	void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
+
+	std::vector<real_t> scalarAdd(real_t scalar, std::vector<real_t> a);
+	Ref<MLPPVector> scalar_addnv(real_t scalar, const Ref<MLPPVector> &a);
+	void scalar_addv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
+
+	std::vector<real_t> addition(std::vector<real_t> a, std::vector<real_t> b);
+	Ref<MLPPVector> additionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	void additionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
+
+	std::vector<real_t> subtraction(std::vector<real_t> a, std::vector<real_t> b);
+	Ref<MLPPVector> subtractionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	void subtractionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
+
+	std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
+	Ref<MLPPVector> subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
+
+	std::vector<real_t> log(std::vector<real_t> a);
+	std::vector<real_t> log10(std::vector<real_t> a);
+	std::vector<real_t> exp(std::vector<real_t> a);
+	std::vector<real_t> erf(std::vector<real_t> a);
+	std::vector<real_t> exponentiate(std::vector<real_t> a, real_t p);
+	std::vector<real_t> sqrt(std::vector<real_t> a);
+	std::vector<real_t> cbrt(std::vector<real_t> a);
+
+	Ref<MLPPVector> logv(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> log10v(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> expv(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> erfv(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> exponentiatev(const Ref<MLPPVector> &a, real_t p);
+	Ref<MLPPVector> sqrtv(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> cbrtv(const Ref<MLPPVector> &a);
+
+	real_t dot(std::vector<real_t> a, std::vector<real_t> b);
+	real_t dotv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+
+	std::vector<real_t> cross(std::vector<real_t> a, std::vector<real_t> b);
+
+	std::vector<real_t> abs(std::vector<real_t> a);
+
+	std::vector<real_t> zerovec(int n);
+	std::vector<real_t> onevec(int n);
+	std::vector<real_t> full(int n, int k);
+
+	Ref<MLPPVector> absv(const Ref<MLPPVector> &a);
+
+	Ref<MLPPVector> zerovecv(int n);
+	Ref<MLPPVector> onevecv(int n);
+	Ref<MLPPVector> fullv(int n, int k);
+
+	std::vector<std::vector<real_t>> diag(std::vector<real_t> a);
+	Ref<MLPPMatrix> diagm(const Ref<MLPPVector> &a);
+
+	std::vector<real_t> sin(std::vector<real_t> a);
+	std::vector<real_t> cos(std::vector<real_t> a);
+
+	Ref<MLPPVector> sinv(const Ref<MLPPVector> &a);
+	Ref<MLPPVector> cosv(const Ref<MLPPVector> &a);
+
+	std::vector<real_t> max(std::vector<real_t> a, std::vector<real_t> b);
+
+	real_t max(std::vector<real_t> a);
+
+	real_t min(std::vector<real_t> a);
+
+	std::vector<real_t> round(std::vector<real_t> a);
+
+	real_t euclideanDistance(std::vector<real_t> a, std::vector<real_t> b);
+	real_t euclidean_distance(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+	real_t euclidean_distance_squared(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+
+	real_t norm_2(std::vector<real_t> a);
+
+	real_t norm_sq(std::vector<real_t> a);
+	real_t norm_sqv(const Ref<MLPPVector> &a);
+
+	real_t sum_elements(std::vector<real_t> a);
+	real_t sum_elementsv(const Ref<MLPPVector> &a);
+
+	real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
+
+	void printVector(std::vector<real_t> a);
+
+	// MATRIX-VECTOR FUNCTIONS
+	std::vector<std::vector<real_t>> mat_vec_add(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
+	std::vector<real_t> mat_vec_mult(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
+
+	Ref<MLPPMatrix> mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
+	Ref<MLPPVector> mat_vec_multv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
+
+	// TENSOR FUNCTIONS
+	std::vector<std::vector<std::vector<real_t>>> addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+
+	std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+
+	std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A);
+
+	std::vector<std::vector<std::vector<real_t>>> exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p);
+
+	std::vector<std::vector<real_t>> tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b);
+
+	std::vector<real_t> flatten(std::vector<std::vector<std::vector<real_t>>> A);
+
+	void printTensor(std::vector<std::vector<std::vector<real_t>>> A);
+
+	std::vector<std::vector<std::vector<real_t>>> scalarMultiply(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
+	std::vector<std::vector<std::vector<real_t>>> scalarAdd(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
+
+	Vector<Ref<MLPPMatrix>> scalar_multiply_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
+	Vector<Ref<MLPPMatrix>> scalar_add_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
+
+	std::vector<std::vector<std::vector<real_t>>> resize(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+
+	std::vector<std::vector<std::vector<real_t>>> hadamard_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+
+	std::vector<std::vector<std::vector<real_t>>> max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+
+	std::vector<std::vector<std::vector<real_t>>> abs(std::vector<std::vector<std::vector<real_t>>> A);
+
+	real_t norm_2(std::vector<std::vector<std::vector<real_t>>> A);
+
+	std::vector<std::vector<std::vector<real_t>>> vector_wise_tensor_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<real_t>> B);
+
+protected:
+	static void _bind_methods();
+};
+
+#endif /* MLPP_LIN_ALG_OLD_H */
\ No newline at end of file