// pmlpp/mlpp/lin_alg/lin_alg.h

#ifndef MLPP_LIN_ALG_H
#define MLPP_LIN_ALG_H
//
// LinAlg.hpp
//
// Created by Marc Melikyan on 1/8/21.
//
// TODO: Methods here should probably use error macros in a way that lets them be disabled in non-tools (or maybe release?) builds.
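
// Usage sketch (illustrative only, not part of this file; assumes the engine's usual
// Reference/Ref API for instancing):
//
//   Ref<MLPPLinAlg> alg;
//   alg.instance();
//   Ref<MLPPMatrix> C = alg->matmultm(A, B); // A: n x k, B: k x m  ->  C: n x m
//   real_t d = alg->dotv(a, b);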
#include "core/math/math_defs.h"
#include "core/object/reference.h"
#include "../lin_alg/mlpp_matrix.h"
#include "../lin_alg/mlpp_vector.h"
#include <tuple>
#include <vector>

class MLPPLinAlg : public Reference {
GDCLASS(MLPPLinAlg, Reference);
public:
// MATRIX FUNCTIONS
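// Gram matrix: G = A^T * A. linearIndependenceChecker() presumably tests
// whether det(G) != 0, i.e. whether the columns of A are linearly independent.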
std::vector<std::vector<real_t>> gramMatrix(std::vector<std::vector<real_t>> A);
bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
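// n x m matrix of (presumably standard-normal) Gaussian noise.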
std::vector<std::vector<real_t>> gaussianNoise(int n, int m);
Ref<MLPPMatrix> gaussian_noise(int n, int m);
std::vector<std::vector<real_t>> addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
std::vector<std::vector<real_t>> subtraction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
std::vector<std::vector<real_t>> matmult(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
Ref<MLPPMatrix> additionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> subtractionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> matmultm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
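// Element-wise operations: Hadamard (element-wise) product, Kronecker product,
// and element-wise division. The Hadamard product and the division expect A and B
// to have matching dimensions.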
std::vector<std::vector<real_t>> hadamard_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
std::vector<std::vector<real_t>> kronecker_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
std::vector<std::vector<real_t>> elementWiseDivision(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
Ref<MLPPMatrix> hadamard_productm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> kronecker_productm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
Ref<MLPPMatrix> element_wise_divisionm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
std::vector<std::vector<real_t>> transpose(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> scalarMultiply(real_t scalar, std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> scalarAdd(real_t scalar, std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> transposem(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> scalar_multiplym(real_t scalar, const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> scalar_addm(real_t scalar, const Ref<MLPPMatrix> &A);
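// Element-wise math functions, applied independently to every entry of A.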
std::vector<std::vector<real_t>> log(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> log10(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> exp(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> erf(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> exponentiate(std::vector<std::vector<real_t>> A, real_t p);
std::vector<std::vector<real_t>> sqrt(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> cbrt(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> logm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> log10m(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> expm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> erfm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> exponentiatem(const Ref<MLPPMatrix> &A, real_t p);
Ref<MLPPMatrix> sqrtm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> cbrtm(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
std::vector<std::vector<real_t>> abs(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> absm(const Ref<MLPPMatrix> &A);
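// Determinant of A; d is the current (sub)matrix dimension, suggesting a recursive
// cofactor (Laplace) expansion.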
real_t det(std::vector<std::vector<real_t>> A, int d);
real_t detm(const Ref<MLPPMatrix> &A, int d);
real_t trace(std::vector<std::vector<real_t>> A);
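// Adjugate-based inverse: inverse(A) = adjoint(A) / det(A). pinverse() is presumably
// the Moore-Penrose pseudoinverse, e.g. (A^T * A)^{-1} * A^T.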
std::vector<std::vector<real_t>> cofactor(std::vector<std::vector<real_t>> A, int n, int i, int j);
std::vector<std::vector<real_t>> adjoint(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> inverse(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> pinverse(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> cofactorm(const Ref<MLPPMatrix> &A, int n, int i, int j);
Ref<MLPPMatrix> adjointm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> inversem(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> pinversem(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> zeromat(int n, int m);
std::vector<std::vector<real_t>> onemat(int n, int m);
std::vector<std::vector<real_t>> full(int n, int m, int k);
Ref<MLPPMatrix> zeromatm(int n, int m);
Ref<MLPPMatrix> onematm(int n, int m);
Ref<MLPPMatrix> fullm(int n, int m, int k);
std::vector<std::vector<real_t>> sin(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> cos(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> sinm(const Ref<MLPPMatrix> &A);
Ref<MLPPMatrix> cosm(const Ref<MLPPMatrix> &A);
std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
real_t max(std::vector<std::vector<real_t>> A);
real_t min(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> round(std::vector<std::vector<real_t>> A);
real_t norm_2(std::vector<std::vector<real_t>> A);
std::vector<std::vector<real_t>> identity(real_t d);
Ref<MLPPMatrix> identitym(int d);
std::vector<std::vector<real_t>> cov(std::vector<std::vector<real_t>> A);
Ref<MLPPMatrix> covm(const Ref<MLPPMatrix> &A);
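// Eigendecomposition of A: returns the eigenvectors and the eigenvalues (the latter
// presumably as a diagonal matrix), so that A * V = V * D for symmetric A.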
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> eig(std::vector<std::vector<real_t>> A);
struct EigenResultOld {
std::vector<std::vector<real_t>> eigen_vectors;
std::vector<std::vector<real_t>> eigen_values;
};
EigenResultOld eigen_old(std::vector<std::vector<real_t>> A);
struct EigenResult {
Ref<MLPPMatrix> eigen_vectors;
Ref<MLPPMatrix> eigen_values;
};
EigenResult eigen(Ref<MLPPMatrix> A);
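// Singular value decomposition: A = U * S * Vt, with the singular values of A
// (presumably) on the diagonal of S and U, Vt orthogonal.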
struct SVDResultOld {
std::vector<std::vector<real_t>> U;
std::vector<std::vector<real_t>> S;
std::vector<std::vector<real_t>> Vt;
};
SVDResultOld SVD(std::vector<std::vector<real_t>> A);
struct SVDResult {
Ref<MLPPMatrix> U;
Ref<MLPPMatrix> S;
Ref<MLPPMatrix> Vt;
};
SVDResult svd(const Ref<MLPPMatrix> &A);
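// vectorProjection(a, b) = ((a . b) / (b . b)) * b. gramSchmidtProcess() orthonormalizes
// the vectors of A, and QRD() presumably uses it to factor A = Q * R with Q orthonormal
// and R upper triangular.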
std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> QRD(std::vector<std::vector<real_t>> A);
struct QRDResult {
std::vector<std::vector<real_t>> Q;
std::vector<std::vector<real_t>> R;
};
QRDResult qrd(std::vector<std::vector<real_t>> A);
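// Cholesky decomposition of a symmetric positive-definite A: A = L * Lt,
// with L lower triangular.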
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> chol(std::vector<std::vector<real_t>> A);
struct CholeskyResult {
std::vector<std::vector<real_t>> L;
std::vector<std::vector<real_t>> Lt;
};
CholeskyResult cholesky(std::vector<std::vector<real_t>> A);
real_t sum_elements(std::vector<std::vector<real_t>> A);
std::vector<real_t> flatten(std::vector<std::vector<real_t>> A);
Ref<MLPPVector> flattenv(const Vector<Ref<MLPPVector>> &A);
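// Solves the linear system A * x = b (presumably as inverse(A) * b).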
std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
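// Definiteness checks, presumably based on the signs of the eigenvalues of A.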
bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);
bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);
bool zeroEigenvalue(std::vector<std::vector<real_t>> A);
void printMatrix(std::vector<std::vector<real_t>> A);
// VECTOR FUNCTIONS
std::vector<std::vector<real_t>> outerProduct(std::vector<real_t> a, std::vector<real_t> b); // Computes the outer product a * b^T.
Ref<MLPPMatrix> outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); // Computes the outer product a * b^T.
std::vector<real_t> hadamard_product(std::vector<real_t> a, std::vector<real_t> b);
Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
std::vector<real_t> elementWiseDivision(std::vector<real_t> a, std::vector<real_t> b);
Ref<MLPPVector> element_wise_division(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
std::vector<real_t> scalarMultiply(real_t scalar, std::vector<real_t> a);
Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
std::vector<real_t> scalarAdd(real_t scalar, std::vector<real_t> a);
Ref<MLPPVector> scalar_addnv(real_t scalar, const Ref<MLPPVector> &a);
void scalar_addv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
std::vector<real_t> addition(std::vector<real_t> a, std::vector<real_t> b);
Ref<MLPPVector> additionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
void additionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
std::vector<real_t> subtraction(std::vector<real_t> a, std::vector<real_t> b);
Ref<MLPPVector> subtractionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
void subtractionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
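// Presumably subtracts every row of B from a, returning the accumulated result.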
std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
Ref<MLPPVector> subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
std::vector<real_t> log(std::vector<real_t> a);
std::vector<real_t> log10(std::vector<real_t> a);
std::vector<real_t> exp(std::vector<real_t> a);
std::vector<real_t> erf(std::vector<real_t> a);
std::vector<real_t> exponentiate(std::vector<real_t> a, real_t p);
std::vector<real_t> sqrt(std::vector<real_t> a);
std::vector<real_t> cbrt(std::vector<real_t> a);
Ref<MLPPVector> logv(const Ref<MLPPVector> &a);
Ref<MLPPVector> log10v(const Ref<MLPPVector> &a);
Ref<MLPPVector> expv(const Ref<MLPPVector> &a);
Ref<MLPPVector> erfv(const Ref<MLPPVector> &a);
Ref<MLPPVector> exponentiatev(const Ref<MLPPVector> &a, real_t p);
Ref<MLPPVector> sqrtv(const Ref<MLPPVector> &a);
Ref<MLPPVector> cbrtv(const Ref<MLPPVector> &a);
real_t dot(std::vector<real_t> a, std::vector<real_t> b);
real_t dotv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
std::vector<real_t> cross(std::vector<real_t> a, std::vector<real_t> b);
std::vector<real_t> abs(std::vector<real_t> a);
std::vector<real_t> zerovec(int n);
std::vector<real_t> onevec(int n);
std::vector<real_t> full(int n, int k);
Ref<MLPPVector> absv(const Ref<MLPPVector> &a);
Ref<MLPPVector> zerovecv(int n);
Ref<MLPPVector> onevecv(int n);
Ref<MLPPVector> fullv(int n, int k);
std::vector<std::vector<real_t>> diag(std::vector<real_t> a);
Ref<MLPPVector> diagm(const Ref<MLPPVector> &a);
std::vector<real_t> sin(std::vector<real_t> a);
std::vector<real_t> cos(std::vector<real_t> a);
Ref<MLPPVector> sinv(const Ref<MLPPVector> &a);
Ref<MLPPVector> cosv(const Ref<MLPPVector> &a);
std::vector<real_t> max(std::vector<real_t> a, std::vector<real_t> b);
real_t max(std::vector<real_t> a);
real_t min(std::vector<real_t> a);
std::vector<real_t> round(std::vector<real_t> a);
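// euclideanDistance(a, b) = ||a - b||_2; the *_squared variant omits the square root.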
real_t euclideanDistance(std::vector<real_t> a, std::vector<real_t> b);
real_t euclidean_distance(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
real_t euclidean_distance_squared(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
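// norm_2(a) = sqrt(sum_i a_i^2); norm_sq(a) = sum_i a_i^2.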
real_t norm_2(std::vector<real_t> a);
real_t norm_sq(std::vector<real_t> a);
real_t norm_sqv(const Ref<MLPPVector> &a);
real_t sum_elements(std::vector<real_t> a);
real_t sum_elementsv(const Ref<MLPPVector> &a);
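// cosineSimilarity(a, b) = (a . b) / (||a||_2 * ||b||_2).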
real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
void printVector(std::vector<real_t> a);
// MATRIX-VECTOR FUNCTIONS
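// mat_vec_mult(A, b) returns the product A * b; mat_vec_add(A, b) presumably adds b
// to every row of A (a row-wise broadcast).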
std::vector<std::vector<real_t>> mat_vec_add(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
std::vector<real_t> mat_vec_mult(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
Ref<MLPPMatrix> mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
Ref<MLPPVector> mat_vec_multv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
// TENSOR FUNCTIONS
std::vector<std::vector<std::vector<real_t>>> addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A);
std::vector<std::vector<std::vector<real_t>>> exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p);
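// Presumably applies a matrix-vector multiply with b to every matrix slice of the tensor A.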
std::vector<std::vector<real_t>> tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b);
std::vector<real_t> flatten(std::vector<std::vector<std::vector<real_t>>> A);
void printTensor(std::vector<std::vector<std::vector<real_t>>> A);
std::vector<std::vector<std::vector<real_t>>> scalarMultiply(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
std::vector<std::vector<std::vector<real_t>>> scalarAdd(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
Vector<Ref<MLPPMatrix>> scalar_multiply_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
Vector<Ref<MLPPMatrix>> scalar_add_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
std::vector<std::vector<std::vector<real_t>>> resize(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
std::vector<std::vector<std::vector<real_t>>> hadamard_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
std::vector<std::vector<std::vector<real_t>>> max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
std::vector<std::vector<std::vector<real_t>>> abs(std::vector<std::vector<std::vector<real_t>>> A);
real_t norm_2(std::vector<std::vector<std::vector<real_t>>> A);
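// Presumably multiplies every matrix slice of the tensor A by the matrix B.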
std::vector<std::vector<std::vector<real_t>>> vector_wise_tensor_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<real_t>> B);

protected:
static void _bind_methods();
};
#endif /* MLPP_LIN_ALG_H */