Mirror of https://github.com/Relintai/pmlpp.git (synced 2024-11-08 13:12:09 +01:00)
Reworked more methods.
parent 074af18c64
commit 719556e9bc
@@ -869,45 +869,68 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref<MLPPMatrix> &A) {
	return res;
}

/*
std::vector<real_t> MLPPLinAlg::vectorProjection(std::vector<real_t> a, std::vector<real_t> b) {
	real_t product = dot(a, b) / dot(a, a);
	return scalarMultiply(product, a); // Projection of vector a onto b. Denotated as proj_a(b).
Ref<MLPPVector> MLPPLinAlg::vector_projection(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
	real_t product = a->dot(b) / a->dot(a);

	return a->scalar_multiplyn(product); // Projection of vector a onto b. Denotated as proj_a(b).
}
*/
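For reference, both the commented-out vectorProjection and the reworked vector_projection compute the standard orthogonal projection onto the line spanned by a (written proj_a(b) in the code comment). In LaTeX, the formula the method implements is:

    \operatorname{proj}_{a}(b) = \frac{a \cdot b}{a \cdot a} \, a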

/*
std::vector<std::vector<real_t>> MLPPLinAlg::gramSchmidtProcess(std::vector<std::vector<real_t>> A) {
	A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use.
	std::vector<std::vector<real_t>> B;
	B.resize(A.size());
	for (uint32_t i = 0; i < B.size(); i++) {
		B[i].resize(A[0].size());
	}
Ref<MLPPMatrix> MLPPLinAlg::gram_schmidt_process(const Ref<MLPPMatrix> &p_A) {
	Ref<MLPPMatrix> A = p_A->transposen();
	Size2i a_size = A->size();

	Ref<MLPPMatrix> B;
	B.instance();
	B->resize(a_size);
	B->fill(0);

	Ref<MLPPVector> b_i_row_tmp;
	b_i_row_tmp.instance();
	b_i_row_tmp->resize(a_size.x);

	A->row_get_into_mlpp_vector(0, b_i_row_tmp);
	b_i_row_tmp->scalar_multiply((real_t)1 / b_i_row_tmp->norm_2());
	B->row_set_mlpp_vector(0, b_i_row_tmp);

	Ref<MLPPVector> a_i_row_tmp;
	a_i_row_tmp.instance();
	a_i_row_tmp->resize(a_size.x);

	Ref<MLPPVector> b_j_row_tmp;
	b_j_row_tmp.instance();
	b_j_row_tmp->resize(a_size.x);

	for (int i = 1; i < a_size.y; ++i) {
		A->row_get_into_mlpp_vector(i, b_i_row_tmp);
		B->row_set_mlpp_vector(i, b_i_row_tmp);

	B[0] = A[0]; // We set a_1 = b_1 as an initial condition.
	B[0] = scalarMultiply(1 / norm_2(B[0]), B[0]);
	for (uint32_t i = 1; i < B.size(); i++) {
		B[i] = A[i];
		for (int j = i - 1; j >= 0; j--) {
			B[i] = subtraction(B[i], vectorProjection(B[j], A[i]));
		}
		B[i] = scalarMultiply(1 / norm_2(B[i]), B[i]); // Very simply multiply all elements of vec B[i] by 1/||B[i]||_2
	}
	return transpose(B); // We re-transpose the marix.
}
*/
			A->row_get_into_mlpp_vector(i, a_i_row_tmp);
			B->row_get_into_mlpp_vector(j, b_j_row_tmp);
			B->row_get_into_mlpp_vector(i, b_i_row_tmp);

/*
MLPPLinAlg::QRDResult MLPPLinAlg::qrd(std::vector<std::vector<real_t>> A) {
			b_i_row_tmp->sub(vector_projection(b_j_row_tmp, a_i_row_tmp));

			B->row_set_mlpp_vector(i, b_i_row_tmp);
		}

		// Very simply multiply all elements of vec B[i] by 1/||B[i]||_2
		B->row_get_into_mlpp_vector(i, b_i_row_tmp);
		b_i_row_tmp->scalar_multiply((real_t)1 / b_i_row_tmp->norm_2());
		B->row_set_mlpp_vector(i, b_i_row_tmp);
	}

	return B->transposen(); // We re-transpose the marix.
}
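For reference, gram_schmidt_process orthonormalizes the rows of the transposed input (that is, the columns of A) with the classical Gram-Schmidt recurrence, which in LaTeX reads:

    b_1 = \frac{a_1}{\lVert a_1 \rVert_2}, \qquad
    b_i = \frac{a_i - \sum_{j<i} \operatorname{proj}_{b_j}(a_i)}{\bigl\lVert a_i - \sum_{j<i} \operatorname{proj}_{b_j}(a_i) \bigr\rVert_2}

Each row of B starts as the corresponding row of A, the projections onto the already-orthonormalized rows are subtracted in the inner loop, and the result is normalized, mirroring the commented-out std::vector version.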

MLPPLinAlg::QRDResult MLPPLinAlg::qrd(const Ref<MLPPMatrix> &A) {
	QRDResult res;

	res.Q = gramSchmidtProcess(A);
	res.R = matmult(transpose(res.Q), A);
	res.Q = gram_schmidt_process(A);
	res.R = res.Q->transposen()->multn(A);

	return res;
}
*/
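The reworked qrd keeps the construction of the commented-out version: Q is the Gram-Schmidt orthonormalization of A and R is recovered as

    R = Q^{\mathsf{T}} A, \qquad A = Q R

Because the columns of Q orthonormalize the columns of A in order, R is upper triangular up to floating-point error, which matches the near-zero sub-diagonal entries in the test output further down in this commit.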

/*
MLPPLinAlg::CholeskyResult MLPPLinAlg::cholesky(std::vector<std::vector<real_t>> A) {

@@ -16,7 +16,7 @@

#include "core/object/reference.h"

#else
#else

#include "core/defs.h"
#include "core/math_funcs.h"

@@ -110,18 +110,16 @@ public:

	SVDResult svd(const Ref<MLPPMatrix> &A);

	//std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
	Ref<MLPPVector> vector_projection(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);

	//std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
	Ref<MLPPMatrix> gram_schmidt_process(const Ref<MLPPMatrix> &A);

	/*
	struct QRDResult {
		std::vector<std::vector<real_t>> Q;
		std::vector<std::vector<real_t>> R;
		Ref<MLPPMatrix> Q;
		Ref<MLPPMatrix> R;
	};
	*/

	//QRDResult qrd(std::vector<std::vector<real_t>> A);
	QRDResult qrd(const Ref<MLPPMatrix> &A);

	/*
	struct CholeskyResult {

@@ -1475,6 +1475,27 @@ void MLPPMatrix::cbrtb(const Ref<MLPPMatrix> &A) {
	exponentiateb(A, real_t(1) / real_t(3));
}

Ref<MLPPMatrix> MLPPMatrix::matrix_powern(const int n) const {
	if (n == 0) {
		return identity_mat(_size.y);
	}

	Ref<MLPPMatrix> A = Ref<MLPPMatrix>(this);
	Ref<MLPPMatrix> B = identity_mat(_size.y);

	if (n < 0) {
		A = inverse();
	}

	int absn = ABS(n);

	for (int i = 0; i < absn; i++) {
		B->mult(A);
	}

	return B;
}
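matrix_powern computes an integer matrix power by repeated multiplication, with the two special cases handled first; in LaTeX:

    A^{0} = I, \qquad A^{n} = \underbrace{A \cdot A \cdots A}_{n\ \text{factors}}, \qquad A^{-n} = \left(A^{-1}\right)^{n} \quad (n > 0)

The n = 0 branch returns the identity, and for negative exponents the loop multiplies by inverse() instead of the matrix itself; the test hunk further down checks that [[5, 5], [5, 5]]^2 = [[50, 50], [50, 50]].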

/*
std::vector<std::vector<real_t>> MLPPMatrix::matrixPower(std::vector<std::vector<real_t>> A, int n) {
	std::vector<std::vector<real_t>> B = identity(A.size());

@@ -1580,15 +1601,17 @@ real_t MLPPMatrix::detb(const Ref<MLPPMatrix> &A, int d) const {
	return deter;
}

/*
real_t MLPPMatrix::trace(std::vector<std::vector<real_t>> A) {
real_t MLPPMatrix::trace() const {
	real_t trace = 0;
	for (uint32_t i = 0; i < A.size(); i++) {
		trace += A[i][i];

	int sm = MIN(_size.x, _size.y);

	for (int i = 0; i < sm; ++i) {
		trace += element_get(i, i);
	}

	return trace;
}
*/
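The reworked trace() drops the matrix argument and sums the main diagonal of the calling matrix, iterating up to MIN(_size.x, _size.y) so non-square sizes are also handled:

    \operatorname{tr}(A) = \sum_{i=1}^{\min(m, n)} A_{ii}

For the 2x2 test matrix [[1, 2], [3, 4]] further down this gives 1 + 4 = 5, matching the expected //5 comment.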

Ref<MLPPMatrix> MLPPMatrix::cofactor(int n, int i, int j) const {
	Ref<MLPPMatrix> cof;

@@ -2663,11 +2686,11 @@ void MLPPMatrix::flatteno(Ref<MLPPVector> out) const {
	}
}

/*
std::vector<real_t> MLPPMatrix::solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b) {
	return mat_vec_mult(inverse(A), b);
Ref<MLPPVector> MLPPMatrix::solve(const Ref<MLPPVector> &b) const {
	return inverse()->mult_vec(b);
}
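solve() keeps the approach of the commented-out std::vector version: the linear system A x = b is solved by explicitly inverting A,

    A x = b \;\Longrightarrow\; x = A^{-1} b

The test hunk further down exercises this on x + y = 2200, 1.5x + 4y = 5050, whose solution (1500, 700) appears in the expected output comment.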

/*
bool MLPPMatrix::positiveDefiniteChecker(std::vector<std::vector<real_t>> A) {
	auto eig_result = eig(A);
	auto eigenvectors = std::get<0>(eig_result);

@@ -209,7 +209,7 @@ public:
	Ref<MLPPMatrix> cbrtn() const;
	void cbrtb(const Ref<MLPPMatrix> &A);

	//std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
	Ref<MLPPMatrix> matrix_powern(const int n) const;

	void abs();
	Ref<MLPPMatrix> absn() const;

@@ -218,7 +218,7 @@ public:
	real_t det(int d = -1) const;
	real_t detb(const Ref<MLPPMatrix> &A, int d) const;

	//real_t trace(std::vector<std::vector<real_t>> A);
	real_t trace() const;

	Ref<MLPPMatrix> cofactor(int n, int i, int j) const;
	void cofactoro(int n, int i, int j, Ref<MLPPMatrix> out) const;

@@ -322,9 +322,10 @@ public:
	Ref<MLPPVector> flatten() const;
	void flatteno(Ref<MLPPVector> out) const;

	/*
	std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);

	Ref<MLPPVector> solve(const Ref<MLPPVector>& b) const;

	/*
	bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);

	bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);

@@ -1283,17 +1283,16 @@ real_t MLPPVector::euclidean_distance_squared(const Ref<MLPPVector> &b) const {
	return dist;
}

/*
real_t MLPPVector::norm_2(std::vector<std::vector<real_t>> A) {
	real_t sum = 0;
	for (uint32_t i = 0; i < A.size(); i++) {
		for (uint32_t j = 0; j < A[i].size(); j++) {
			sum += A[i][j] * A[i][j];
		}
real_t MLPPVector::norm_2() const {
	const real_t *a_ptr = ptr();

	real_t n_sq = 0;
	for (int i = 0; i < _size; ++i) {
		n_sq += a_ptr[i] * a_ptr[i];
	}
	return Math::sqrt(sum);

	return Math::sqrt(n_sq);
}
*/
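The argument-less norm_2() replaces the commented-out free-function form and returns the Euclidean (L2) norm of the vector itself:

    \lVert a \rVert_2 = \sqrt{\sum_{i} a_i^{2}}

The companion norm_sq() declared below presumably returns the same sum without the square root.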

real_t MLPPVector::norm_sq() const {
	const real_t *a_ptr = ptr();

@@ -226,10 +226,7 @@ public:
	real_t euclidean_distance(const Ref<MLPPVector> &b) const;
	real_t euclidean_distance_squared(const Ref<MLPPVector> &b) const;

	/*
	real_t norm_2(std::vector<real_t> a);
	*/

	real_t norm_2() const;
	real_t norm_sq() const;

	real_t sum_elements() const;

@@ -1006,51 +1006,170 @@ void MLPPTests::test_outlier_finder(bool ui) {
	PLOG_MSG(Variant(outlier_finder.model_test(input_set)));
}
void MLPPTests::test_new_math_functions() {
	/*
	MLPPLinAlg alg;
	MLPPActivationOld avn;
	MLPPActivation avn;
	MLPPData data;

	PLOG_MSG("logit:");

	// Testing new Functions
	real_t z_s = 0.001;
	std::cout << avn.logit(z_s) << std::endl;
	std::cout << avn.logit(z_s, true) << std::endl;

	std::vector<real_t> z_v = { 0.001 };
	alg.printVector(avn.logit(z_v));
	alg.printVector(avn.logit(z_v, true));
	//-6.906755
	PLOG_MSG(String::num(avn.logit_normr(z_s)));
	//1001.000916
	PLOG_MSG(String::num(avn.logit_derivr(z_s)));

	std::vector<std::vector<real_t>> Z_m = { { 0.001 } };
	alg.printMatrix(avn.logit(Z_m));
	alg.printMatrix(avn.logit(Z_m, true));
	std::vector<real_t> z_v_sv = { 0.001 };

	std::cout << alg.trace({ { 1, 2 }, { 3, 4 } }) << std::endl;
	alg.printMatrix(alg.pinverse({ { 1, 2 }, { 3, 4 } }));
	alg.printMatrix(alg.diag({ 1, 2, 3, 4, 5 }));
	alg.printMatrix(alg.kronecker_product({ { 1, 2, 3, 4, 5 } }, { { 6, 7, 8, 9, 10 } }));
	alg.printMatrix(alg.matrixPower({ { 5, 5 }, { 5, 5 } }, 2));
	alg.printVector(alg.solve({ { 1, 1 }, { 1.5, 4.0 } }, { 2200, 5050 }));
	Ref<MLPPVector> z_v;
	z_v.instance();
	z_v->set_from_std_vector(z_v_sv);

	//[MLPPVector: -6.906755 ]
	PLOG_MSG(avn.logit_normv(z_v)->to_string());
	//[MLPPVector: 1001.000916 ]
	PLOG_MSG(avn.logit_derivv(z_v)->to_string());

	std::vector<std::vector<real_t>> Z_m_sv = { { 0.001 } };

	Ref<MLPPMatrix> Z_m;
	Z_m.instance();
	Z_m->set_from_std_vectors(Z_m_sv);

	//[MLPPMatrix:
	//[ -6.906755 ]
	//]
	PLOG_MSG(avn.logit_normm(Z_m)->to_string());
	//[MLPPMatrix:
	//[ 1001.000916 ]
	//]
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());

	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());
	PLOG_MSG(avn.logit_derivm(Z_m)->to_string());

	const real_t trace_arr[] = {
		1, 2, //
		3, 4 //
	};

	Ref<MLPPMatrix> trace_mat(memnew(MLPPMatrix(trace_arr, 2, 2)));
	//5
	PLOG_MSG(String::num(trace_mat->trace()));

	const real_t pinverse_arr[] = {
		1, 2, //
		3, 4 //
	};

	Ref<MLPPMatrix> pinverse_mat(memnew(MLPPMatrix(pinverse_arr, 2, 2)));
	//[MLPPMatrix:
	//[ -2 1.5 ]
	//[ 1 -0.5 ]
	//]
	PLOG_MSG(pinverse_mat->pinverse()->to_string());

	const real_t diag_arr[] = {
		1, 2, 3, 4, 5
	};

	Ref<MLPPVector> diag_vec(memnew(MLPPVector(diag_arr, 5)));
	//[MLPPMatrix:
	// [ 1 0 0 0 0 ]
	// [ 0 2 0 0 0 ]
	// [ 0 0 3 0 0 ]
	// [ 0 0 0 4 0 ]
	// [ 0 0 0 0 5 ]
	//]
	PLOG_MSG(alg.diagnm(diag_vec)->to_string());

	const real_t kronecker_product1_arr[] = {
		1, 2, 3, 4, 5, //
	};

	const real_t kronecker_product2_arr[] = {
		6, 7, 8, 9, 10 //
	};

	Ref<MLPPMatrix> kronecker_product1_mat(memnew(MLPPMatrix(kronecker_product1_arr, 1, 5)));
	Ref<MLPPMatrix> kronecker_product2_mat(memnew(MLPPMatrix(kronecker_product2_arr, 1, 5)));
	//[MLPPMatrix:
	// [ 6 7 8 9 10 12 14 16 18 20 18 21 24 27 30 24 28 32 36 40 30 35 40 45 50 ]
	//]
	PLOG_MSG(kronecker_product1_mat->kronecker_productn(kronecker_product2_mat)->to_string());

	const real_t power_arr[] = {
		5, 5, //
		5, 5 //
	};

	Ref<MLPPMatrix> power_mat(memnew(MLPPMatrix(power_arr, 2, 2)));
	//[MLPPMatrix:
	// [ 50 50 ]
	// [ 50 50 ]
	//]
	PLOG_MSG(power_mat->matrix_powern(2)->to_string());

	const real_t solve1_arr[] = {
		1, 1, //
		1.5, 4.0 //
	};

	const real_t solve2_arr[] = {
		2200, 5050
	};

	Ref<MLPPMatrix> solve_mat(memnew(MLPPMatrix(solve1_arr, 2, 2)));
	Ref<MLPPVector> solve_vec(memnew(MLPPVector(solve2_arr, 2)));
	//[MLPPVector: 1500 700 ]
	PLOG_MSG(solve_mat->solve(solve_vec)->to_string());

	std::vector<std::vector<real_t>> matrixOfCubes = { { 1, 2, 64, 27 } };
	std::vector<real_t> vectorOfCubes = { 1, 2, 64, 27 };
	alg.printMatrix(alg.cbrt(matrixOfCubes));
	alg.printVector(alg.cbrt(vectorOfCubes));
	std::cout << alg.max({ { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } }) << std::endl;
	std::cout << alg.min({ { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } }) << std::endl;

	Ref<MLPPMatrix> matrix_of_cubes(memnew(MLPPMatrix(matrixOfCubes)));
	Ref<MLPPVector> vector_of_cubes(memnew(MLPPVector(vectorOfCubes)));
	PLOG_MSG(matrix_of_cubes->cbrtn()->to_string());
	PLOG_MSG(vector_of_cubes->cbrtn()->to_string());

	//std::vector<std::vector<real_t>> min_max_svec = { { 1, 2, 3, 4, 5 }, { 6, 5, 3, 4, 1 }, { 9, 9, 9, 9, 9 } };
	//Ref<MLPPMatrix> min_max(memnew(MLPPMatrix(min_max_svec)));

	//std::vector<real_t> chicken;
	//data.getImage("../../Data/apple.jpeg", chicken);
	//alg.printVector(chicken);

	std::vector<std::vector<real_t>> P = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
	alg.printMatrix(P);
	std::vector<std::vector<real_t>> Pvec = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
	Ref<MLPPMatrix> P(memnew(MLPPMatrix(Pvec)));

	alg.printMatrix(alg.gramSchmidtProcess(P));
	PLOG_MSG(P->to_string());
	//[MLPPMatrix:
	// [ 0.857143 -0.394286 -0.331429 ]
	// [ 0.428571 0.902857 0.034286 ]
	// [ -0.285714 0.171429 -0.942857 ]
	//]
	PLOG_MSG(alg.gram_schmidt_process(P)->to_string());

	//MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
	//alg.printMatrix(qrd_result.Q);
	//alg.printMatrix(qrd_result.R);
	*/
	MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!

	//[MLPPMatrix:
	// [ 0.857143 -0.394286 -0.331429 ]
	// [ 0.428571 0.902857 0.034286 ]
	// [ -0.285714 0.171429 -0.942857 ]
	//]
	PLOG_MSG(qrd_result.Q->to_string());
	//[MLPPMatrix:
	// [ 14.000001 21 -14.000003 ]
	// [ -0 175 -70 ]
	// [ 0.000001 0.000029 34.999989 ]
	//]
	PLOG_MSG(qrd_result.R->to_string());
}
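For context on the expected output hard-coded in the comments above: P = {{12, -51, 4}, {6, 167, -68}, {-4, 24, -41}} is a well-known worked QR example, and its exact factorization, which the printed Q and R approximate to within floating-point error, is

    Q = \begin{pmatrix} 6/7 & -69/175 & -58/175 \\ 3/7 & 158/175 & 6/175 \\ -2/7 & 6/35 & -33/35 \end{pmatrix}, \qquad
    R = \begin{pmatrix} 14 & 21 & -14 \\ 0 & 175 & -70 \\ 0 & 0 & 35 \end{pmatrix}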
void MLPPTests::test_positive_definiteness_checker() {
	/*

@@ -442,12 +442,11 @@ void MLPPTestsOld::test_new_math_functions() {

	std::vector<std::vector<real_t>> P = { { 12, -51, 4 }, { 6, 167, -68 }, { -4, 24, -41 } };
	alg.printMatrix(P);

	alg.printMatrix(alg.gramSchmidtProcess(P));

	//MLPPLinAlgOld::QRDResult qrd_result = alg.qrd(P); // It works!
	//alg.printMatrix(qrd_result.Q);
	//alg.printMatrix(qrd_result.R);
	MLPPLinAlgOld::QRDResult qrd_result = alg.qrd(P); // It works!
	alg.printMatrix(qrd_result.Q);
	alg.printMatrix(qrd_result.R);
}
void MLPPTestsOld::test_positive_definiteness_checker() {
	//MLPPStat stat;