diff --git a/doc_classes/MLPPMatrix.xml b/doc_classes/MLPPMatrix.xml
index 516aac9..4d4f3ef 100644
--- a/doc_classes/MLPPMatrix.xml
+++ b/doc_classes/MLPPMatrix.xml
@@ -29,19 +29,19 @@
-
+
-
+
-
+
@@ -293,7 +293,7 @@
-
+
@@ -337,20 +337,20 @@
-
+
-
+
-
+
@@ -511,7 +511,7 @@
-
+
@@ -543,13 +543,13 @@
-
+
-
+
@@ -604,13 +604,13 @@
-
+
-
+
@@ -656,14 +656,14 @@
-
+
-
+
@@ -737,7 +737,7 @@
-
+
@@ -770,7 +770,7 @@
-
+
diff --git a/doc_classes/MLPPTensor3.xml b/doc_classes/MLPPTensor3.xml
index 95e28aa..da5d7eb 100644
--- a/doc_classes/MLPPTensor3.xml
+++ b/doc_classes/MLPPTensor3.xml
@@ -170,7 +170,7 @@
-
+
@@ -178,14 +178,14 @@
-
+
-
+
@@ -464,7 +464,7 @@
-
+
@@ -472,7 +472,7 @@
-
+
diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 8d7888a..6be0b6f 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -924,11 +924,11 @@ Ref MLPPActivation::softmax_normm(const Ref &z) {
row_tmp->resize(z_size.x);
for (int i = 0; i < z_size.y; ++i) {
- z->get_row_into_mlpp_vector(i, row_tmp);
+ z->row_get_into_mlpp_vector(i, row_tmp);
Ref sfn = softmax_normv(row_tmp);
- a->set_row_mlpp_vector(i, sfn);
+ a->row_set_mlpp_vector(i, sfn);
}
return a;
@@ -974,11 +974,11 @@ Ref MLPPActivation::softmax_derivm(const Ref &z) {
row_tmp->resize(z_size.x);
for (int i = 0; i < z_size.y; ++i) {
- z->get_row_into_mlpp_vector(i, row_tmp);
+ z->row_get_into_mlpp_vector(i, row_tmp);
Ref sfn = softmax_derivm(z);
- a->set_row_mlpp_vector(i, sfn);
+ a->row_set_mlpp_vector(i, sfn);
}
return a;
@@ -1021,11 +1021,11 @@ Ref MLPPActivation::adj_softmax_normm(const Ref &z) {
row_rmp->resize(size.x);
for (int i = 0; i < size.y; ++i) {
- z->get_row_into_mlpp_vector(i, row_rmp);
+ z->row_get_into_mlpp_vector(i, row_rmp);
Ref nv = adj_softmax_normv(row_rmp);
- n->set_row_mlpp_vector(i, nv);
+ n->row_set_mlpp_vector(i, nv);
}
return n;
@@ -1066,11 +1066,11 @@ Ref MLPPActivation::adj_softmax_derivm(const Ref &z) {
row_rmp->resize(size.x);
for (int i = 0; i < size.y; ++i) {
- z->get_row_into_mlpp_vector(i, row_rmp);
+ z->row_get_into_mlpp_vector(i, row_rmp);
Ref nv = adj_softmax_derivv(row_rmp);
- n->set_row_mlpp_vector(i, nv);
+ n->row_set_mlpp_vector(i, nv);
}
return n;
@@ -1128,15 +1128,15 @@ Vector[> MLPPActivation::softmax_deriv_normm(const Refresize(Size2i(a_size_x, z_size_y));
for (int j = 0; j < z_size_y; ++j) {
- a->get_row_into_mlpp_vector(i, a_i_tmp);
+ a->row_get_into_mlpp_vector(i, a_i_tmp);
if (i == j) {
Ref d_j = alg.subtractionnv(a_i_tmp, alg.hadamard_productnv(a_i_tmp, a_i_tmp));
- d->set_row_mlpp_vector(j, d_j);
+ d->row_set_mlpp_vector(j, d_j);
} else {
- a->get_row_into_mlpp_vector(j, a_j_tmp);
+ a->row_get_into_mlpp_vector(j, a_j_tmp);
Ref d_j = alg.scalar_multiplynv(-1, alg.hadamard_productnv(a_i_tmp, a_j_tmp));
- d->set_row_mlpp_vector(j, d_j);
+ d->row_set_mlpp_vector(j, d_j);
}
}
@@ -1196,15 +1196,15 @@ Vector][> MLPPActivation::softmax_deriv_derivm(const Refresize(Size2i(a_size_x, z_size_y));
for (int j = 0; j < z_size_y; ++j) {
- a->get_row_into_mlpp_vector(i, a_i_tmp);
+ a->row_get_into_mlpp_vector(i, a_i_tmp);
if (i == j) {
Ref d_j = alg.subtractionnv(a_i_tmp, alg.hadamard_productnv(a_i_tmp, a_i_tmp));
- d->set_row_mlpp_vector(j, d_j);
+ d->row_set_mlpp_vector(j, d_j);
} else {
- a->get_row_into_mlpp_vector(j, a_j_tmp);
+ a->row_get_into_mlpp_vector(j, a_j_tmp);
Ref d_j = alg.scalar_multiplynv(-1, alg.hadamard_productnv(a_i_tmp, a_j_tmp));
- d->set_row_mlpp_vector(j, d_j);
+ d->row_set_mlpp_vector(j, d_j);
}
}
diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 54cc5af..00dd05f 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -125,7 +125,7 @@ void MLPPANN::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp
index c6def87..9266c6f 100644
--- a/mlpp/auto_encoder/auto_encoder.cpp
+++ b/mlpp/auto_encoder/auto_encoder.cpp
@@ -128,11 +128,11 @@ void MLPPAutoEncoder::sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
- input_set_mat_tmp->set_row_mlpp_vector(0, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
+ input_set_mat_tmp->row_set_mlpp_vector(0, input_set_row_tmp);
Ref y_hat = evaluatev(input_set_row_tmp);
- y_hat_mat_tmp->set_row_mlpp_vector(0, y_hat);
+ y_hat_mat_tmp->row_set_mlpp_vector(0, y_hat);
PropagateVResult prop_res = propagatev(input_set_row_tmp);
diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp
index 05dd90d..dfd9a6e 100644
--- a/mlpp/bernoulli_nb/bernoulli_nb.cpp
+++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp
@@ -22,7 +22,7 @@ Ref MLPPBernoulliNB::model_set_test(const Ref &X) {
x_row_tmp->resize(X->size().x);
for (int i = 0; i < X->size().y; i++) {
- X->get_row_into_mlpp_vector(i, x_row_tmp);
+ X->row_get_into_mlpp_vector(i, x_row_tmp);
y_hat->element_set(i, model_test(x_row_tmp));
}
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp
index 21f9bd9..f420993 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.cpp
+++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp
@@ -122,7 +122,7 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp
index 2333dff..ee145ab 100644
--- a/mlpp/cost/cost.cpp
+++ b/mlpp/cost/cost.cpp
@@ -561,7 +561,7 @@ real_t MLPPCost::dual_form_svm(const Ref &alpha, const Ref alpha_m;
alpha_m.instance();
alpha_m->resize(Size2i(alpha->size(), 1));
- alpha_m->set_row_mlpp_vector(0, alpha);
+ alpha_m->row_set_mlpp_vector(0, alpha);
Ref alpha_m_res = alg.matmultnm(alg.matmultnm(alpha_m, Q), alg.transposenm(alpha_m));
diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp
index da0c92b..f0a4220 100644
--- a/mlpp/data/data.cpp
+++ b/mlpp/data/data.cpp
@@ -369,11 +369,11 @@ MLPPData::SplitComplexData MLPPData::train_test_split(Ref data,
for (int i = 0; i < test_input_number; ++i) {
int index = indices[i];
- orig_input->get_row_into_mlpp_vector(index, orig_input_row_tmp);
- orig_output->get_row_into_mlpp_vector(index, orig_output_row_tmp);
+ orig_input->row_get_into_mlpp_vector(index, orig_input_row_tmp);
+ orig_output->row_get_into_mlpp_vector(index, orig_output_row_tmp);
- res_test_input->set_row_mlpp_vector(i, orig_input);
- res_test_output->set_row_mlpp_vector(i, orig_output);
+ res_test_input->row_set_mlpp_vector(i, orig_input);
+ res_test_output->row_set_mlpp_vector(i, orig_output);
}
Ref res_train_input = res.train->get_input();
@@ -387,11 +387,11 @@ MLPPData::SplitComplexData MLPPData::train_test_split(Ref data,
for (int i = 0; i < train_input_number; ++i) {
int index = indices[train_input_number + i];
- orig_input->get_row_into_mlpp_vector(index, orig_input_row_tmp);
- orig_output->get_row_into_mlpp_vector(index, orig_output_row_tmp);
+ orig_input->row_get_into_mlpp_vector(index, orig_input_row_tmp);
+ orig_output->row_get_into_mlpp_vector(index, orig_output_row_tmp);
- res_train_input->set_row_mlpp_vector(i, orig_input);
- res_train_output->set_row_mlpp_vector(i, orig_output);
+ res_train_input->row_set_mlpp_vector(i, orig_input);
+ res_train_output->row_set_mlpp_vector(i, orig_output);
}
return res;
@@ -1280,7 +1280,7 @@ Ref MLPPData::mean_centering(const Ref &p_X) {
x_row_tmp->resize(x_size.x);
for (int i = 0; i < x_size.y; ++i) {
- X->get_row_into_mlpp_vector(i, x_row_tmp);
+ X->row_get_into_mlpp_vector(i, x_row_tmp);
real_t mean_i = stat.meanv(x_row_tmp);
diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp
index b0b6ce7..4e848fc 100644
--- a/mlpp/dual_svc/dual_svc.cpp
+++ b/mlpp/dual_svc/dual_svc.cpp
@@ -53,8 +53,8 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
if (_alpha->element_get(i) < _C && _alpha->element_get(i) > 0) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->element_get(j) > 0) {
- _input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
- _input_set->get_row_into_mlpp_vector(j, input_set_j_row_tmp);
+ _input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
+ _input_set->row_get_into_mlpp_vector(j, input_set_j_row_tmp);
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
@@ -216,7 +216,7 @@ real_t MLPPDualSVC::propagatev(const Ref &x) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->element_get(j) != 0) {
- _input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
z += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
@@ -249,8 +249,8 @@ Ref MLPPDualSVC::propagatem(const Ref &X) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->element_get(j) != 0) {
- _input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
- X->get_row_into_mlpp_vector(i, x_row_tmp);
+ _input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
+ X->row_get_into_mlpp_vector(i, x_row_tmp);
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp
index c20bcec..9d7c5a6 100644
--- a/mlpp/exp_reg/exp_reg.cpp
+++ b/mlpp/exp_reg/exp_reg.cpp
@@ -109,7 +109,7 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp
index 798327c..e2d9d3e 100644
--- a/mlpp/gan/gan.cpp
+++ b/mlpp/gan/gan.cpp
@@ -60,7 +60,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
Ref generator_input_set = alg.gaussian_noise(_n, _k);
Ref discriminator_input_set = model_set_test_generator(generator_input_set);
- discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
+ discriminator_input_set->rows_add_mlpp_matrix(_output_set); // Fake + real inputs.
Ref y_hat = model_set_test_discriminator(discriminator_input_set);
Ref output_set = alg.zerovecnv(_n);
diff --git a/mlpp/gaussian_nb/gaussian_nb.cpp b/mlpp/gaussian_nb/gaussian_nb.cpp
index 2e14d3a..bdd0141 100644
--- a/mlpp/gaussian_nb/gaussian_nb.cpp
+++ b/mlpp/gaussian_nb/gaussian_nb.cpp
@@ -45,7 +45,7 @@ Ref MLPPGaussianNB::model_set_test(const Ref &X) {
x_row_tmp->resize(X->size().x);
for (int i = 0; i < X->size().y; i++) {
- X->get_row_into_mlpp_vector(i, x_row_tmp);
+ X->row_get_into_mlpp_vector(i, x_row_tmp);
y_hat->element_set(i, model_test(x_row_tmp));
}
diff --git a/mlpp/kmeans/kmeans.cpp b/mlpp/kmeans/kmeans.cpp
index 99fca87..d1392a2 100644
--- a/mlpp/kmeans/kmeans.cpp
+++ b/mlpp/kmeans/kmeans.cpp
@@ -77,11 +77,11 @@ Ref MLPPKMeans::model_set_test(const Ref &X) {
int r0_size = _r->size().x;
for (int i = 0; i < input_set_size_y; ++i) {
- _mu->get_row_into_mlpp_vector(0, closest_centroid);
- X->get_row_into_mlpp_vector(i, tmp_xiv);
+ _mu->row_get_into_mlpp_vector(0, closest_centroid);
+ X->row_get_into_mlpp_vector(i, tmp_xiv);
for (int j = 0; j < r0_size; ++j) {
- _mu->get_row_into_mlpp_vector(j, tmp_mujv);
+ _mu->row_get_into_mlpp_vector(j, tmp_mujv);
bool is_centroid_closer = alg.euclidean_distance(tmp_xiv, tmp_mujv) < alg.euclidean_distance(tmp_xiv, closest_centroid);
@@ -90,7 +90,7 @@ Ref MLPPKMeans::model_set_test(const Ref &X) {
}
}
- closest_centroids->set_row_mlpp_vector(i, closest_centroid);
+ closest_centroids->row_set_mlpp_vector(i, closest_centroid);
}
return closest_centroids;
@@ -105,7 +105,7 @@ Ref MLPPKMeans::model_test(const Ref &x) {
closest_centroid.instance();
closest_centroid->resize(_mu->size().x);
- _mu->get_row_into_mlpp_vector(0, closest_centroid);
+ _mu->row_get_into_mlpp_vector(0, closest_centroid);
int mu_size_y = _mu->size().y;
@@ -114,7 +114,7 @@ Ref MLPPKMeans::model_test(const Ref &x) {
tmp_mujv->resize(_mu->size().x);
for (int j = 0; j < mu_size_y; ++j) {
- _mu->get_row_into_mlpp_vector(j, tmp_mujv);
+ _mu->row_get_into_mlpp_vector(j, tmp_mujv);
if (alg.euclidean_distance(x, tmp_mujv) < alg.euclidean_distance(x, closest_centroid)) {
closest_centroid->set_from_mlpp_vector(tmp_mujv);
@@ -218,8 +218,8 @@ Ref MLPPKMeans::silhouette_scores() {
mu_j_tempv->resize(_mu->size().x);
for (int i = 0; i < input_set_size_y; ++i) {
- _r->get_row_into_mlpp_vector(i, r_i_tempv);
- _input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+ _r->row_get_into_mlpp_vector(i, r_i_tempv);
+ _input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);
// COMPUTING a[i]
real_t a = 0;
@@ -228,10 +228,10 @@ Ref MLPPKMeans::silhouette_scores() {
continue;
}
- _r->get_row_into_mlpp_vector(j, r_j_tempv);
+ _r->row_get_into_mlpp_vector(j, r_j_tempv);
if (r_i_tempv->is_equal_approx(r_j_tempv)) {
- _input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+ _input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);
a += alg.euclidean_distance(input_set_i_tempv, input_set_j_tempv);
}
@@ -240,17 +240,17 @@ Ref MLPPKMeans::silhouette_scores() {
// NORMALIZE a[i]
a /= closest_centroids->size().x - 1;
- closest_centroids->get_row_into_mlpp_vector(i, closest_centroids_i_tempv);
+ closest_centroids->row_get_into_mlpp_vector(i, closest_centroids_i_tempv);
// COMPUTING b[i]
real_t b = Math_INF;
for (int j = 0; j < mu_size_y; ++j) {
- _mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+ _mu->row_get_into_mlpp_vector(j, mu_j_tempv);
if (!closest_centroids_i_tempv->is_equal_approx(mu_j_tempv)) {
real_t sum = 0;
for (int k = 0; k < input_set_size_y; ++k) {
- _input_set->get_row_into_mlpp_vector(k, input_set_k_tempv);
+ _input_set->row_get_into_mlpp_vector(k, input_set_k_tempv);
sum += alg.euclidean_distance(input_set_i_tempv, input_set_k_tempv);
}
@@ -258,7 +258,7 @@ Ref MLPPKMeans::silhouette_scores() {
// NORMALIZE b[i]
real_t k_cluster_size = 0;
for (int k = 0; k < closest_centroids_size_y; ++k) {
- _input_set->get_row_into_mlpp_vector(k, closest_centroids_k_tempv);
+ _input_set->row_get_into_mlpp_vector(k, closest_centroids_k_tempv);
if (closest_centroids_k_tempv->is_equal_approx(mu_j_tempv)) {
++k_cluster_size;
@@ -332,18 +332,18 @@ void MLPPKMeans::_evaluate() {
_r->fill(0);
for (int i = 0; i < r_size_y; ++i) {
- _mu->get_row_into_mlpp_vector(0, closest_centroid);
- _input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+ _mu->row_get_into_mlpp_vector(0, closest_centroid);
+ _input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);
closest_centroid_current_dist = alg.euclidean_distance(input_set_i_tempv, closest_centroid);
for (int j = 0; j < r_size_x; ++j) {
- _mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+ _mu->row_get_into_mlpp_vector(j, mu_j_tempv);
bool is_centroid_closer = alg.euclidean_distance(input_set_i_tempv, mu_j_tempv) < closest_centroid_current_dist;
if (is_centroid_closer) {
- _mu->get_row_into_mlpp_vector(j, closest_centroid);
+ _mu->row_get_into_mlpp_vector(j, closest_centroid);
closest_centroid_current_dist = alg.euclidean_distance(input_set_i_tempv, closest_centroid);
closest_centroid_index = j;
}
@@ -381,7 +381,7 @@ void MLPPKMeans::_compute_mu() {
real_t den = 0;
for (int j = 0; j < r_size_y; ++j) {
- _input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+ _input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);
real_t r_j_i = _r->element_get(j, i);
@@ -393,7 +393,7 @@ void MLPPKMeans::_compute_mu() {
alg.scalar_multiplyv(real_t(1) / real_t(den), num, mu_tempv);
- _mu->set_row_mlpp_vector(i, mu_tempv);
+ _mu->row_set_mlpp_vector(i, mu_tempv);
}
}
@@ -416,8 +416,8 @@ void MLPPKMeans::_centroid_initialization() {
for (int i = 0; i < _k; ++i) {
int indx = rand.random(0, input_set_size_y_rand);
- _input_set->get_row_into_mlpp_vector(indx, mu_tempv);
- _mu->set_row_mlpp_vector(i, mu_tempv);
+ _input_set->row_get_into_mlpp_vector(indx, mu_tempv);
+ _mu->row_set_mlpp_vector(i, mu_tempv);
}
}
@@ -439,8 +439,8 @@ void MLPPKMeans::_kmeanspp_initialization() {
mu_tempv.instance();
mu_tempv->resize(_mu->size().x);
- _input_set->get_row_into_mlpp_vector(rand.random(0, input_set_size_y - 1), mu_tempv);
- _mu->set_row_mlpp_vector(0, mu_tempv);
+ _input_set->row_get_into_mlpp_vector(rand.random(0, input_set_size_y - 1), mu_tempv);
+ _mu->row_set_mlpp_vector(0, mu_tempv);
Ref input_set_j_tempv;
input_set_j_tempv.instance();
@@ -452,14 +452,14 @@ void MLPPKMeans::_kmeanspp_initialization() {
for (int i = 1; i < _k - 1; ++i) {
for (int j = 0; j < input_set_size_y; ++j) {
- _input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+ _input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);
real_t max_dist = 0;
// SUM ALL THE SQUARED DISTANCES, CHOOSE THE ONE THAT'S FARTHEST
// AS TO SPREAD OUT THE CLUSTER CENTROIDS.
real_t sum = 0;
for (int k = 0; k < i; k++) {
- _mu->get_row_into_mlpp_vector(k, mu_tempv);
+ _mu->row_get_into_mlpp_vector(k, mu_tempv);
sum += alg.euclidean_distance(input_set_j_tempv, mu_tempv);
}
@@ -470,7 +470,7 @@ void MLPPKMeans::_kmeanspp_initialization() {
}
}
- _mu->set_row_mlpp_vector(i, farthest_centroid);
+ _mu->row_set_mlpp_vector(i, farthest_centroid);
}
}
real_t MLPPKMeans::_cost() {
@@ -495,10 +495,10 @@ real_t MLPPKMeans::_cost() {
real_t sum = 0;
for (int i = 0; i < r_size_y; i++) {
- _input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+ _input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);
for (int j = 0; j < r_size_x; j++) {
- _mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+ _mu->row_get_into_mlpp_vector(j, mu_j_tempv);
alg.subtractionv(input_set_i_tempv, mu_j_tempv, sub_tempv);
sum += _r->element_get(i, j) * alg.norm_sqv(sub_tempv);
diff --git a/mlpp/knn/knn.cpp b/mlpp/knn/knn.cpp
index 104a887..ba84a03 100644
--- a/mlpp/knn/knn.cpp
+++ b/mlpp/knn/knn.cpp
@@ -44,7 +44,7 @@ PoolIntArray MLPPKNN::model_set_test(const Ref &X) {
y_hat.resize(y_size);
for (int i = 0; i < y_size; i++) {
- X->get_row_into_mlpp_vector(i, v);
+ X->row_get_into_mlpp_vector(i, v);
y_hat.set(i, model_test(v));
}
@@ -94,8 +94,8 @@ PoolIntArray MLPPKNN::nearest_neighbors(const Ref &x) {
continue;
}
- _input_set->get_row_into_mlpp_vector(j, tmpv1);
- _input_set->get_row_into_mlpp_vector(neighbor, tmpv2);
+ _input_set->row_get_into_mlpp_vector(j, tmpv1);
+ _input_set->row_get_into_mlpp_vector(neighbor, tmpv2);
bool is_neighbor_nearer = alg.euclidean_distance(x, tmpv1) < alg.euclidean_distance(x, tmpv2);
diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index d4bc999..c324fba 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -178,7 +178,7 @@ Ref MLPPLinAlg::kronecker_productnm(const Ref &A, const
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < b_size.y; ++j) {
- B->get_row_into_mlpp_vector(j, row_tmp);
+ B->row_get_into_mlpp_vector(j, row_tmp);
Vector][> row;
for (int k = 0; k < a_size.x; ++k) {
@@ -187,7 +187,7 @@ Ref MLPPLinAlg::kronecker_productnm(const Ref &A, const
Ref flattened_row = flattenmnv(row);
- C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+ C->row_set_mlpp_vector(i * b_size.y + j, flattened_row);
}
}
@@ -689,10 +689,10 @@ Ref MLPPLinAlg::covnm(const Ref &A) {
a_j_row_tmp->resize(a_size.x);
for (int i = 0; i < a_size.y; ++i) {
- A->get_row_into_mlpp_vector(i, a_i_row_tmp);
+ A->row_get_into_mlpp_vector(i, a_i_row_tmp);
for (int j = 0; j < a_size.x; ++j) {
- A->get_row_into_mlpp_vector(j, a_j_row_tmp);
+ A->row_get_into_mlpp_vector(j, a_j_row_tmp);
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index a30c88f..9ea9556 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -51,7 +51,7 @@ void MLPPMatrix::set_data(const Array &p_from) {
}
}
-void MLPPMatrix::add_row(const Vector &p_row) {
+void MLPPMatrix::row_add(const Vector &p_row) {
if (p_row.size() == 0) {
return;
}
@@ -76,7 +76,7 @@ void MLPPMatrix::add_row(const Vector &p_row) {
}
}
-void MLPPMatrix::add_row_pool_vector(const PoolRealArray &p_row) {
+void MLPPMatrix::row_add_pool_vector(const PoolRealArray &p_row) {
if (p_row.size() == 0) {
return;
}
@@ -102,7 +102,7 @@ void MLPPMatrix::add_row_pool_vector(const PoolRealArray &p_row) {
}
}
-void MLPPMatrix::add_row_mlpp_vector(const Ref &p_row) {
+void MLPPMatrix::row_add_mlpp_vector(const Ref &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
int p_row_size = p_row->size();
@@ -131,7 +131,7 @@ void MLPPMatrix::add_row_mlpp_vector(const Ref &p_row) {
}
}
-void MLPPMatrix::add_rows_mlpp_matrix(const Ref &p_other) {
+void MLPPMatrix::rows_add_mlpp_matrix(const Ref &p_other) {
ERR_FAIL_COND(!p_other.is_valid());
int other_data_size = p_other->data_size();
@@ -162,7 +162,7 @@ void MLPPMatrix::add_rows_mlpp_matrix(const Ref &p_other) {
}
}
-void MLPPMatrix::remove_row(int p_index) {
+void MLPPMatrix::row_remove(int p_index) {
ERR_FAIL_INDEX(p_index, _size.y);
--_size.y;
@@ -185,7 +185,7 @@ void MLPPMatrix::remove_row(int p_index) {
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
-void MLPPMatrix::remove_row_unordered(int p_index) {
+void MLPPMatrix::row_remove_unordered(int p_index) {
ERR_FAIL_INDEX(p_index, _size.y);
--_size.y;
@@ -211,7 +211,7 @@ void MLPPMatrix::remove_row_unordered(int p_index) {
CRASH_COND_MSG(!_data, "Out of memory");
}
-void MLPPMatrix::swap_row(int p_index_1, int p_index_2) {
+void MLPPMatrix::row_swap(int p_index_1, int p_index_2) {
ERR_FAIL_INDEX(p_index_1, _size.y);
ERR_FAIL_INDEX(p_index_2, _size.y);
@@ -241,7 +241,7 @@ void MLPPMatrix::resize(const Size2i &p_size) {
CRASH_COND_MSG(!_data, "Out of memory");
}
-Vector MLPPMatrix::get_row_vector(int p_index_y) const {
+Vector MLPPMatrix::row_get_vector(int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Vector());
Vector ret;
@@ -263,7 +263,7 @@ Vector MLPPMatrix::get_row_vector(int p_index_y) const {
return ret;
}
-PoolRealArray MLPPMatrix::get_row_pool_vector(int p_index_y) const {
+PoolRealArray MLPPMatrix::row_get_pool_vector(int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, PoolRealArray());
PoolRealArray ret;
@@ -286,7 +286,7 @@ PoolRealArray MLPPMatrix::get_row_pool_vector(int p_index_y) const {
return ret;
}
-Ref MLPPMatrix::get_row_mlpp_vector(int p_index_y) const {
+Ref MLPPMatrix::row_get_mlpp_vector(int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref());
Ref ret;
@@ -309,7 +309,7 @@ Ref MLPPMatrix::get_row_mlpp_vector(int p_index_y) const {
return ret;
}
-void MLPPMatrix::get_row_into_mlpp_vector(int p_index_y, Ref target) const {
+void MLPPMatrix::row_get_into_mlpp_vector(int p_index_y, Ref target) const {
ERR_FAIL_COND(!target.is_valid());
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -326,7 +326,7 @@ void MLPPMatrix::get_row_into_mlpp_vector(int p_index_y, Ref target)
}
}
-void MLPPMatrix::set_row_vector(int p_index_y, const Vector &p_row) {
+void MLPPMatrix::row_set_vector(int p_index_y, const Vector &p_row) {
ERR_FAIL_COND(p_row.size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -339,7 +339,7 @@ void MLPPMatrix::set_row_vector(int p_index_y, const Vector &p_row) {
}
}
-void MLPPMatrix::set_row_pool_vector(int p_index_y, const PoolRealArray &p_row) {
+void MLPPMatrix::row_set_pool_vector(int p_index_y, const PoolRealArray &p_row) {
ERR_FAIL_COND(p_row.size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -353,7 +353,7 @@ void MLPPMatrix::set_row_pool_vector(int p_index_y, const PoolRealArray &p_row)
}
}
-void MLPPMatrix::set_row_mlpp_vector(int p_index_y, const Ref &p_row) {
+void MLPPMatrix::row_set_mlpp_vector(int p_index_y, const Ref &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
ERR_FAIL_COND(p_row->size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -897,7 +897,7 @@ void MLPPMatrix::kronecker_product(const Ref &B) {
for (int i = 0; i < _size.y; ++i) {
for (int j = 0; j < b_size.y; ++j) {
- B->get_row_into_mlpp_vector(j, row_tmp);
+ B->row_get_into_mlpp_vector(j, row_tmp);
Vector][> row;
for (int k = 0; k < _size.x; ++k) {
@@ -906,7 +906,7 @@ void MLPPMatrix::kronecker_product(const Ref &B) {
Ref flattened_row = row_tmp->flatten_vectorsn(row);
- set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+ row_set_mlpp_vector(i * b_size.y + j, flattened_row);
}
}
}
@@ -941,7 +941,7 @@ Ref MLPPMatrix::kronecker_productn(const Ref &B) const {
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < b_size.y; ++j) {
- B->get_row_into_mlpp_vector(j, row_tmp);
+ B->row_get_into_mlpp_vector(j, row_tmp);
Vector][> row;
for (int k = 0; k < a_size.x; ++k) {
@@ -950,7 +950,7 @@ Ref MLPPMatrix::kronecker_productn(const Ref &B) const {
Ref flattened_row = row_tmp->flatten_vectorsn(row);
- C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+ C->row_set_mlpp_vector(i * b_size.y + j, flattened_row);
}
}
@@ -985,7 +985,7 @@ void MLPPMatrix::kronecker_productb(const Ref &A, const Refget_row_into_mlpp_vector(j, row_tmp);
+ B->row_get_into_mlpp_vector(j, row_tmp);
Vector][> row;
for (int k = 0; k < a_size.x; ++k) {
@@ -994,7 +994,7 @@ void MLPPMatrix::kronecker_productb(const Ref &A, const Ref flattened_row = row_tmp->flatten_vectorsn(row);
- set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+ row_set_mlpp_vector(i * b_size.y + j, flattened_row);
}
}
}
@@ -1696,7 +1696,7 @@ void MLPPMatrix::pinverseo(Ref out) const {
out->set_from_mlpp_matrix(multn(Ref(this))->transposen()->inverse()->multn(transposen()));
}
-Ref MLPPMatrix::zero_mat(int n, int m) const {
+Ref MLPPMatrix::matn_zero(int n, int m) const {
Ref mat;
mat.instance();
@@ -1705,7 +1705,7 @@ Ref MLPPMatrix::zero_mat(int n, int m) const {
return mat;
}
-Ref MLPPMatrix::one_mat(int n, int m) const {
+Ref MLPPMatrix::matn_one(int n, int m) const {
Ref mat;
mat.instance();
@@ -1714,7 +1714,7 @@ Ref MLPPMatrix::one_mat(int n, int m) const {
return mat;
}
-Ref MLPPMatrix::full_mat(int n, int m, int k) const {
+Ref MLPPMatrix::matn_full(int n, int m, int k) const {
Ref mat;
mat.instance();
@@ -1963,10 +1963,10 @@ Ref MLPPMatrix::cov() const {
a_j_row_tmp->resize(_size.x);
for (int i = 0; i < _size.y; ++i) {
- get_row_into_mlpp_vector(i, a_i_row_tmp);
+ row_get_into_mlpp_vector(i, a_i_row_tmp);
for (int j = 0; j < _size.x; ++j) {
- get_row_into_mlpp_vector(j, a_j_row_tmp);
+ row_get_into_mlpp_vector(j, a_j_row_tmp);
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
@@ -1992,10 +1992,10 @@ void MLPPMatrix::covo(Ref out) const {
a_j_row_tmp->resize(_size.x);
for (int i = 0; i < _size.y; ++i) {
- get_row_into_mlpp_vector(i, a_i_row_tmp);
+ row_get_into_mlpp_vector(i, a_i_row_tmp);
for (int j = 0; j < _size.x; ++j) {
- get_row_into_mlpp_vector(j, a_j_row_tmp);
+ row_get_into_mlpp_vector(j, a_j_row_tmp);
out->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
@@ -2310,7 +2310,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svd() const {
EigenResult right_eigen = transposen()->multn(Ref(this))->eigen();
Ref singularvals = left_eigen.eigen_values->sqrtn();
- Ref sigma = zero_mat(_size.y, _size.x);
+ Ref sigma = matn_zero(_size.y, _size.x);
Size2i singularvals_size = singularvals->size();
@@ -2338,7 +2338,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svdb(const Ref &A) const {
EigenResult right_eigen = A->transposen()->multn(A)->eigen();
Ref singularvals = left_eigen.eigen_values->sqrtn();
- Ref sigma = zero_mat(a_size.y, a_size.x);
+ Ref sigma = matn_zero(a_size.y, a_size.x);
Size2i singularvals_size = singularvals->size();
@@ -2706,7 +2706,7 @@ Ref MLPPMatrix::outer_productn(const Ref &a, const Ref &a) {
+void MLPPMatrix::diagonal_set(const Ref &a) {
ERR_FAIL_COND(!a.is_valid());
int a_size = a->size();
@@ -2724,7 +2724,7 @@ void MLPPMatrix::set_diagonal(const Ref &a) {
b_ptr[calculate_index(i, i)] = a_ptr[i];
}
}
-Ref MLPPMatrix::set_diagonaln(const Ref &a) const {
+Ref MLPPMatrix::diagonal_setn(const Ref &a) const {
ERR_FAIL_COND_V(!a.is_valid(), Ref());
Ref B = duplicate_fast();
@@ -3001,13 +3001,13 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_data", "data"), &MLPPMatrix::set_data);
ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");
- ClassDB::bind_method(D_METHOD("add_row", "row"), &MLPPMatrix::add_row_pool_vector);
- ClassDB::bind_method(D_METHOD("add_row_mlpp_vector", "row"), &MLPPMatrix::add_row_mlpp_vector);
- ClassDB::bind_method(D_METHOD("add_rows_mlpp_matrix", "other"), &MLPPMatrix::add_rows_mlpp_matrix);
+ ClassDB::bind_method(D_METHOD("row_add", "row"), &MLPPMatrix::row_add_pool_vector);
+ ClassDB::bind_method(D_METHOD("row_add_mlpp_vector", "row"), &MLPPMatrix::row_add_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("rows_add_mlpp_matrix", "other"), &MLPPMatrix::rows_add_mlpp_matrix);
- ClassDB::bind_method(D_METHOD("remove_row", "index"), &MLPPMatrix::remove_row);
- ClassDB::bind_method(D_METHOD("remove_row_unordered", "index"), &MLPPMatrix::remove_row_unordered);
- ClassDB::bind_method(D_METHOD("swap_row", "index_1", "index_2"), &MLPPMatrix::swap_row);
+ ClassDB::bind_method(D_METHOD("row_remove", "index"), &MLPPMatrix::row_remove);
+ ClassDB::bind_method(D_METHOD("row_remove_unordered", "index"), &MLPPMatrix::row_remove_unordered);
+ ClassDB::bind_method(D_METHOD("row_swap", "index_1", "index_2"), &MLPPMatrix::row_swap);
ClassDB::bind_method(D_METHOD("clear"), &MLPPMatrix::clear);
ClassDB::bind_method(D_METHOD("reset"), &MLPPMatrix::reset);
@@ -3024,12 +3024,12 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x"), &MLPPMatrix::element_get);
ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "val"), &MLPPMatrix::element_set);
- ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y"), &MLPPMatrix::get_row_pool_vector);
- ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y"), &MLPPMatrix::get_row_mlpp_vector);
- ClassDB::bind_method(D_METHOD("get_row_into_mlpp_vector", "index_y", "target"), &MLPPMatrix::get_row_into_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_get_pool_vector", "index_y"), &MLPPMatrix::row_get_pool_vector);
+ ClassDB::bind_method(D_METHOD("row_get_mlpp_vector", "index_y"), &MLPPMatrix::row_get_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_get_into_mlpp_vector", "index_y", "target"), &MLPPMatrix::row_get_into_mlpp_vector);
- ClassDB::bind_method(D_METHOD("set_row_pool_vector", "index_y", "row"), &MLPPMatrix::set_row_pool_vector);
- ClassDB::bind_method(D_METHOD("set_row_mlpp_vector", "index_y", "row"), &MLPPMatrix::set_row_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_set_pool_vector", "index_y", "row"), &MLPPMatrix::row_set_pool_vector);
+ ClassDB::bind_method(D_METHOD("row_set_mlpp_vector", "index_y", "row"), &MLPPMatrix::row_set_mlpp_vector);
ClassDB::bind_method(D_METHOD("fill", "val"), &MLPPMatrix::fill);
@@ -3134,9 +3134,9 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("pinverse"), &MLPPMatrix::pinverse);
ClassDB::bind_method(D_METHOD("pinverseo", "out"), &MLPPMatrix::pinverseo);
- ClassDB::bind_method(D_METHOD("zero_mat", "n", "m"), &MLPPMatrix::zero_mat);
- ClassDB::bind_method(D_METHOD("one_mat", "n", "m"), &MLPPMatrix::one_mat);
- ClassDB::bind_method(D_METHOD("full_mat", "n", "m", "k"), &MLPPMatrix::full_mat);
+ ClassDB::bind_method(D_METHOD("matn_zero", "n", "m"), &MLPPMatrix::matn_zero);
+ ClassDB::bind_method(D_METHOD("matn_one", "n", "m"), &MLPPMatrix::matn_one);
+ ClassDB::bind_method(D_METHOD("matn_full", "n", "m", "k"), &MLPPMatrix::matn_full);
ClassDB::bind_method(D_METHOD("sin"), &MLPPMatrix::sin);
ClassDB::bind_method(D_METHOD("sinn"), &MLPPMatrix::sinn);
@@ -3176,8 +3176,8 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("outer_product", "a", "b"), &MLPPMatrix::outer_product);
ClassDB::bind_method(D_METHOD("outer_productn", "a", "b"), &MLPPMatrix::outer_productn);
- ClassDB::bind_method(D_METHOD("set_diagonal", "a"), &MLPPMatrix::set_diagonal);
- ClassDB::bind_method(D_METHOD("set_diagonaln", "a"), &MLPPMatrix::set_diagonaln);
+ ClassDB::bind_method(D_METHOD("diagonal_set", "a"), &MLPPMatrix::diagonal_set);
+ ClassDB::bind_method(D_METHOD("diagonal_setn", "a"), &MLPPMatrix::diagonal_setn);
ClassDB::bind_method(D_METHOD("diagonal_zeroed", "a"), &MLPPMatrix::diagonal_zeroed);
ClassDB::bind_method(D_METHOD("diagonal_zeroedn", "a"), &MLPPMatrix::diagonal_zeroedn);
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index 89e1d14..042359e 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -31,18 +31,18 @@ public:
return _data;
}
- void add_row(const Vector &p_row);
- void add_row_pool_vector(const PoolRealArray &p_row);
- void add_row_mlpp_vector(const Ref &p_row);
- void add_rows_mlpp_matrix(const Ref &p_other);
+ void row_add(const Vector &p_row);
+ void row_add_pool_vector(const PoolRealArray &p_row);
+ void row_add_mlpp_vector(const Ref &p_row);
+ void rows_add_mlpp_matrix(const Ref &p_other);
- void remove_row(int p_index);
+ void row_remove(int p_index);
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
- void remove_row_unordered(int p_index);
+ void row_remove_unordered(int p_index);
- void swap_row(int p_index_1, int p_index_2);
+ void row_swap(int p_index_1, int p_index_2);
_FORCE_INLINE_ void clear() { resize(Size2i()); }
_FORCE_INLINE_ void reset() {
@@ -98,14 +98,14 @@ public:
_data[p_index_y * _size.x + p_index_x] = p_val;
}
- Vector get_row_vector(int p_index_y) const;
- PoolRealArray get_row_pool_vector(int p_index_y) const;
- Ref get_row_mlpp_vector(int p_index_y) const;
- void get_row_into_mlpp_vector(int p_index_y, Ref target) const;
+ Vector row_get_vector(int p_index_y) const;
+ PoolRealArray row_get_pool_vector(int p_index_y) const;
+ Ref row_get_mlpp_vector(int p_index_y) const;
+ void row_get_into_mlpp_vector(int p_index_y, Ref target) const;
- void set_row_vector(int p_index_y, const Vector &p_row);
- void set_row_pool_vector(int p_index_y, const PoolRealArray &p_row);
- void set_row_mlpp_vector(int p_index_y, const Ref &p_row);
+ void row_set_vector(int p_index_y, const Vector &p_row);
+ void row_set_pool_vector(int p_index_y, const PoolRealArray &p_row);
+ void row_set_mlpp_vector(int p_index_y, const Ref &p_row);
void fill(real_t p_val);
@@ -215,9 +215,9 @@ public:
Ref pinverse() const;
void pinverseo(Ref out) const;
- Ref zero_mat(int n, int m) const;
- Ref one_mat(int n, int m) const;
- Ref full_mat(int n, int m, int k) const;
+ Ref matn_zero(int n, int m) const;
+ Ref matn_one(int n, int m) const;
+ Ref matn_full(int n, int m, int k) const;
void sin();
Ref sinn() const;
@@ -317,8 +317,8 @@ public:
Ref outer_productn(const Ref &a, const Ref &b) const;
// Just sets the diagonal
- void set_diagonal(const Ref &a);
- Ref set_diagonaln(const Ref &a) const;
+ void diagonal_set(const Ref &a);
+ Ref diagonal_setn(const Ref &a) const;
// Sets the diagonals, everythign else will get zeroed
void diagonal_zeroed(const Ref &a);
diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp
index 702d6cf..d4d1415 100644
--- a/mlpp/lin_reg/lin_reg.cpp
+++ b/mlpp/lin_reg/lin_reg.cpp
@@ -181,7 +181,7 @@ void MLPPLinReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
@@ -676,7 +676,7 @@ void MLPPLinReg::normal_equation() {
x_means->resize(input_set_t->size().y);
for (int i = 0; i < input_set_t->size().y; i++) {
- input_set_t->get_row_into_mlpp_vector(i, input_set_t_row_tmp);
+ input_set_t->row_get_into_mlpp_vector(i, input_set_t_row_tmp);
x_means->element_set(i, stat.meanv(input_set_t_row_tmp));
}
diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp
index 7d56616..3fe2de2 100644
--- a/mlpp/log_reg/log_reg.cpp
+++ b/mlpp/log_reg/log_reg.cpp
@@ -177,7 +177,7 @@ void MLPPLogReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_row_tmp);
real_t output_element_set = _output_set->element_get(output_index);
output_element_set_tmp->element_set(0, output_element_set);
diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp
index b62fe32..85c2c1b 100644
--- a/mlpp/mlp/mlp.cpp
+++ b/mlpp/mlp/mlp.cpp
@@ -172,7 +172,7 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
while (true) {
int output_Index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_Index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_Index, input_set_row_tmp);
real_t output_element = _output_set->element_get(output_Index);
output_set_row_tmp->element_set(0, output_element);
diff --git a/mlpp/multinomial_nb/multinomial_nb.cpp b/mlpp/multinomial_nb/multinomial_nb.cpp
index 68bda63..9333c5e 100644
--- a/mlpp/multinomial_nb/multinomial_nb.cpp
+++ b/mlpp/multinomial_nb/multinomial_nb.cpp
@@ -56,7 +56,7 @@ Ref MLPPMultinomialNB::model_set_test(const Ref &X) {
y_hat->resize(x_size.y);
for (int i = 0; i < x_size.y; i++) {
- X->get_row_into_mlpp_vector(i, x_row_tmp);
+ X->row_get_into_mlpp_vector(i, x_row_tmp);
y_hat->element_set(i, model_test(x_row_tmp));
}
diff --git a/mlpp/outlier_finder/outlier_finder.cpp b/mlpp/outlier_finder/outlier_finder.cpp
index 5000226..ec1d7cb 100644
--- a/mlpp/outlier_finder/outlier_finder.cpp
+++ b/mlpp/outlier_finder/outlier_finder.cpp
@@ -30,7 +30,7 @@ Vector> MLPPOutlierFinder::model_set_test(const Ref &
input_set_i_row_tmp->resize(input_set_size.x);
for (int i = 0; i < input_set_size.y; ++i) {
- input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
+ input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
real_t meanv = stat.meanv(input_set_i_row_tmp);
real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);
@@ -75,7 +75,7 @@ PoolVector2iArray MLPPOutlierFinder::model_set_test_indices(const Refresize(input_set_size.x);
for (int i = 0; i < input_set_size.y; ++i) {
- input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
+ input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
real_t meanv = stat.meanv(input_set_i_row_tmp);
real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);
diff --git a/mlpp/pca/pca.cpp b/mlpp/pca/pca.cpp
index 9537456..56b178b 100644
--- a/mlpp/pca/pca.cpp
+++ b/mlpp/pca/pca.cpp
@@ -69,8 +69,8 @@ real_t MLPPPCA::score() {
x_normalized_row_tmp->resize(x_normalized_size.x);
for (int i = 0; i < x_normalized_size_y; ++i) {
- _x_normalized->get_row_into_mlpp_vector(i, x_normalized_row_tmp);
- x_approx->get_row_into_mlpp_vector(i, x_approx_row_tmp);
+ _x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);
+ x_approx->row_get_into_mlpp_vector(i, x_approx_row_tmp);
num += alg.norm_sqv(alg.subtractionnv(x_normalized_row_tmp, x_approx_row_tmp));
}
@@ -78,7 +78,7 @@ real_t MLPPPCA::score() {
num /= x_normalized_size_y;
for (int i = 0; i < x_normalized_size_y; ++i) {
- _x_normalized->get_row_into_mlpp_vector(i, x_normalized_row_tmp);
+ _x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);
den += alg.norm_sqv(x_normalized_row_tmp);
}
diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp
index ceeacee..6be0499 100644
--- a/mlpp/probit_reg/probit_reg.cpp
+++ b/mlpp/probit_reg/probit_reg.cpp
@@ -172,7 +172,7 @@ void MLPPProbitReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_entry = _output_set->element_get(output_index);
real_t y_hat = evaluatev(input_set_row_tmp);
diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp
index b6144da..59985a0 100644
--- a/mlpp/softmax_net/softmax_net.cpp
+++ b/mlpp/softmax_net/softmax_net.cpp
@@ -213,12 +213,12 @@ void MLPPSoftmaxNet::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
- _output_set->get_row_into_mlpp_vector(output_index, output_set_row_tmp);
- output_row_mat_tmp->set_row_mlpp_vector(0, output_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
+ _output_set->row_get_into_mlpp_vector(output_index, output_set_row_tmp);
+ output_row_mat_tmp->row_set_mlpp_vector(0, output_set_row_tmp);
Ref y_hat = evaluatev(input_set_row_tmp);
- y_hat_mat_tmp->set_row_mlpp_vector(0, y_hat);
+ y_hat_mat_tmp->row_set_mlpp_vector(0, y_hat);
PropagateVResult prop_res = propagatev(input_set_row_tmp);
diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp
index 46f0b84..3d70672 100644
--- a/mlpp/softmax_reg/softmax_reg.cpp
+++ b/mlpp/softmax_reg/softmax_reg.cpp
@@ -160,14 +160,14 @@ void MLPPSoftmaxReg::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
real_t output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
Ref y_hat = evaluatev(input_set_row_tmp);
y_hat_matrix_tmp->resize(Size2i(y_hat->size(), 1));
- y_hat_matrix_tmp->set_row_mlpp_vector(0, y_hat);
+ y_hat_matrix_tmp->row_set_mlpp_vector(0, y_hat);
- _output_set->get_row_into_mlpp_vector(output_index, output_set_row_tmp);
- output_set_row_matrix_tmp->set_row_mlpp_vector(0, output_set_row_tmp);
+ _output_set->row_get_into_mlpp_vector(output_index, output_set_row_tmp);
+ output_set_row_matrix_tmp->row_set_mlpp_vector(0, output_set_row_tmp);
cost_prev = cost(y_hat_matrix_tmp, output_set_row_matrix_tmp);
diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp
index a9c96c9..988a22b 100644
--- a/mlpp/svc/svc.cpp
+++ b/mlpp/svc/svc.cpp
@@ -149,7 +149,7 @@ void MLPPSVC::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_indx = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_set_indx);
diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp
index 5b22e03..63fb1d8 100644
--- a/mlpp/tanh_reg/tanh_reg.cpp
+++ b/mlpp/tanh_reg/tanh_reg.cpp
@@ -197,7 +197,7 @@ void MLPPTanhReg::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
int output_index = distribution(generator);
- _input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+ _input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_entry = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_set_entry);
diff --git a/mlpp/utilities/utilities.cpp b/mlpp/utilities/utilities.cpp
index 7598a29..9103623 100644
--- a/mlpp/utilities/utilities.cpp
+++ b/mlpp/utilities/utilities.cpp
@@ -618,8 +618,8 @@ Vector][> MLPPUtilities::create_mini_batchesm(const Refresize(Size2i(size.x, mini_batch_element_count));
for (int j = 0; j < mini_batch_element_count; j++) {
- input_set->get_row_into_mlpp_vector(mini_batch_start_offset + j, row_tmp);
- current_input_set->set_row_mlpp_vector(j, row_tmp);
+ input_set->row_get_into_mlpp_vector(mini_batch_start_offset + j, row_tmp);
+ current_input_set->row_set_mlpp_vector(j, row_tmp);
}
input_mini_batches.push_back(current_input_set);
@@ -660,8 +660,8 @@ MLPPUtilities::CreateMiniBatchMVBatch MLPPUtilities::create_mini_batchesmv(const
for (int j = 0; j < mini_batch_element_count; j++) {
int main_indx = mini_batch_start_offset + j;
- input_set->get_row_into_mlpp_vector(main_indx, row_tmp);
- current_input_set->set_row_mlpp_vector(j, row_tmp);
+ input_set->row_get_into_mlpp_vector(main_indx, row_tmp);
+ current_input_set->row_set_mlpp_vector(j, row_tmp);
current_output_set->element_set(j, output_set->element_get(j));
}
@@ -711,11 +711,11 @@ MLPPUtilities::CreateMiniBatchMMBatch MLPPUtilities::create_mini_batchesmm(const
for (int j = 0; j < mini_batch_element_count; j++) {
int main_indx = mini_batch_start_offset + j;
- input_set->get_row_into_mlpp_vector(main_indx, input_row_tmp);
- current_input_set->set_row_mlpp_vector(j, input_row_tmp);
+ input_set->row_get_into_mlpp_vector(main_indx, input_row_tmp);
+ current_input_set->row_set_mlpp_vector(j, input_row_tmp);
- output_set->get_row_into_mlpp_vector(main_indx, output_row_tmp);
- current_output_set->set_row_mlpp_vector(j, output_row_tmp);
+ output_set->row_get_into_mlpp_vector(main_indx, output_row_tmp);
+ current_output_set->row_set_mlpp_vector(j, output_row_tmp);
}
ret.input_sets.push_back(current_input_set);
diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp
index 5d3defd..850fd11 100644
--- a/mlpp/wgan/wgan.cpp
+++ b/mlpp/wgan/wgan.cpp
@@ -66,7 +66,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
for (int i = 0; i < CRITIC_INTERATIONS; i++) {
generator_input_set = alg.gaussian_noise(_n, _k);
discriminator_input_set->set_from_mlpp_matrix(model_set_test_generator(generator_input_set));
- discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
+ discriminator_input_set->rows_add_mlpp_matrix(_output_set); // Fake + real inputs.
ly_hat = model_set_test_discriminator(discriminator_input_set);
loutput_set = alg.scalar_multiplynv(-1, alg.onevecnv(_n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
diff --git a/test/mlpp_matrix_tests.cpp b/test/mlpp_matrix_tests.cpp
index 7330e7a..e996835 100644
--- a/test/mlpp_matrix_tests.cpp
+++ b/test/mlpp_matrix_tests.cpp
@@ -14,19 +14,19 @@ void MLPPMatrixTests::run_tests() {
PLOG_MSG("test_mlpp_matrix()");
test_mlpp_matrix();
- PLOG_MSG("test_add_row()");
- test_add_row();
- PLOG_MSG("test_add_row_pool_vector()");
- test_add_row_pool_vector();
- PLOG_MSG("test_add_row_mlpp_vector()");
- test_add_row_mlpp_vector();
- PLOG_MSG("test_add_rows_mlpp_matrix()");
- test_add_rows_mlpp_matrix();
+ PLOG_MSG("test_row_add()");
+ test_row_add();
+ PLOG_MSG("test_row_add_pool_vector()");
+ test_row_add_pool_vector();
+ PLOG_MSG("test_row_add_mlpp_vector()");
+ test_row_add_mlpp_vector();
+ PLOG_MSG("test_rows_add_mlpp_matrix()");
+ test_rows_add_mlpp_matrix();
- PLOG_MSG("test_remove_row()");
- test_remove_row();
- PLOG_MSG("test_remove_row_unordered()");
- test_remove_row_unordered();
+ PLOG_MSG("test_row_remove()");
+ test_row_remove();
+ PLOG_MSG("test_row_remove_unordered()");
+ test_row_remove_unordered();
PLOG_MSG("test_mlpp_matrix_mul()");
test_mlpp_matrix_mul();
@@ -55,7 +55,7 @@ void MLPPMatrixTests::test_mlpp_matrix() {
is_approx_equals_mat(rmat, rmat2, "re-set_from_std_vectors test.");
}
-void MLPPMatrixTests::test_add_row() {
+void MLPPMatrixTests::test_row_add() {
std::vector> A = {
{ 1, 2, 3, 4 },
};
@@ -92,16 +92,16 @@ void MLPPMatrixTests::test_add_row() {
Ref rmat;
rmat.instance();
- rmat->add_row(rv);
- is_approx_equals_mat(rmata, rmat, "rmat->add_row(rv);");
+ rmat->row_add(rv);
+ is_approx_equals_mat(rmata, rmat, "rmat->row_add(rv);");
- rmat->add_row(rv);
- is_approx_equals_mat(rmatb, rmat, "rmat->add_row(rv);");
+ rmat->row_add(rv);
+ is_approx_equals_mat(rmatb, rmat, "rmat->row_add(rv);");
- rmat->add_row(rv);
- is_approx_equals_mat(rmatc, rmat, "rmat->add_row(rv);");
+ rmat->row_add(rv);
+ is_approx_equals_mat(rmatc, rmat, "rmat->row_add(rv);");
}
-void MLPPMatrixTests::test_add_row_pool_vector() {
+void MLPPMatrixTests::test_row_add_pool_vector() {
std::vector> A = {
{ 1, 2, 3, 4 },
};
@@ -138,16 +138,16 @@ void MLPPMatrixTests::test_add_row_pool_vector() {
Ref rmat;
rmat.instance();
- rmat->add_row_pool_vector(rv);
- is_approx_equals_mat(rmata, rmat, "rmat->add_row_pool_vector(rv);");
+ rmat->row_add_pool_vector(rv);
+ is_approx_equals_mat(rmata, rmat, "rmat->row_add_pool_vector(rv);");
- rmat->add_row_pool_vector(rv);
- is_approx_equals_mat(rmatb, rmat, "rmat->add_row_pool_vector(rv);");
+ rmat->row_add_pool_vector(rv);
+ is_approx_equals_mat(rmatb, rmat, "rmat->row_add_pool_vector(rv);");
- rmat->add_row_pool_vector(rv);
- is_approx_equals_mat(rmatc, rmat, "rmat->add_row_pool_vector(rv);");
+ rmat->row_add_pool_vector(rv);
+ is_approx_equals_mat(rmatc, rmat, "rmat->row_add_pool_vector(rv);");
}
-void MLPPMatrixTests::test_add_row_mlpp_vector() {
+void MLPPMatrixTests::test_row_add_mlpp_vector() {
std::vector> A = {
{ 1, 2, 3, 4 },
};
@@ -185,16 +185,16 @@ void MLPPMatrixTests::test_add_row_mlpp_vector() {
Ref rmat;
rmat.instance();
- rmat->add_row_mlpp_vector(rv);
- is_approx_equals_mat(rmata, rmat, "rmat->add_row_mlpp_vector(rv);");
+ rmat->row_add_mlpp_vector(rv);
+ is_approx_equals_mat(rmata, rmat, "rmat->row_add_mlpp_vector(rv);");
- rmat->add_row_mlpp_vector(rv);
- is_approx_equals_mat(rmatb, rmat, "rmat->add_row_mlpp_vector(rv);");
+ rmat->row_add_mlpp_vector(rv);
+ is_approx_equals_mat(rmatb, rmat, "rmat->row_add_mlpp_vector(rv);");
- rmat->add_row_mlpp_vector(rv);
- is_approx_equals_mat(rmatc, rmat, "rmat->add_row_mlpp_vector(rv);");
+ rmat->row_add_mlpp_vector(rv);
+ is_approx_equals_mat(rmatc, rmat, "rmat->row_add_mlpp_vector(rv);");
}
-void MLPPMatrixTests::test_add_rows_mlpp_matrix() {
+void MLPPMatrixTests::test_rows_add_mlpp_matrix() {
std::vector> A = {
{ 1, 2, 3, 4 },
};
@@ -220,7 +220,7 @@ void MLPPMatrixTests::test_add_rows_mlpp_matrix() {
Ref rv;
rv.instance();
- rv->add_row_pool_vector(rvp);
+ rv->row_add_pool_vector(rvp);
Ref rmata;
rmata.instance();
@@ -237,17 +237,17 @@ void MLPPMatrixTests::test_add_rows_mlpp_matrix() {
Ref rmat;
rmat.instance();
- rmat->add_rows_mlpp_matrix(rv);
- is_approx_equals_mat(rmata, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+ rmat->rows_add_mlpp_matrix(rv);
+ is_approx_equals_mat(rmata, rmat, "rmat->rows_add_mlpp_matrix(rv);");
- rmat->add_rows_mlpp_matrix(rv);
- is_approx_equals_mat(rmatb, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+ rmat->rows_add_mlpp_matrix(rv);
+ is_approx_equals_mat(rmatb, rmat, "rmat->rows_add_mlpp_matrix(rv);");
- rmat->add_rows_mlpp_matrix(rv);
- is_approx_equals_mat(rmatc, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+ rmat->rows_add_mlpp_matrix(rv);
+ is_approx_equals_mat(rmatc, rmat, "rmat->rows_add_mlpp_matrix(rv);");
}
-void MLPPMatrixTests::test_remove_row() {
+void MLPPMatrixTests::test_row_remove() {
std::vector> A = {
{ 1, 2, 3, 4 },
{ 5, 6, 7, 8 },
@@ -286,16 +286,16 @@ void MLPPMatrixTests::test_remove_row() {
rmat.instance();
rmat->set_from_std_vectors(D);
- rmat->remove_row(2);
- is_approx_equals_mat(rmat, rmata, "rmat->remove_row(2);");
+ rmat->row_remove(2);
+ is_approx_equals_mat(rmat, rmata, "rmat->row_remove(2);");
- rmat->remove_row(2);
- is_approx_equals_mat(rmat, rmatb, "rmat->remove_row(2);");
+ rmat->row_remove(2);
+ is_approx_equals_mat(rmat, rmatb, "rmat->row_remove(2);");
- rmat->remove_row(1);
- is_approx_equals_mat(rmat, rmatc, "rmat->remove_row(1);");
+ rmat->row_remove(1);
+ is_approx_equals_mat(rmat, rmatc, "rmat->row_remove(1);");
}
-void MLPPMatrixTests::test_remove_row_unordered() {
+void MLPPMatrixTests::test_row_remove_unordered() {
std::vector> A = {
{ 1, 2, 3, 4 },
{ 13, 14, 15, 16 },
@@ -334,14 +334,14 @@ void MLPPMatrixTests::test_remove_row_unordered() {
rmat.instance();
rmat->set_from_std_vectors(D);
- rmat->remove_row_unordered(1);
- is_approx_equals_mat(rmat, rmata, "rmat->remove_row_unordered(1);");
+ rmat->row_remove_unordered(1);
+ is_approx_equals_mat(rmat, rmata, "rmat->row_remove_unordered(1);");
- rmat->remove_row_unordered(0);
- is_approx_equals_mat(rmat, rmatb, "rmat->remove_row(0);");
+ rmat->row_remove_unordered(0);
+ is_approx_equals_mat(rmat, rmatb, "rmat->row_remove_unordered(0);");
- rmat->remove_row_unordered(1);
- is_approx_equals_mat(rmat, rmatc, "rmat->remove_row_unordered(1);");
+ rmat->row_remove_unordered(1);
+ is_approx_equals_mat(rmat, rmatc, "rmat->row_remove_unordered(1);");
}
void MLPPMatrixTests::test_mlpp_matrix_mul() {
diff --git a/test/mlpp_matrix_tests.h b/test/mlpp_matrix_tests.h
index b2cfd22..8ba0303 100644
--- a/test/mlpp_matrix_tests.h
+++ b/test/mlpp_matrix_tests.h
@@ -25,13 +25,13 @@ public:
void test_mlpp_matrix();
- void test_add_row();
- void test_add_row_pool_vector();
- void test_add_row_mlpp_vector();
- void test_add_rows_mlpp_matrix();
+ void test_row_add();
+ void test_row_add_pool_vector();
+ void test_row_add_mlpp_vector();
+ void test_rows_add_mlpp_matrix();
- void test_remove_row();
- void test_remove_row_unordered();
+ void test_row_remove();
+ void test_row_remove_unordered();
void test_mlpp_matrix_mul();
]