Matrix API tweaks.
commit a680e6bd6e
parent 020f6a601b
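
The pattern throughout the patch is a noun-first rename of the MLPPMatrix row helpers: add_row becomes row_add, get_row_* becomes row_get_*, set_row_* becomes row_set_*, remove_row and swap_row become row_remove and row_swap, set_diagonal becomes diagonal_set, and zero_mat/one_mat/full_mat become matn_zero/matn_one/matn_full. A minimal call-site sketch with the new names follows; it assumes a module build where mlpp_matrix.h and mlpp_vector.h are on the include path, and the variable names are illustrative rather than taken from the patch.

#include "mlpp_matrix.h"
#include "mlpp_vector.h"

void row_api_example() {
	// 2 rows (y) by 3 columns (x), mirroring the Size2i(x, y) convention used in the patch.
	Ref<MLPPMatrix> m;
	m.instance();
	m->resize(Size2i(3, 2));
	m->fill(0);

	Ref<MLPPVector> row;
	row.instance();
	row->resize(3);
	row->element_set(0, 1.0);

	m->row_set_mlpp_vector(0, row);      // was set_row_mlpp_vector()
	m->row_get_into_mlpp_vector(0, row); // was get_row_into_mlpp_vector()
	m->row_add_mlpp_vector(row);         // was add_row_mlpp_vector(); m now has 3 rows
	m->row_remove(1);                    // was remove_row()

	Ref<MLPPMatrix> zeros = m->matn_zero(2, 2); // was zero_mat()
}
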
@@ -29,19 +29,19 @@
 <description>
 </description>
 </method>
-<method name="add_row">
+<method name="row_add">
 <return type="void" />
 <argument index="0" name="row" type="PoolRealArray" />
 <description>
 </description>
 </method>
-<method name="add_row_mlpp_vector">
+<method name="row_add_mlpp_vector">
 <return type="void" />
 <argument index="0" name="row" type="MLPPVector" />
 <description>
 </description>
 </method>
-<method name="add_rows_mlpp_matrix">
+<method name="rows_add_mlpp_matrix">
 <return type="void" />
 <argument index="0" name="other" type="MLPPMatrix" />
 <description>
@@ -293,7 +293,7 @@
 <description>
 </description>
 </method>
-<method name="full_mat" qualifiers="const">
+<method name="matn_full" qualifiers="const">
 <return type="MLPPMatrix" />
 <argument index="0" name="n" type="int" />
 <argument index="1" name="m" type="int" />
@@ -337,20 +337,20 @@
 <description>
 </description>
 </method>
-<method name="get_row_into_mlpp_vector" qualifiers="const">
+<method name="row_get_into_mlpp_vector" qualifiers="const">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="target" type="MLPPVector" />
 <description>
 </description>
 </method>
-<method name="get_row_mlpp_vector" qualifiers="const">
+<method name="row_get_mlpp_vector" qualifiers="const">
 <return type="MLPPVector" />
 <argument index="0" name="index_y" type="int" />
 <description>
 </description>
 </method>
-<method name="get_row_pool_vector" qualifiers="const">
+<method name="row_get_pool_vector" qualifiers="const">
 <return type="PoolRealArray" />
 <argument index="0" name="index_y" type="int" />
 <description>
@@ -511,7 +511,7 @@
 <description>
 </description>
 </method>
-<method name="one_mat" qualifiers="const">
+<method name="matn_one" qualifiers="const">
 <return type="MLPPMatrix" />
 <argument index="0" name="n" type="int" />
 <argument index="1" name="m" type="int" />
@@ -543,13 +543,13 @@
 <description>
 </description>
 </method>
-<method name="remove_row">
+<method name="row_remove">
 <return type="void" />
 <argument index="0" name="index" type="int" />
 <description>
 </description>
 </method>
-<method name="remove_row_unordered">
+<method name="row_remove_unordered">
 <return type="void" />
 <argument index="0" name="index" type="int" />
 <description>
@@ -604,13 +604,13 @@
 <description>
 </description>
 </method>
-<method name="set_diagonal">
+<method name="diagonal_set">
 <return type="void" />
 <argument index="0" name="a" type="MLPPVector" />
 <description>
 </description>
 </method>
-<method name="set_diagonaln" qualifiers="const">
+<method name="diagonal_setn" qualifiers="const">
 <return type="MLPPMatrix" />
 <argument index="0" name="a" type="MLPPVector" />
 <description>
@@ -656,14 +656,14 @@
 <description>
 </description>
 </method>
-<method name="set_row_mlpp_vector">
+<method name="row_set_mlpp_vector">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="row" type="MLPPVector" />
 <description>
 </description>
 </method>
-<method name="set_row_pool_vector">
+<method name="row_set_pool_vector">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="row" type="PoolRealArray" />
@@ -737,7 +737,7 @@
 <description>
 </description>
 </method>
-<method name="swap_row">
+<method name="row_swap">
 <return type="void" />
 <argument index="0" name="index_1" type="int" />
 <argument index="1" name="index_2" type="int" />
@@ -770,7 +770,7 @@
 <description>
 </description>
 </method>
-<method name="zero_mat" qualifiers="const">
+<method name="matn_zero" qualifiers="const">
 <return type="MLPPMatrix" />
 <argument index="0" name="n" type="int" />
 <argument index="1" name="m" type="int" />

@@ -170,7 +170,7 @@
 <description>
 </description>
 </method>
-<method name="get_row_into_mlpp_vector" qualifiers="const">
+<method name="row_get_into_mlpp_vector" qualifiers="const">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="index_z" type="int" />
@@ -178,14 +178,14 @@
 <description>
 </description>
 </method>
-<method name="get_row_mlpp_vector" qualifiers="const">
+<method name="row_get_mlpp_vector" qualifiers="const">
 <return type="MLPPVector" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="index_z" type="int" />
 <description>
 </description>
 </method>
-<method name="get_row_pool_vector" qualifiers="const">
+<method name="row_get_pool_vector" qualifiers="const">
 <return type="PoolRealArray" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="index_z" type="int" />
@@ -464,7 +464,7 @@
 <description>
 </description>
 </method>
-<method name="set_row_mlpp_vector">
+<method name="row_set_mlpp_vector">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="index_z" type="int" />
@@ -472,7 +472,7 @@
 <description>
 </description>
 </method>
-<method name="set_row_pool_vector">
+<method name="row_set_pool_vector">
 <return type="void" />
 <argument index="0" name="index_y" type="int" />
 <argument index="1" name="index_z" type="int" />

@@ -924,11 +924,11 @@ Ref<MLPPMatrix> MLPPActivation::softmax_normm(const Ref<MLPPMatrix> &z) {
 row_tmp->resize(z_size.x);

 for (int i = 0; i < z_size.y; ++i) {
-z->get_row_into_mlpp_vector(i, row_tmp);
+z->row_get_into_mlpp_vector(i, row_tmp);

 Ref<MLPPVector> sfn = softmax_normv(row_tmp);

-a->set_row_mlpp_vector(i, sfn);
+a->row_set_mlpp_vector(i, sfn);
 }

 return a;
@@ -974,11 +974,11 @@ Ref<MLPPMatrix> MLPPActivation::softmax_derivm(const Ref<MLPPMatrix> &z) {
 row_tmp->resize(z_size.x);

 for (int i = 0; i < z_size.y; ++i) {
-z->get_row_into_mlpp_vector(i, row_tmp);
+z->row_get_into_mlpp_vector(i, row_tmp);

 Ref<MLPPVector> sfn = softmax_derivm(z);

-a->set_row_mlpp_vector(i, sfn);
+a->row_set_mlpp_vector(i, sfn);
 }

 return a;
@@ -1021,11 +1021,11 @@ Ref<MLPPMatrix> MLPPActivation::adj_softmax_normm(const Ref<MLPPMatrix> &z) {
 row_rmp->resize(size.x);

 for (int i = 0; i < size.y; ++i) {
-z->get_row_into_mlpp_vector(i, row_rmp);
+z->row_get_into_mlpp_vector(i, row_rmp);

 Ref<MLPPVector> nv = adj_softmax_normv(row_rmp);

-n->set_row_mlpp_vector(i, nv);
+n->row_set_mlpp_vector(i, nv);
 }

 return n;
@@ -1066,11 +1066,11 @@ Ref<MLPPMatrix> MLPPActivation::adj_softmax_derivm(const Ref<MLPPMatrix> &z) {
 row_rmp->resize(size.x);

 for (int i = 0; i < size.y; ++i) {
-z->get_row_into_mlpp_vector(i, row_rmp);
+z->row_get_into_mlpp_vector(i, row_rmp);

 Ref<MLPPVector> nv = adj_softmax_derivv(row_rmp);

-n->set_row_mlpp_vector(i, nv);
+n->row_set_mlpp_vector(i, nv);
 }

 return n;
@@ -1128,15 +1128,15 @@ Vector<Ref<MLPPMatrix>> MLPPActivation::softmax_deriv_normm(const Ref<MLPPMatrix
 d->resize(Size2i(a_size_x, z_size_y));

 for (int j = 0; j < z_size_y; ++j) {
-a->get_row_into_mlpp_vector(i, a_i_tmp);
+a->row_get_into_mlpp_vector(i, a_i_tmp);

 if (i == j) {
 Ref<MLPPVector> d_j = alg.subtractionnv(a_i_tmp, alg.hadamard_productnv(a_i_tmp, a_i_tmp));
-d->set_row_mlpp_vector(j, d_j);
+d->row_set_mlpp_vector(j, d_j);
 } else {
-a->get_row_into_mlpp_vector(j, a_j_tmp);
+a->row_get_into_mlpp_vector(j, a_j_tmp);
 Ref<MLPPVector> d_j = alg.scalar_multiplynv(-1, alg.hadamard_productnv(a_i_tmp, a_j_tmp));
-d->set_row_mlpp_vector(j, d_j);
+d->row_set_mlpp_vector(j, d_j);
 }
 }

@@ -1196,15 +1196,15 @@ Vector<Ref<MLPPMatrix>> MLPPActivation::softmax_deriv_derivm(const Ref<MLPPMatri
 d->resize(Size2i(a_size_x, z_size_y));

 for (int j = 0; j < z_size_y; ++j) {
-a->get_row_into_mlpp_vector(i, a_i_tmp);
+a->row_get_into_mlpp_vector(i, a_i_tmp);

 if (i == j) {
 Ref<MLPPVector> d_j = alg.subtractionnv(a_i_tmp, alg.hadamard_productnv(a_i_tmp, a_i_tmp));
-d->set_row_mlpp_vector(j, d_j);
+d->row_set_mlpp_vector(j, d_j);
 } else {
-a->get_row_into_mlpp_vector(j, a_j_tmp);
+a->row_get_into_mlpp_vector(j, a_j_tmp);
 Ref<MLPPVector> d_j = alg.scalar_multiplynv(-1, alg.hadamard_productnv(a_i_tmp, a_j_tmp));
-d->set_row_mlpp_vector(j, d_j);
+d->row_set_mlpp_vector(j, d_j);
 }
 }

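For reference, the loops above apply the standard softmax and its Jacobian row by row; for a row z of the input matrix,

\[ \operatorname{softmax}(z)_i = \frac{e^{z_i}}{\sum_k e^{z_k}}, \qquad \frac{\partial \operatorname{softmax}(z)_i}{\partial z_j} = \operatorname{softmax}(z)_i \left( \delta_{ij} - \operatorname{softmax}(z)_j \right), \]

which is exactly what the i == j branch (a_i minus the Hadamard product a_i with itself) and the else branch (minus the Hadamard product of a_i and a_j) compute in softmax_deriv_normm.
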
@@ -125,7 +125,7 @@ void MLPPANN::sgd(real_t learning_rate, int max_epoch, bool ui) {

 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_element_set = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_element_set);

@@ -128,11 +128,11 @@ void MLPPAutoEncoder::sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
-input_set_mat_tmp->set_row_mlpp_vector(0, input_set_row_tmp);
+input_set_mat_tmp->row_set_mlpp_vector(0, input_set_row_tmp);

 Ref<MLPPVector> y_hat = evaluatev(input_set_row_tmp);
-y_hat_mat_tmp->set_row_mlpp_vector(0, y_hat);
+y_hat_mat_tmp->row_set_mlpp_vector(0, y_hat);

 PropagateVResult prop_res = propagatev(input_set_row_tmp);

@@ -22,7 +22,7 @@ Ref<MLPPVector> MLPPBernoulliNB::model_set_test(const Ref<MLPPMatrix> &X) {
 x_row_tmp->resize(X->size().x);

 for (int i = 0; i < X->size().y; i++) {
-X->get_row_into_mlpp_vector(i, x_row_tmp);
+X->row_get_into_mlpp_vector(i, x_row_tmp);

 y_hat->element_set(i, model_test(x_row_tmp));
 }

@@ -122,7 +122,7 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_element_set = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_element_set);

@@ -561,7 +561,7 @@ real_t MLPPCost::dual_form_svm(const Ref<MLPPVector> &alpha, const Ref<MLPPMatri
 Ref<MLPPMatrix> alpha_m;
 alpha_m.instance();
 alpha_m->resize(Size2i(alpha->size(), 1));
-alpha_m->set_row_mlpp_vector(0, alpha);
+alpha_m->row_set_mlpp_vector(0, alpha);

 Ref<MLPPMatrix> alpha_m_res = alg.matmultnm(alg.matmultnm(alpha_m, Q), alg.transposenm(alpha_m));

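Here alpha is laid out as a 1 x n matrix so that alpha_m * Q * alpha_m^T forms the quadratic term of the usual SVM dual objective. Assuming Q holds the label-scaled kernel entries, the quantity being evaluated is

\[ W(\alpha) = \sum_i \alpha_i - \tfrac{1}{2}\, \alpha^{\mathsf T} Q\, \alpha, \qquad Q_{ij} = y_i y_j\, K(x_i, x_j), \]

with the linear term and constraint handling outside the hunk shown.
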
@@ -369,11 +369,11 @@ MLPPData::SplitComplexData MLPPData::train_test_split(Ref<MLPPDataComplex> data,
 for (int i = 0; i < test_input_number; ++i) {
 int index = indices[i];

-orig_input->get_row_into_mlpp_vector(index, orig_input_row_tmp);
+orig_input->row_get_into_mlpp_vector(index, orig_input_row_tmp);
-orig_output->get_row_into_mlpp_vector(index, orig_output_row_tmp);
+orig_output->row_get_into_mlpp_vector(index, orig_output_row_tmp);

-res_test_input->set_row_mlpp_vector(i, orig_input);
+res_test_input->row_set_mlpp_vector(i, orig_input);
-res_test_output->set_row_mlpp_vector(i, orig_output);
+res_test_output->row_set_mlpp_vector(i, orig_output);
 }

 Ref<MLPPMatrix> res_train_input = res.train->get_input();
@@ -387,11 +387,11 @@ MLPPData::SplitComplexData MLPPData::train_test_split(Ref<MLPPDataComplex> data,
 for (int i = 0; i < train_input_number; ++i) {
 int index = indices[train_input_number + i];

-orig_input->get_row_into_mlpp_vector(index, orig_input_row_tmp);
+orig_input->row_get_into_mlpp_vector(index, orig_input_row_tmp);
-orig_output->get_row_into_mlpp_vector(index, orig_output_row_tmp);
+orig_output->row_get_into_mlpp_vector(index, orig_output_row_tmp);

-res_train_input->set_row_mlpp_vector(i, orig_input);
+res_train_input->row_set_mlpp_vector(i, orig_input);
-res_train_output->set_row_mlpp_vector(i, orig_output);
+res_train_output->row_set_mlpp_vector(i, orig_output);
 }

 return res;
@@ -1280,7 +1280,7 @@ Ref<MLPPMatrix> MLPPData::mean_centering(const Ref<MLPPMatrix> &p_X) {
 x_row_tmp->resize(x_size.x);

 for (int i = 0; i < x_size.y; ++i) {
-X->get_row_into_mlpp_vector(i, x_row_tmp);
+X->row_get_into_mlpp_vector(i, x_row_tmp);

 real_t mean_i = stat.meanv(x_row_tmp);

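The mean_centering hunk only shows the per-row mean being computed; assuming the remainder of the function subtracts it from every element of that row, which is the usual definition of mean centering, each row i of X is transformed as

\[ x_{ij} \leftarrow x_{ij} - \bar{x}_i, \qquad \bar{x}_i = \frac{1}{m} \sum_{j=1}^{m} x_{ij}. \]
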
@@ -53,8 +53,8 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
 if (_alpha->element_get(i) < _C && _alpha->element_get(i) > 0) {
 for (int j = 0; j < _alpha->size(); j++) {
 if (_alpha->element_get(j) > 0) {
-_input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
+_input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
-_input_set->get_row_into_mlpp_vector(j, input_set_j_row_tmp);
+_input_set->row_get_into_mlpp_vector(j, input_set_j_row_tmp);

 sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
 }
@@ -216,7 +216,7 @@ real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {

 for (int j = 0; j < _alpha->size(); j++) {
 if (_alpha->element_get(j) != 0) {
-_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
 z += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
 }
 }
@@ -249,8 +249,8 @@ Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {

 for (int j = 0; j < _alpha->size(); j++) {
 if (_alpha->element_get(j) != 0) {
-_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(j, input_set_row_tmp);
-X->get_row_into_mlpp_vector(i, x_row_tmp);
+X->row_get_into_mlpp_vector(i, x_row_tmp);

 sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
 }

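propagatev and propagatem accumulate the standard dual-form SVM score over the non-zero coefficients, a plain dot product for now per the TO DO about kernelizations:

\[ z(x) = \sum_{j:\, \alpha_j \neq 0} \alpha_j\, y_j\, \langle x_j, x \rangle, \]

where the x_j are the stored input rows fetched with row_get_into_mlpp_vector and the y_j are the labels read with element_get.
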
@@ -109,7 +109,7 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_element_set = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_element_set);

@@ -60,7 +60,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {

 Ref<MLPPMatrix> generator_input_set = alg.gaussian_noise(_n, _k);
 Ref<MLPPMatrix> discriminator_input_set = model_set_test_generator(generator_input_set);
-discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
+discriminator_input_set->rows_add_mlpp_matrix(_output_set); // Fake + real inputs.

 Ref<MLPPVector> y_hat = model_set_test_discriminator(discriminator_input_set);
 Ref<MLPPVector> output_set = alg.zerovecnv(_n);

@@ -45,7 +45,7 @@ Ref<MLPPVector> MLPPGaussianNB::model_set_test(const Ref<MLPPMatrix> &X) {
 x_row_tmp->resize(X->size().x);

 for (int i = 0; i < X->size().y; i++) {
-X->get_row_into_mlpp_vector(i, x_row_tmp);
+X->row_get_into_mlpp_vector(i, x_row_tmp);

 y_hat->element_set(i, model_test(x_row_tmp));
 }

@@ -77,11 +77,11 @@ Ref<MLPPMatrix> MLPPKMeans::model_set_test(const Ref<MLPPMatrix> &X) {
 int r0_size = _r->size().x;

 for (int i = 0; i < input_set_size_y; ++i) {
-_mu->get_row_into_mlpp_vector(0, closest_centroid);
+_mu->row_get_into_mlpp_vector(0, closest_centroid);
-X->get_row_into_mlpp_vector(i, tmp_xiv);
+X->row_get_into_mlpp_vector(i, tmp_xiv);

 for (int j = 0; j < r0_size; ++j) {
-_mu->get_row_into_mlpp_vector(j, tmp_mujv);
+_mu->row_get_into_mlpp_vector(j, tmp_mujv);

 bool is_centroid_closer = alg.euclidean_distance(tmp_xiv, tmp_mujv) < alg.euclidean_distance(tmp_xiv, closest_centroid);

@@ -90,7 +90,7 @@ Ref<MLPPMatrix> MLPPKMeans::model_set_test(const Ref<MLPPMatrix> &X) {
 }
 }

-closest_centroids->set_row_mlpp_vector(i, closest_centroid);
+closest_centroids->row_set_mlpp_vector(i, closest_centroid);
 }

 return closest_centroids;
@@ -105,7 +105,7 @@ Ref<MLPPVector> MLPPKMeans::model_test(const Ref<MLPPVector> &x) {
 closest_centroid.instance();
 closest_centroid->resize(_mu->size().x);

-_mu->get_row_into_mlpp_vector(0, closest_centroid);
+_mu->row_get_into_mlpp_vector(0, closest_centroid);

 int mu_size_y = _mu->size().y;

@@ -114,7 +114,7 @@ Ref<MLPPVector> MLPPKMeans::model_test(const Ref<MLPPVector> &x) {
 tmp_mujv->resize(_mu->size().x);

 for (int j = 0; j < mu_size_y; ++j) {
-_mu->get_row_into_mlpp_vector(j, tmp_mujv);
+_mu->row_get_into_mlpp_vector(j, tmp_mujv);

 if (alg.euclidean_distance(x, tmp_mujv) < alg.euclidean_distance(x, closest_centroid)) {
 closest_centroid->set_from_mlpp_vector(tmp_mujv);
@@ -218,8 +218,8 @@ Ref<MLPPVector> MLPPKMeans::silhouette_scores() {
 mu_j_tempv->resize(_mu->size().x);

 for (int i = 0; i < input_set_size_y; ++i) {
-_r->get_row_into_mlpp_vector(i, r_i_tempv);
+_r->row_get_into_mlpp_vector(i, r_i_tempv);
-_input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+_input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);

 // COMPUTING a[i]
 real_t a = 0;
@@ -228,10 +228,10 @@ Ref<MLPPVector> MLPPKMeans::silhouette_scores() {
 continue;
 }

-_r->get_row_into_mlpp_vector(j, r_j_tempv);
+_r->row_get_into_mlpp_vector(j, r_j_tempv);

 if (r_i_tempv->is_equal_approx(r_j_tempv)) {
-_input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+_input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);

 a += alg.euclidean_distance(input_set_i_tempv, input_set_j_tempv);
 }
@@ -240,17 +240,17 @@ Ref<MLPPVector> MLPPKMeans::silhouette_scores() {
 // NORMALIZE a[i]
 a /= closest_centroids->size().x - 1;

-closest_centroids->get_row_into_mlpp_vector(i, closest_centroids_i_tempv);
+closest_centroids->row_get_into_mlpp_vector(i, closest_centroids_i_tempv);

 // COMPUTING b[i]
 real_t b = Math_INF;
 for (int j = 0; j < mu_size_y; ++j) {
-_mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+_mu->row_get_into_mlpp_vector(j, mu_j_tempv);

 if (!closest_centroids_i_tempv->is_equal_approx(mu_j_tempv)) {
 real_t sum = 0;
 for (int k = 0; k < input_set_size_y; ++k) {
-_input_set->get_row_into_mlpp_vector(k, input_set_k_tempv);
+_input_set->row_get_into_mlpp_vector(k, input_set_k_tempv);

 sum += alg.euclidean_distance(input_set_i_tempv, input_set_k_tempv);
 }
@@ -258,7 +258,7 @@ Ref<MLPPVector> MLPPKMeans::silhouette_scores() {
 // NORMALIZE b[i]
 real_t k_cluster_size = 0;
 for (int k = 0; k < closest_centroids_size_y; ++k) {
-_input_set->get_row_into_mlpp_vector(k, closest_centroids_k_tempv);
+_input_set->row_get_into_mlpp_vector(k, closest_centroids_k_tempv);

 if (closest_centroids_k_tempv->is_equal_approx(mu_j_tempv)) {
 ++k_cluster_size;
@@ -332,18 +332,18 @@ void MLPPKMeans::_evaluate() {
 _r->fill(0);

 for (int i = 0; i < r_size_y; ++i) {
-_mu->get_row_into_mlpp_vector(0, closest_centroid);
+_mu->row_get_into_mlpp_vector(0, closest_centroid);
-_input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+_input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);

 closest_centroid_current_dist = alg.euclidean_distance(input_set_i_tempv, closest_centroid);

 for (int j = 0; j < r_size_x; ++j) {
-_mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+_mu->row_get_into_mlpp_vector(j, mu_j_tempv);

 bool is_centroid_closer = alg.euclidean_distance(input_set_i_tempv, mu_j_tempv) < closest_centroid_current_dist;

 if (is_centroid_closer) {
-_mu->get_row_into_mlpp_vector(j, closest_centroid);
+_mu->row_get_into_mlpp_vector(j, closest_centroid);
 closest_centroid_current_dist = alg.euclidean_distance(input_set_i_tempv, closest_centroid);
 closest_centroid_index = j;
 }
@@ -381,7 +381,7 @@ void MLPPKMeans::_compute_mu() {

 real_t den = 0;
 for (int j = 0; j < r_size_y; ++j) {
-_input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+_input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);

 real_t r_j_i = _r->element_get(j, i);

@@ -393,7 +393,7 @@ void MLPPKMeans::_compute_mu() {

 alg.scalar_multiplyv(real_t(1) / real_t(den), num, mu_tempv);

-_mu->set_row_mlpp_vector(i, mu_tempv);
+_mu->row_set_mlpp_vector(i, mu_tempv);
 }
 }

@@ -416,8 +416,8 @@ void MLPPKMeans::_centroid_initialization() {
 for (int i = 0; i < _k; ++i) {
 int indx = rand.random(0, input_set_size_y_rand);

-_input_set->get_row_into_mlpp_vector(indx, mu_tempv);
+_input_set->row_get_into_mlpp_vector(indx, mu_tempv);
-_mu->set_row_mlpp_vector(i, mu_tempv);
+_mu->row_set_mlpp_vector(i, mu_tempv);
 }
 }

@@ -439,8 +439,8 @@ void MLPPKMeans::_kmeanspp_initialization() {
 mu_tempv.instance();
 mu_tempv->resize(_mu->size().x);

-_input_set->get_row_into_mlpp_vector(rand.random(0, input_set_size_y - 1), mu_tempv);
+_input_set->row_get_into_mlpp_vector(rand.random(0, input_set_size_y - 1), mu_tempv);
-_mu->set_row_mlpp_vector(0, mu_tempv);
+_mu->row_set_mlpp_vector(0, mu_tempv);

 Ref<MLPPVector> input_set_j_tempv;
 input_set_j_tempv.instance();
@@ -452,14 +452,14 @@ void MLPPKMeans::_kmeanspp_initialization() {

 for (int i = 1; i < _k - 1; ++i) {
 for (int j = 0; j < input_set_size_y; ++j) {
-_input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
+_input_set->row_get_into_mlpp_vector(j, input_set_j_tempv);

 real_t max_dist = 0;
 // SUM ALL THE SQUARED DISTANCES, CHOOSE THE ONE THAT'S FARTHEST
 // AS TO SPREAD OUT THE CLUSTER CENTROIDS.
 real_t sum = 0;
 for (int k = 0; k < i; k++) {
-_mu->get_row_into_mlpp_vector(k, mu_tempv);
+_mu->row_get_into_mlpp_vector(k, mu_tempv);

 sum += alg.euclidean_distance(input_set_j_tempv, mu_tempv);
 }
@@ -470,7 +470,7 @@ void MLPPKMeans::_kmeanspp_initialization() {
 }
 }

-_mu->set_row_mlpp_vector(i, farthest_centroid);
+_mu->row_set_mlpp_vector(i, farthest_centroid);
 }
 }
 real_t MLPPKMeans::_cost() {
@@ -495,10 +495,10 @@ real_t MLPPKMeans::_cost() {

 real_t sum = 0;
 for (int i = 0; i < r_size_y; i++) {
-_input_set->get_row_into_mlpp_vector(i, input_set_i_tempv);
+_input_set->row_get_into_mlpp_vector(i, input_set_i_tempv);

 for (int j = 0; j < r_size_x; j++) {
-_mu->get_row_into_mlpp_vector(j, mu_j_tempv);
+_mu->row_get_into_mlpp_vector(j, mu_j_tempv);

 alg.subtractionv(input_set_i_tempv, mu_j_tempv, sub_tempv);
 sum += _r->element_get(i, j) * alg.norm_sqv(sub_tempv);

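In silhouette_scores(), a is the mean distance from sample i to the other members of its own cluster and b is built from distances to the clusters of the remaining centroids; the standard silhouette value those two quantities feed into is

\[ s(i) = \frac{b(i) - a(i)}{\max\{a(i),\, b(i)\}}, \]

though the final combination step falls outside the hunks shown here.
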
@@ -44,7 +44,7 @@ PoolIntArray MLPPKNN::model_set_test(const Ref<MLPPMatrix> &X) {
 y_hat.resize(y_size);

 for (int i = 0; i < y_size; i++) {
-X->get_row_into_mlpp_vector(i, v);
+X->row_get_into_mlpp_vector(i, v);

 y_hat.set(i, model_test(v));
 }
@@ -94,8 +94,8 @@ PoolIntArray MLPPKNN::nearest_neighbors(const Ref<MLPPVector> &x) {
 continue;
 }

-_input_set->get_row_into_mlpp_vector(j, tmpv1);
+_input_set->row_get_into_mlpp_vector(j, tmpv1);
-_input_set->get_row_into_mlpp_vector(neighbor, tmpv2);
+_input_set->row_get_into_mlpp_vector(neighbor, tmpv2);

 bool is_neighbor_nearer = alg.euclidean_distance(x, tmpv1) < alg.euclidean_distance(x, tmpv2);

@@ -178,7 +178,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const

 for (int i = 0; i < a_size.y; ++i) {
 for (int j = 0; j < b_size.y; ++j) {
-B->get_row_into_mlpp_vector(j, row_tmp);
+B->row_get_into_mlpp_vector(j, row_tmp);

 Vector<Ref<MLPPVector>> row;
 for (int k = 0; k < a_size.x; ++k) {
@@ -187,7 +187,7 @@ Ref<MLPPMatrix> MLPPLinAlg::kronecker_productnm(const Ref<MLPPMatrix> &A, const

 Ref<MLPPVector> flattened_row = flattenmnv(row);

-C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
+C->row_set_mlpp_vector(i * b_size.y + j, flattened_row);
 }
 }

@@ -689,10 +689,10 @@ Ref<MLPPMatrix> MLPPLinAlg::covnm(const Ref<MLPPMatrix> &A) {
 a_j_row_tmp->resize(a_size.x);

 for (int i = 0; i < a_size.y; ++i) {
-A->get_row_into_mlpp_vector(i, a_i_row_tmp);
+A->row_get_into_mlpp_vector(i, a_i_row_tmp);

 for (int j = 0; j < a_size.x; ++j) {
-A->get_row_into_mlpp_vector(j, a_j_row_tmp);
+A->row_get_into_mlpp_vector(j, a_j_row_tmp);

 cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
 }

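The i * b_size.y + j row index in the Kronecker-product loops places each flattened row according to the block layout of the standard definition,

\[ A \otimes B = \begin{pmatrix} a_{11} B & \cdots & a_{1n} B \\ \vdots & & \vdots \\ a_{m1} B & \cdots & a_{mn} B \end{pmatrix}, \]

so row j of B lands in output row i * b_size.y + j for every row i of A.
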
@ -51,7 +51,7 @@ void MLPPMatrix::set_data(const Array &p_from) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::add_row(const Vector<real_t> &p_row) {
|
void MLPPMatrix::row_add(const Vector<real_t> &p_row) {
|
||||||
if (p_row.size() == 0) {
|
if (p_row.size() == 0) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -76,7 +76,7 @@ void MLPPMatrix::add_row(const Vector<real_t> &p_row) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::add_row_pool_vector(const PoolRealArray &p_row) {
|
void MLPPMatrix::row_add_pool_vector(const PoolRealArray &p_row) {
|
||||||
if (p_row.size() == 0) {
|
if (p_row.size() == 0) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -102,7 +102,7 @@ void MLPPMatrix::add_row_pool_vector(const PoolRealArray &p_row) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::add_row_mlpp_vector(const Ref<MLPPVector> &p_row) {
|
void MLPPMatrix::row_add_mlpp_vector(const Ref<MLPPVector> &p_row) {
|
||||||
ERR_FAIL_COND(!p_row.is_valid());
|
ERR_FAIL_COND(!p_row.is_valid());
|
||||||
|
|
||||||
int p_row_size = p_row->size();
|
int p_row_size = p_row->size();
|
||||||
@ -131,7 +131,7 @@ void MLPPMatrix::add_row_mlpp_vector(const Ref<MLPPVector> &p_row) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::add_rows_mlpp_matrix(const Ref<MLPPMatrix> &p_other) {
|
void MLPPMatrix::rows_add_mlpp_matrix(const Ref<MLPPMatrix> &p_other) {
|
||||||
ERR_FAIL_COND(!p_other.is_valid());
|
ERR_FAIL_COND(!p_other.is_valid());
|
||||||
|
|
||||||
int other_data_size = p_other->data_size();
|
int other_data_size = p_other->data_size();
|
||||||
@ -162,7 +162,7 @@ void MLPPMatrix::add_rows_mlpp_matrix(const Ref<MLPPMatrix> &p_other) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::remove_row(int p_index) {
|
void MLPPMatrix::row_remove(int p_index) {
|
||||||
ERR_FAIL_INDEX(p_index, _size.y);
|
ERR_FAIL_INDEX(p_index, _size.y);
|
||||||
|
|
||||||
--_size.y;
|
--_size.y;
|
||||||
@ -185,7 +185,7 @@ void MLPPMatrix::remove_row(int p_index) {
|
|||||||
|
|
||||||
// Removes the item copying the last value into the position of the one to
|
// Removes the item copying the last value into the position of the one to
|
||||||
// remove. It's generally faster than `remove`.
|
// remove. It's generally faster than `remove`.
|
||||||
void MLPPMatrix::remove_row_unordered(int p_index) {
|
void MLPPMatrix::row_remove_unordered(int p_index) {
|
||||||
ERR_FAIL_INDEX(p_index, _size.y);
|
ERR_FAIL_INDEX(p_index, _size.y);
|
||||||
|
|
||||||
--_size.y;
|
--_size.y;
|
||||||
@ -211,7 +211,7 @@ void MLPPMatrix::remove_row_unordered(int p_index) {
|
|||||||
CRASH_COND_MSG(!_data, "Out of memory");
|
CRASH_COND_MSG(!_data, "Out of memory");
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::swap_row(int p_index_1, int p_index_2) {
|
void MLPPMatrix::row_swap(int p_index_1, int p_index_2) {
|
||||||
ERR_FAIL_INDEX(p_index_1, _size.y);
|
ERR_FAIL_INDEX(p_index_1, _size.y);
|
||||||
ERR_FAIL_INDEX(p_index_2, _size.y);
|
ERR_FAIL_INDEX(p_index_2, _size.y);
|
||||||
|
|
||||||
@ -241,7 +241,7 @@ void MLPPMatrix::resize(const Size2i &p_size) {
|
|||||||
CRASH_COND_MSG(!_data, "Out of memory");
|
CRASH_COND_MSG(!_data, "Out of memory");
|
||||||
}
|
}
|
||||||
|
|
||||||
Vector<real_t> MLPPMatrix::get_row_vector(int p_index_y) const {
|
Vector<real_t> MLPPMatrix::row_get_vector(int p_index_y) const {
|
||||||
ERR_FAIL_INDEX_V(p_index_y, _size.y, Vector<real_t>());
|
ERR_FAIL_INDEX_V(p_index_y, _size.y, Vector<real_t>());
|
||||||
|
|
||||||
Vector<real_t> ret;
|
Vector<real_t> ret;
|
||||||
@ -263,7 +263,7 @@ Vector<real_t> MLPPMatrix::get_row_vector(int p_index_y) const {
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
PoolRealArray MLPPMatrix::get_row_pool_vector(int p_index_y) const {
|
PoolRealArray MLPPMatrix::row_get_pool_vector(int p_index_y) const {
|
||||||
ERR_FAIL_INDEX_V(p_index_y, _size.y, PoolRealArray());
|
ERR_FAIL_INDEX_V(p_index_y, _size.y, PoolRealArray());
|
||||||
|
|
||||||
PoolRealArray ret;
|
PoolRealArray ret;
|
||||||
@ -286,7 +286,7 @@ PoolRealArray MLPPMatrix::get_row_pool_vector(int p_index_y) const {
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ref<MLPPVector> MLPPMatrix::get_row_mlpp_vector(int p_index_y) const {
|
Ref<MLPPVector> MLPPMatrix::row_get_mlpp_vector(int p_index_y) const {
|
||||||
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<MLPPVector>());
|
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<MLPPVector>());
|
||||||
|
|
||||||
Ref<MLPPVector> ret;
|
Ref<MLPPVector> ret;
|
||||||
@ -309,7 +309,7 @@ Ref<MLPPVector> MLPPMatrix::get_row_mlpp_vector(int p_index_y) const {
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::get_row_into_mlpp_vector(int p_index_y, Ref<MLPPVector> target) const {
|
void MLPPMatrix::row_get_into_mlpp_vector(int p_index_y, Ref<MLPPVector> target) const {
|
||||||
ERR_FAIL_COND(!target.is_valid());
|
ERR_FAIL_COND(!target.is_valid());
|
||||||
ERR_FAIL_INDEX(p_index_y, _size.y);
|
ERR_FAIL_INDEX(p_index_y, _size.y);
|
||||||
|
|
||||||
@ -326,7 +326,7 @@ void MLPPMatrix::get_row_into_mlpp_vector(int p_index_y, Ref<MLPPVector> target)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::set_row_vector(int p_index_y, const Vector<real_t> &p_row) {
|
void MLPPMatrix::row_set_vector(int p_index_y, const Vector<real_t> &p_row) {
|
||||||
ERR_FAIL_COND(p_row.size() != _size.x);
|
ERR_FAIL_COND(p_row.size() != _size.x);
|
||||||
ERR_FAIL_INDEX(p_index_y, _size.y);
|
ERR_FAIL_INDEX(p_index_y, _size.y);
|
||||||
|
|
||||||
@ -339,7 +339,7 @@ void MLPPMatrix::set_row_vector(int p_index_y, const Vector<real_t> &p_row) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::set_row_pool_vector(int p_index_y, const PoolRealArray &p_row) {
|
void MLPPMatrix::row_set_pool_vector(int p_index_y, const PoolRealArray &p_row) {
|
||||||
ERR_FAIL_COND(p_row.size() != _size.x);
|
ERR_FAIL_COND(p_row.size() != _size.x);
|
||||||
ERR_FAIL_INDEX(p_index_y, _size.y);
|
ERR_FAIL_INDEX(p_index_y, _size.y);
|
||||||
|
|
||||||
@ -353,7 +353,7 @@ void MLPPMatrix::set_row_pool_vector(int p_index_y, const PoolRealArray &p_row)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::set_row_mlpp_vector(int p_index_y, const Ref<MLPPVector> &p_row) {
|
void MLPPMatrix::row_set_mlpp_vector(int p_index_y, const Ref<MLPPVector> &p_row) {
|
||||||
ERR_FAIL_COND(!p_row.is_valid());
|
ERR_FAIL_COND(!p_row.is_valid());
|
||||||
ERR_FAIL_COND(p_row->size() != _size.x);
|
ERR_FAIL_COND(p_row->size() != _size.x);
|
||||||
ERR_FAIL_INDEX(p_index_y, _size.y);
|
ERR_FAIL_INDEX(p_index_y, _size.y);
|
||||||
@ -897,7 +897,7 @@ void MLPPMatrix::kronecker_product(const Ref<MLPPMatrix> &B) {
|
|||||||
|
|
||||||
for (int i = 0; i < _size.y; ++i) {
|
for (int i = 0; i < _size.y; ++i) {
|
||||||
for (int j = 0; j < b_size.y; ++j) {
|
for (int j = 0; j < b_size.y; ++j) {
|
||||||
B->get_row_into_mlpp_vector(j, row_tmp);
|
B->row_get_into_mlpp_vector(j, row_tmp);
|
||||||
|
|
||||||
Vector<Ref<MLPPVector>> row;
|
Vector<Ref<MLPPVector>> row;
|
||||||
for (int k = 0; k < _size.x; ++k) {
|
for (int k = 0; k < _size.x; ++k) {
|
||||||
@ -906,7 +906,7 @@ void MLPPMatrix::kronecker_product(const Ref<MLPPMatrix> &B) {
|
|||||||
|
|
||||||
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
||||||
|
|
||||||
set_row_mlpp_vector(i * b_size.y + j, flattened_row);
|
row_set_mlpp_vector(i * b_size.y + j, flattened_row);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -941,7 +941,7 @@ Ref<MLPPMatrix> MLPPMatrix::kronecker_productn(const Ref<MLPPMatrix> &B) const {
|
|||||||
|
|
||||||
for (int i = 0; i < a_size.y; ++i) {
|
for (int i = 0; i < a_size.y; ++i) {
|
||||||
for (int j = 0; j < b_size.y; ++j) {
|
for (int j = 0; j < b_size.y; ++j) {
|
||||||
B->get_row_into_mlpp_vector(j, row_tmp);
|
B->row_get_into_mlpp_vector(j, row_tmp);
|
||||||
|
|
||||||
Vector<Ref<MLPPVector>> row;
|
Vector<Ref<MLPPVector>> row;
|
||||||
for (int k = 0; k < a_size.x; ++k) {
|
for (int k = 0; k < a_size.x; ++k) {
|
||||||
@ -950,7 +950,7 @@ Ref<MLPPMatrix> MLPPMatrix::kronecker_productn(const Ref<MLPPMatrix> &B) const {
|
|||||||
|
|
||||||
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
||||||
|
|
||||||
C->set_row_mlpp_vector(i * b_size.y + j, flattened_row);
|
C->row_set_mlpp_vector(i * b_size.y + j, flattened_row);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -985,7 +985,7 @@ void MLPPMatrix::kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatr
|
|||||||
|
|
||||||
for (int i = 0; i < a_size.y; ++i) {
|
for (int i = 0; i < a_size.y; ++i) {
|
||||||
for (int j = 0; j < b_size.y; ++j) {
|
for (int j = 0; j < b_size.y; ++j) {
|
||||||
B->get_row_into_mlpp_vector(j, row_tmp);
|
B->row_get_into_mlpp_vector(j, row_tmp);
|
||||||
|
|
||||||
Vector<Ref<MLPPVector>> row;
|
Vector<Ref<MLPPVector>> row;
|
||||||
for (int k = 0; k < a_size.x; ++k) {
|
for (int k = 0; k < a_size.x; ++k) {
|
||||||
@ -994,7 +994,7 @@ void MLPPMatrix::kronecker_productb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatr
|
|||||||
|
|
||||||
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
Ref<MLPPVector> flattened_row = row_tmp->flatten_vectorsn(row);
|
||||||
|
|
||||||
set_row_mlpp_vector(i * b_size.y + j, flattened_row);
|
row_set_mlpp_vector(i * b_size.y + j, flattened_row);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1696,7 +1696,7 @@ void MLPPMatrix::pinverseo(Ref<MLPPMatrix> out) const {
|
|||||||
out->set_from_mlpp_matrix(multn(Ref<MLPPMatrix>(this))->transposen()->inverse()->multn(transposen()));
|
out->set_from_mlpp_matrix(multn(Ref<MLPPMatrix>(this))->transposen()->inverse()->multn(transposen()));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ref<MLPPMatrix> MLPPMatrix::zero_mat(int n, int m) const {
|
Ref<MLPPMatrix> MLPPMatrix::matn_zero(int n, int m) const {
|
||||||
Ref<MLPPMatrix> mat;
|
Ref<MLPPMatrix> mat;
|
||||||
mat.instance();
|
mat.instance();
|
||||||
|
|
||||||
@ -1705,7 +1705,7 @@ Ref<MLPPMatrix> MLPPMatrix::zero_mat(int n, int m) const {
|
|||||||
|
|
||||||
return mat;
|
return mat;
|
||||||
}
|
}
|
||||||
Ref<MLPPMatrix> MLPPMatrix::one_mat(int n, int m) const {
|
Ref<MLPPMatrix> MLPPMatrix::matn_one(int n, int m) const {
|
||||||
Ref<MLPPMatrix> mat;
|
Ref<MLPPMatrix> mat;
|
||||||
mat.instance();
|
mat.instance();
|
||||||
|
|
||||||
@ -1714,7 +1714,7 @@ Ref<MLPPMatrix> MLPPMatrix::one_mat(int n, int m) const {
|
|||||||
|
|
||||||
return mat;
|
return mat;
|
||||||
}
|
}
|
||||||
Ref<MLPPMatrix> MLPPMatrix::full_mat(int n, int m, int k) const {
|
Ref<MLPPMatrix> MLPPMatrix::matn_full(int n, int m, int k) const {
|
||||||
Ref<MLPPMatrix> mat;
|
Ref<MLPPMatrix> mat;
|
||||||
mat.instance();
|
mat.instance();
|
||||||
|
|
||||||
@ -1963,10 +1963,10 @@ Ref<MLPPMatrix> MLPPMatrix::cov() const {
|
|||||||
a_j_row_tmp->resize(_size.x);
|
a_j_row_tmp->resize(_size.x);
|
||||||
|
|
||||||
for (int i = 0; i < _size.y; ++i) {
|
for (int i = 0; i < _size.y; ++i) {
|
||||||
get_row_into_mlpp_vector(i, a_i_row_tmp);
|
row_get_into_mlpp_vector(i, a_i_row_tmp);
|
||||||
|
|
||||||
for (int j = 0; j < _size.x; ++j) {
|
for (int j = 0; j < _size.x; ++j) {
|
||||||
get_row_into_mlpp_vector(j, a_j_row_tmp);
|
row_get_into_mlpp_vector(j, a_j_row_tmp);
|
||||||
|
|
||||||
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
|
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
|
||||||
}
|
}
|
||||||
@ -1992,10 +1992,10 @@ void MLPPMatrix::covo(Ref<MLPPMatrix> out) const {
|
|||||||
a_j_row_tmp->resize(_size.x);
|
a_j_row_tmp->resize(_size.x);
|
||||||
|
|
||||||
for (int i = 0; i < _size.y; ++i) {
|
for (int i = 0; i < _size.y; ++i) {
|
||||||
get_row_into_mlpp_vector(i, a_i_row_tmp);
|
row_get_into_mlpp_vector(i, a_i_row_tmp);
|
||||||
|
|
||||||
for (int j = 0; j < _size.x; ++j) {
|
for (int j = 0; j < _size.x; ++j) {
|
||||||
get_row_into_mlpp_vector(j, a_j_row_tmp);
|
row_get_into_mlpp_vector(j, a_j_row_tmp);
|
||||||
|
|
||||||
out->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
|
out->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
|
||||||
}
|
}
|
||||||
@ -2310,7 +2310,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svd() const {
|
|||||||
EigenResult right_eigen = transposen()->multn(Ref<MLPPMatrix>(this))->eigen();
|
EigenResult right_eigen = transposen()->multn(Ref<MLPPMatrix>(this))->eigen();
|
||||||
|
|
||||||
Ref<MLPPMatrix> singularvals = left_eigen.eigen_values->sqrtn();
|
Ref<MLPPMatrix> singularvals = left_eigen.eigen_values->sqrtn();
|
||||||
Ref<MLPPMatrix> sigma = zero_mat(_size.y, _size.x);
|
Ref<MLPPMatrix> sigma = matn_zero(_size.y, _size.x);
|
||||||
|
|
||||||
Size2i singularvals_size = singularvals->size();
|
Size2i singularvals_size = singularvals->size();
|
||||||
|
|
||||||
@ -2338,7 +2338,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svdb(const Ref<MLPPMatrix> &A) const {
|
|||||||
EigenResult right_eigen = A->transposen()->multn(A)->eigen();
|
EigenResult right_eigen = A->transposen()->multn(A)->eigen();
|
||||||
|
|
||||||
Ref<MLPPMatrix> singularvals = left_eigen.eigen_values->sqrtn();
|
Ref<MLPPMatrix> singularvals = left_eigen.eigen_values->sqrtn();
|
||||||
Ref<MLPPMatrix> sigma = zero_mat(a_size.y, a_size.x);
|
Ref<MLPPMatrix> sigma = matn_zero(a_size.y, a_size.x);
|
||||||
|
|
||||||
Size2i singularvals_size = singularvals->size();
|
Size2i singularvals_size = singularvals->size();
|
||||||
|
|
||||||
@ -2706,7 +2706,7 @@ Ref<MLPPMatrix> MLPPMatrix::outer_productn(const Ref<MLPPVector> &a, const Ref<M
|
|||||||
return C;
|
return C;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MLPPMatrix::set_diagonal(const Ref<MLPPVector> &a) {
|
void MLPPMatrix::diagonal_set(const Ref<MLPPVector> &a) {
|
||||||
ERR_FAIL_COND(!a.is_valid());
|
ERR_FAIL_COND(!a.is_valid());
|
||||||
|
|
||||||
int a_size = a->size();
|
int a_size = a->size();
|
||||||
@ -2724,7 +2724,7 @@ void MLPPMatrix::set_diagonal(const Ref<MLPPVector> &a) {
|
|||||||
b_ptr[calculate_index(i, i)] = a_ptr[i];
|
b_ptr[calculate_index(i, i)] = a_ptr[i];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ref<MLPPMatrix> MLPPMatrix::set_diagonaln(const Ref<MLPPVector> &a) const {
|
Ref<MLPPMatrix> MLPPMatrix::diagonal_setn(const Ref<MLPPVector> &a) const {
|
||||||
ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPMatrix>());
|
ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPMatrix>());
|
||||||
|
|
||||||
Ref<MLPPMatrix> B = duplicate_fast();
|
Ref<MLPPMatrix> B = duplicate_fast();
|
||||||
@@ -3001,13 +3001,13 @@ void MLPPMatrix::_bind_methods() {
 ClassDB::bind_method(D_METHOD("set_data", "data"), &MLPPMatrix::set_data);
 ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");

-ClassDB::bind_method(D_METHOD("add_row", "row"), &MLPPMatrix::add_row_pool_vector);
-ClassDB::bind_method(D_METHOD("add_row_mlpp_vector", "row"), &MLPPMatrix::add_row_mlpp_vector);
-ClassDB::bind_method(D_METHOD("add_rows_mlpp_matrix", "other"), &MLPPMatrix::add_rows_mlpp_matrix);
+ClassDB::bind_method(D_METHOD("row_add", "row"), &MLPPMatrix::row_add_pool_vector);
+ClassDB::bind_method(D_METHOD("row_add_mlpp_vector", "row"), &MLPPMatrix::row_add_mlpp_vector);
+ClassDB::bind_method(D_METHOD("rows_add_mlpp_matrix", "other"), &MLPPMatrix::rows_add_mlpp_matrix);

-ClassDB::bind_method(D_METHOD("remove_row", "index"), &MLPPMatrix::remove_row);
-ClassDB::bind_method(D_METHOD("remove_row_unordered", "index"), &MLPPMatrix::remove_row_unordered);
-ClassDB::bind_method(D_METHOD("swap_row", "index_1", "index_2"), &MLPPMatrix::swap_row);
+ClassDB::bind_method(D_METHOD("row_remove", "index"), &MLPPMatrix::row_remove);
+ClassDB::bind_method(D_METHOD("row_remove_unordered", "index"), &MLPPMatrix::row_remove_unordered);
+ClassDB::bind_method(D_METHOD("row_swap", "index_1", "index_2"), &MLPPMatrix::row_swap);

 ClassDB::bind_method(D_METHOD("clear"), &MLPPMatrix::clear);
 ClassDB::bind_method(D_METHOD("reset"), &MLPPMatrix::reset);

@@ -3024,12 +3024,12 @@ void MLPPMatrix::_bind_methods() {
 ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x"), &MLPPMatrix::element_get);
 ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "val"), &MLPPMatrix::element_set);

-ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y"), &MLPPMatrix::get_row_pool_vector);
-ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y"), &MLPPMatrix::get_row_mlpp_vector);
-ClassDB::bind_method(D_METHOD("get_row_into_mlpp_vector", "index_y", "target"), &MLPPMatrix::get_row_into_mlpp_vector);
+ClassDB::bind_method(D_METHOD("row_get_pool_vector", "index_y"), &MLPPMatrix::row_get_pool_vector);
+ClassDB::bind_method(D_METHOD("row_get_mlpp_vector", "index_y"), &MLPPMatrix::row_get_mlpp_vector);
+ClassDB::bind_method(D_METHOD("row_get_into_mlpp_vector", "index_y", "target"), &MLPPMatrix::row_get_into_mlpp_vector);

-ClassDB::bind_method(D_METHOD("set_row_pool_vector", "index_y", "row"), &MLPPMatrix::set_row_pool_vector);
-ClassDB::bind_method(D_METHOD("set_row_mlpp_vector", "index_y", "row"), &MLPPMatrix::set_row_mlpp_vector);
+ClassDB::bind_method(D_METHOD("row_set_pool_vector", "index_y", "row"), &MLPPMatrix::row_set_pool_vector);
+ClassDB::bind_method(D_METHOD("row_set_mlpp_vector", "index_y", "row"), &MLPPMatrix::row_set_mlpp_vector);

 ClassDB::bind_method(D_METHOD("fill", "val"), &MLPPMatrix::fill);

@@ -3134,9 +3134,9 @@ void MLPPMatrix::_bind_methods() {
 ClassDB::bind_method(D_METHOD("pinverse"), &MLPPMatrix::pinverse);
 ClassDB::bind_method(D_METHOD("pinverseo", "out"), &MLPPMatrix::pinverseo);

-ClassDB::bind_method(D_METHOD("zero_mat", "n", "m"), &MLPPMatrix::zero_mat);
-ClassDB::bind_method(D_METHOD("one_mat", "n", "m"), &MLPPMatrix::one_mat);
-ClassDB::bind_method(D_METHOD("full_mat", "n", "m", "k"), &MLPPMatrix::full_mat);
+ClassDB::bind_method(D_METHOD("matn_zero", "n", "m"), &MLPPMatrix::matn_zero);
+ClassDB::bind_method(D_METHOD("matn_one", "n", "m"), &MLPPMatrix::matn_one);
+ClassDB::bind_method(D_METHOD("matn_full", "n", "m", "k"), &MLPPMatrix::matn_full);

 ClassDB::bind_method(D_METHOD("sin"), &MLPPMatrix::sin);
 ClassDB::bind_method(D_METHOD("sinn"), &MLPPMatrix::sinn);

@@ -3176,8 +3176,8 @@ void MLPPMatrix::_bind_methods() {
 ClassDB::bind_method(D_METHOD("outer_product", "a", "b"), &MLPPMatrix::outer_product);
 ClassDB::bind_method(D_METHOD("outer_productn", "a", "b"), &MLPPMatrix::outer_productn);

-ClassDB::bind_method(D_METHOD("set_diagonal", "a"), &MLPPMatrix::set_diagonal);
-ClassDB::bind_method(D_METHOD("set_diagonaln", "a"), &MLPPMatrix::set_diagonaln);
+ClassDB::bind_method(D_METHOD("diagonal_set", "a"), &MLPPMatrix::diagonal_set);
+ClassDB::bind_method(D_METHOD("diagonal_setn", "a"), &MLPPMatrix::diagonal_setn);

 ClassDB::bind_method(D_METHOD("diagonal_zeroed", "a"), &MLPPMatrix::diagonal_zeroed);
 ClassDB::bind_method(D_METHOD("diagonal_zeroedn", "a"), &MLPPMatrix::diagonal_zeroedn);

@@ -31,18 +31,18 @@ public:
 return _data;
 }

-void add_row(const Vector<real_t> &p_row);
-void add_row_pool_vector(const PoolRealArray &p_row);
-void add_row_mlpp_vector(const Ref<MLPPVector> &p_row);
-void add_rows_mlpp_matrix(const Ref<MLPPMatrix> &p_other);
+void row_add(const Vector<real_t> &p_row);
+void row_add_pool_vector(const PoolRealArray &p_row);
+void row_add_mlpp_vector(const Ref<MLPPVector> &p_row);
+void rows_add_mlpp_matrix(const Ref<MLPPMatrix> &p_other);

-void remove_row(int p_index);
+void row_remove(int p_index);

 // Removes the item copying the last value into the position of the one to
 // remove. It's generally faster than `remove`.
-void remove_row_unordered(int p_index);
+void row_remove_unordered(int p_index);

-void swap_row(int p_index_1, int p_index_2);
+void row_swap(int p_index_1, int p_index_2);

 _FORCE_INLINE_ void clear() { resize(Size2i()); }
 _FORCE_INLINE_ void reset() {
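The comment kept in this hunk explains why row_remove_unordered exists: the last row is copied over the row being removed and the matrix shrinks by one row, so nothing in between has to shift. A minimal sketch of that idea, assuming the row-major layout `_data[y * _size.x + x]` used elsewhere in this header (hypothetical helper, not the commit's implementation):

// Unordered removal sketch: one O(cols) copy instead of moving every following row.
static void row_remove_unordered_sketch(real_t *data, int &rows, int cols, int index) {
	const int last = rows - 1;
	if (index != last) {
		for (int x = 0; x < cols; ++x) {
			data[index * cols + x] = data[last * cols + x]; // overwrite the removed row with the last row
		}
	}
	rows = last; // drop the now-duplicated last row; row order is not preserved
}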
@@ -98,14 +98,14 @@ public:
 _data[p_index_y * _size.x + p_index_x] = p_val;
 }

-Vector<real_t> get_row_vector(int p_index_y) const;
-PoolRealArray get_row_pool_vector(int p_index_y) const;
-Ref<MLPPVector> get_row_mlpp_vector(int p_index_y) const;
-void get_row_into_mlpp_vector(int p_index_y, Ref<MLPPVector> target) const;
+Vector<real_t> row_get_vector(int p_index_y) const;
+PoolRealArray row_get_pool_vector(int p_index_y) const;
+Ref<MLPPVector> row_get_mlpp_vector(int p_index_y) const;
+void row_get_into_mlpp_vector(int p_index_y, Ref<MLPPVector> target) const;

-void set_row_vector(int p_index_y, const Vector<real_t> &p_row);
-void set_row_pool_vector(int p_index_y, const PoolRealArray &p_row);
-void set_row_mlpp_vector(int p_index_y, const Ref<MLPPVector> &p_row);
+void row_set_vector(int p_index_y, const Vector<real_t> &p_row);
+void row_set_pool_vector(int p_index_y, const PoolRealArray &p_row);
+void row_set_mlpp_vector(int p_index_y, const Ref<MLPPVector> &p_row);

 void fill(real_t p_val);

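The renamed row accessors are designed to be paired: row_get_into_mlpp_vector() fills a caller-owned MLPPVector without allocating, and row_set_mlpp_vector() writes one back. A short usage sketch under the new names (the `src`/`dst` matrices are illustrative and assumed to share a column count):

Ref<MLPPVector> row_tmp;
row_tmp.instance();

src->row_get_into_mlpp_vector(2, row_tmp); // read row 2 of src into the reusable buffer
dst->row_set_mlpp_vector(0, row_tmp);      // overwrite row 0 of dst with that row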
@@ -215,9 +215,9 @@ public:
 Ref<MLPPMatrix> pinverse() const;
 void pinverseo(Ref<MLPPMatrix> out) const;

-Ref<MLPPMatrix> zero_mat(int n, int m) const;
-Ref<MLPPMatrix> one_mat(int n, int m) const;
-Ref<MLPPMatrix> full_mat(int n, int m, int k) const;
+Ref<MLPPMatrix> matn_zero(int n, int m) const;
+Ref<MLPPMatrix> matn_one(int n, int m) const;
+Ref<MLPPMatrix> matn_full(int n, int m, int k) const;

 void sin();
 Ref<MLPPMatrix> sinn() const;
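A hedged reading of the renamed factory helpers, going only by their parameter names (which of `n`/`m` maps to rows vs. columns is an assumption, not stated in this hunk):

Ref<MLPPMatrix> z = mat->matn_zero(2, 3);    // n x m matrix of zeros
Ref<MLPPMatrix> o = mat->matn_one(2, 3);     // n x m matrix of ones
Ref<MLPPMatrix> f = mat->matn_full(2, 3, 7); // n x m matrix with every entry set to k (= 7 here)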
@@ -317,8 +317,8 @@ public:
 Ref<MLPPMatrix> outer_productn(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) const;

 // Just sets the diagonal
-void set_diagonal(const Ref<MLPPVector> &a);
-Ref<MLPPMatrix> set_diagonaln(const Ref<MLPPVector> &a) const;
+void diagonal_set(const Ref<MLPPVector> &a);
+Ref<MLPPMatrix> diagonal_setn(const Ref<MLPPVector> &a) const;

 // Sets the diagonals, everything else will get zeroed
 void diagonal_zeroed(const Ref<MLPPVector> &a);
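The two comments spell out the difference between the in-place diagonal setters: diagonal_set() only overwrites the diagonal entries, while diagonal_zeroed() also clears everything off the diagonal. With assumed example values (not from the commit), for a 3x3 matrix m filled with ones and a vector d = {7, 8, 9}:

m->diagonal_set(d);    // off-diagonal ones are kept:     [[7,1,1],[1,8,1],[1,1,9]]
m->diagonal_zeroed(d); // off-diagonal entries are zeroed: [[7,0,0],[0,8,0],[0,0,9]]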
@@ -181,7 +181,7 @@ void MLPPLinReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_element_set = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_element_set);

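Every SGD hunk in this commit touches the same per-sample pattern: draw a random row index, copy that row into a preallocated vector, and wrap the matching label for the cost call. A sketch of that loop body with the renamed accessor (identifier names are illustrative; `distribution`/`generator` are the <random> objects already visible in the surrounding code):

int output_index = distribution(generator);                             // uniform index into the training set

_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp); // reuse one row buffer per iteration
real_t label = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, label);                              // 1-element vector expected by the cost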
@@ -676,7 +676,7 @@ void MLPPLinReg::normal_equation() {
 x_means->resize(input_set_t->size().y);

 for (int i = 0; i < input_set_t->size().y; i++) {
-input_set_t->get_row_into_mlpp_vector(i, input_set_t_row_tmp);
+input_set_t->row_get_into_mlpp_vector(i, input_set_t_row_tmp);

 x_means->element_set(i, stat.meanv(input_set_t_row_tmp));
 }

@@ -177,7 +177,7 @@ void MLPPLogReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_row_tmp);
 real_t output_element_set = _output_set->element_get(output_index);
 output_element_set_tmp->element_set(0, output_element_set);

@@ -172,7 +172,7 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
 while (true) {
 int output_Index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_Index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_Index, input_set_row_tmp);
 real_t output_element = _output_set->element_get(output_Index);
 output_set_row_tmp->element_set(0, output_element);

@@ -56,7 +56,7 @@ Ref<MLPPVector> MLPPMultinomialNB::model_set_test(const Ref<MLPPMatrix> &X) {
 y_hat->resize(x_size.y);

 for (int i = 0; i < x_size.y; i++) {
-X->get_row_into_mlpp_vector(i, x_row_tmp);
+X->row_get_into_mlpp_vector(i, x_row_tmp);

 y_hat->element_set(i, model_test(x_row_tmp));
 }

@@ -30,7 +30,7 @@ Vector<Vector<real_t>> MLPPOutlierFinder::model_set_test(const Ref<MLPPMatrix> &
 input_set_i_row_tmp->resize(input_set_size.x);

 for (int i = 0; i < input_set_size.y; ++i) {
-input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
+input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
 real_t meanv = stat.meanv(input_set_i_row_tmp);
 real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);

@@ -75,7 +75,7 @@ PoolVector2iArray MLPPOutlierFinder::model_set_test_indices(const Ref<MLPPMatrix
 input_set_i_row_tmp->resize(input_set_size.x);

 for (int i = 0; i < input_set_size.y; ++i) {
-input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
+input_set->row_get_into_mlpp_vector(i, input_set_i_row_tmp);
 real_t meanv = stat.meanv(input_set_i_row_tmp);
 real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);

@@ -69,8 +69,8 @@ real_t MLPPPCA::score() {
 x_normalized_row_tmp->resize(x_normalized_size.x);

 for (int i = 0; i < x_normalized_size_y; ++i) {
-_x_normalized->get_row_into_mlpp_vector(i, x_normalized_row_tmp);
-x_approx->get_row_into_mlpp_vector(i, x_approx_row_tmp);
+_x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);
+x_approx->row_get_into_mlpp_vector(i, x_approx_row_tmp);

 num += alg.norm_sqv(alg.subtractionnv(x_normalized_row_tmp, x_approx_row_tmp));
 }

@@ -78,7 +78,7 @@ real_t MLPPPCA::score() {
 num /= x_normalized_size_y;

 for (int i = 0; i < x_normalized_size_y; ++i) {
-_x_normalized->get_row_into_mlpp_vector(i, x_normalized_row_tmp);
+_x_normalized->row_get_into_mlpp_vector(i, x_normalized_row_tmp);

 den += alg.norm_sqv(x_normalized_row_tmp);
 }

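For reference, the two loops in MLPPPCA::score() accumulate, with N = x_normalized_size_y, x_i the i-th normalized row and \hat{x}_i the corresponding row of x_approx:

num = \frac{1}{N} \sum_{i=1}^{N} \lVert x_i - \hat{x}_i \rVert^2 \qquad den = \sum_{i=1}^{N} \lVert x_i \rVert^2

How the two quantities are combined into the returned score lies outside these hunks.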
@@ -172,7 +172,7 @@ void MLPPProbitReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_set_entry = _output_set->element_get(output_index);

 real_t y_hat = evaluatev(input_set_row_tmp);

@@ -213,12 +213,12 @@ void MLPPSoftmaxNet::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
-_output_set->get_row_into_mlpp_vector(output_index, output_set_row_tmp);
-output_row_mat_tmp->set_row_mlpp_vector(0, output_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
+_output_set->row_get_into_mlpp_vector(output_index, output_set_row_tmp);
+output_row_mat_tmp->row_set_mlpp_vector(0, output_set_row_tmp);

 Ref<MLPPVector> y_hat = evaluatev(input_set_row_tmp);
-y_hat_mat_tmp->set_row_mlpp_vector(0, y_hat);
+y_hat_mat_tmp->row_set_mlpp_vector(0, y_hat);

 PropagateVResult prop_res = propagatev(input_set_row_tmp);

@@ -160,14 +160,14 @@ void MLPPSoftmaxReg::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 real_t output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);

 Ref<MLPPVector> y_hat = evaluatev(input_set_row_tmp);
 y_hat_matrix_tmp->resize(Size2i(y_hat->size(), 1));
-y_hat_matrix_tmp->set_row_mlpp_vector(0, y_hat);
+y_hat_matrix_tmp->row_set_mlpp_vector(0, y_hat);

-_output_set->get_row_into_mlpp_vector(output_index, output_set_row_tmp);
-output_set_row_matrix_tmp->set_row_mlpp_vector(0, output_set_row_tmp);
+_output_set->row_get_into_mlpp_vector(output_index, output_set_row_tmp);
+output_set_row_matrix_tmp->row_set_mlpp_vector(0, output_set_row_tmp);

 cost_prev = cost(y_hat_matrix_tmp, output_set_row_matrix_tmp);

@@ -149,7 +149,7 @@ void MLPPSVC::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);

 real_t output_set_indx = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_set_indx);
@@ -197,7 +197,7 @@ void MLPPTanhReg::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
 while (true) {
 int output_index = distribution(generator);

-_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
 real_t output_set_entry = _output_set->element_get(output_index);
 output_set_row_tmp->element_set(0, output_set_entry);

@@ -618,8 +618,8 @@ Vector<Ref<MLPPMatrix>> MLPPUtilities::create_mini_batchesm(const Ref<MLPPMatrix
 current_input_set->resize(Size2i(size.x, mini_batch_element_count));

 for (int j = 0; j < mini_batch_element_count; j++) {
-input_set->get_row_into_mlpp_vector(mini_batch_start_offset + j, row_tmp);
-current_input_set->set_row_mlpp_vector(j, row_tmp);
+input_set->row_get_into_mlpp_vector(mini_batch_start_offset + j, row_tmp);
+current_input_set->row_set_mlpp_vector(j, row_tmp);
 }

 input_mini_batches.push_back(current_input_set);

@@ -660,8 +660,8 @@ MLPPUtilities::CreateMiniBatchMVBatch MLPPUtilities::create_mini_batchesmv(const
 for (int j = 0; j < mini_batch_element_count; j++) {
 int main_indx = mini_batch_start_offset + j;

-input_set->get_row_into_mlpp_vector(main_indx, row_tmp);
-current_input_set->set_row_mlpp_vector(j, row_tmp);
+input_set->row_get_into_mlpp_vector(main_indx, row_tmp);
+current_input_set->row_set_mlpp_vector(j, row_tmp);

 current_output_set->element_set(j, output_set->element_get(j));
 }

@@ -711,11 +711,11 @@ MLPPUtilities::CreateMiniBatchMMBatch MLPPUtilities::create_mini_batchesmm(const
 for (int j = 0; j < mini_batch_element_count; j++) {
 int main_indx = mini_batch_start_offset + j;

-input_set->get_row_into_mlpp_vector(main_indx, input_row_tmp);
-current_input_set->set_row_mlpp_vector(j, input_row_tmp);
+input_set->row_get_into_mlpp_vector(main_indx, input_row_tmp);
+current_input_set->row_set_mlpp_vector(j, input_row_tmp);

-output_set->get_row_into_mlpp_vector(main_indx, output_row_tmp);
-current_output_set->set_row_mlpp_vector(j, output_row_tmp);
+output_set->row_get_into_mlpp_vector(main_indx, output_row_tmp);
+current_output_set->row_set_mlpp_vector(j, output_row_tmp);
 }

 ret.input_sets.push_back(current_input_set);

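All three mini-batch helpers share the same copy loop: read row mini_batch_start_offset + j of the full set and write it into row j of the current batch. A sketch of that slice with the renamed API (matrix names are illustrative; `current_batch` is assumed to be resized to mini_batch_element_count rows beforehand):

for (int j = 0; j < mini_batch_element_count; j++) {
	int main_indx = mini_batch_start_offset + j;              // source row in the full data set

	input_set->row_get_into_mlpp_vector(main_indx, row_tmp);  // read into the shared row buffer
	current_batch->row_set_mlpp_vector(j, row_tmp);           // write into the batch matrix
}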
@@ -66,7 +66,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 for (int i = 0; i < CRITIC_INTERATIONS; i++) {
 generator_input_set = alg.gaussian_noise(_n, _k);
 discriminator_input_set->set_from_mlpp_matrix(model_set_test_generator(generator_input_set));
-discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
+discriminator_input_set->rows_add_mlpp_matrix(_output_set); // Fake + real inputs.

 ly_hat = model_set_test_discriminator(discriminator_input_set);
 loutput_set = alg.scalar_multiplynv(-1, alg.onevecnv(_n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1

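The trailing comment is the substance of this hunk: where a plain GAN labels real/fake samples 1/0, the WGAN critic here works with 1/-1, and `loutput_set = -1 * onevec(_n)` presumably supplies the -1 half of those labels (the rest of the label construction is outside this hunk). With +-1 labels, a label-weighted sum of critic outputs approximates the usual Wasserstein objective

\max_D \; \mathbb{E}_{x \sim p_{data}}[D(x)] - \mathbb{E}_{z}[D(G(z))]

which is a standard reading of the comment, not something spelled out in this commit.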
@@ -14,19 +14,19 @@ void MLPPMatrixTests::run_tests() {
 PLOG_MSG("test_mlpp_matrix()");
 test_mlpp_matrix();

-PLOG_MSG("test_add_row()");
-test_add_row();
-PLOG_MSG("test_add_row_pool_vector()");
-test_add_row_pool_vector();
-PLOG_MSG("test_add_row_mlpp_vector()");
-test_add_row_mlpp_vector();
-PLOG_MSG("test_add_rows_mlpp_matrix()");
-test_add_rows_mlpp_matrix();
+PLOG_MSG("test_row_add()");
+test_row_add();
+PLOG_MSG("test_row_add_pool_vector()");
+test_row_add_pool_vector();
+PLOG_MSG("test_row_add_mlpp_vector()");
+test_row_add_mlpp_vector();
+PLOG_MSG("test_rows_add_mlpp_matrix()");
+test_rows_add_mlpp_matrix();

-PLOG_MSG("test_remove_row()");
-test_remove_row();
-PLOG_MSG("test_remove_row_unordered()");
-test_remove_row_unordered();
+PLOG_MSG("test_row_remove()");
+test_row_remove();
+PLOG_MSG("test_row_remove_unordered()");
+test_row_remove_unordered();

 PLOG_MSG("test_mlpp_matrix_mul()");
 test_mlpp_matrix_mul();
@@ -55,7 +55,7 @@ void MLPPMatrixTests::test_mlpp_matrix() {
 is_approx_equals_mat(rmat, rmat2, "re-set_from_std_vectors test.");
 }

-void MLPPMatrixTests::test_add_row() {
+void MLPPMatrixTests::test_row_add() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 };

@@ -92,16 +92,16 @@ void MLPPMatrixTests::test_add_row() {
 Ref<MLPPMatrix> rmat;
 rmat.instance();

-rmat->add_row(rv);
-is_approx_equals_mat(rmata, rmat, "rmat->add_row(rv);");
+rmat->row_add(rv);
+is_approx_equals_mat(rmata, rmat, "rmat->row_add(rv);");

-rmat->add_row(rv);
-is_approx_equals_mat(rmatb, rmat, "rmat->add_row(rv);");
+rmat->row_add(rv);
+is_approx_equals_mat(rmatb, rmat, "rmat->row_add(rv);");

-rmat->add_row(rv);
-is_approx_equals_mat(rmatc, rmat, "rmat->add_row(rv);");
+rmat->row_add(rv);
+is_approx_equals_mat(rmatc, rmat, "rmat->row_add(rv);");
 }
-void MLPPMatrixTests::test_add_row_pool_vector() {
+void MLPPMatrixTests::test_row_add_pool_vector() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 };
@@ -138,16 +138,16 @@ void MLPPMatrixTests::test_add_row_pool_vector() {
 Ref<MLPPMatrix> rmat;
 rmat.instance();

-rmat->add_row_pool_vector(rv);
-is_approx_equals_mat(rmata, rmat, "rmat->add_row_pool_vector(rv);");
+rmat->row_add_pool_vector(rv);
+is_approx_equals_mat(rmata, rmat, "rmat->row_add_pool_vector(rv);");

-rmat->add_row_pool_vector(rv);
-is_approx_equals_mat(rmatb, rmat, "rmat->add_row_pool_vector(rv);");
+rmat->row_add_pool_vector(rv);
+is_approx_equals_mat(rmatb, rmat, "rmat->row_add_pool_vector(rv);");

-rmat->add_row_pool_vector(rv);
-is_approx_equals_mat(rmatc, rmat, "rmat->add_row_pool_vector(rv);");
+rmat->row_add_pool_vector(rv);
+is_approx_equals_mat(rmatc, rmat, "rmat->row_add_pool_vector(rv);");
 }
-void MLPPMatrixTests::test_add_row_mlpp_vector() {
+void MLPPMatrixTests::test_row_add_mlpp_vector() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 };
@@ -185,16 +185,16 @@ void MLPPMatrixTests::test_add_row_mlpp_vector() {
 Ref<MLPPMatrix> rmat;
 rmat.instance();

-rmat->add_row_mlpp_vector(rv);
-is_approx_equals_mat(rmata, rmat, "rmat->add_row_mlpp_vector(rv);");
+rmat->row_add_mlpp_vector(rv);
+is_approx_equals_mat(rmata, rmat, "rmat->row_add_mlpp_vector(rv);");

-rmat->add_row_mlpp_vector(rv);
-is_approx_equals_mat(rmatb, rmat, "rmat->add_row_mlpp_vector(rv);");
+rmat->row_add_mlpp_vector(rv);
+is_approx_equals_mat(rmatb, rmat, "rmat->row_add_mlpp_vector(rv);");

-rmat->add_row_mlpp_vector(rv);
-is_approx_equals_mat(rmatc, rmat, "rmat->add_row_mlpp_vector(rv);");
+rmat->row_add_mlpp_vector(rv);
+is_approx_equals_mat(rmatc, rmat, "rmat->row_add_mlpp_vector(rv);");
 }
-void MLPPMatrixTests::test_add_rows_mlpp_matrix() {
+void MLPPMatrixTests::test_rows_add_mlpp_matrix() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 };
@@ -220,7 +220,7 @@ void MLPPMatrixTests::test_add_rows_mlpp_matrix() {

 Ref<MLPPMatrix> rv;
 rv.instance();
-rv->add_row_pool_vector(rvp);
+rv->row_add_pool_vector(rvp);

 Ref<MLPPMatrix> rmata;
 rmata.instance();

@@ -237,17 +237,17 @@ void MLPPMatrixTests::test_add_rows_mlpp_matrix() {
 Ref<MLPPMatrix> rmat;
 rmat.instance();

-rmat->add_rows_mlpp_matrix(rv);
-is_approx_equals_mat(rmata, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+rmat->rows_add_mlpp_matrix(rv);
+is_approx_equals_mat(rmata, rmat, "rmat->rows_add_mlpp_matrix(rv);");

-rmat->add_rows_mlpp_matrix(rv);
-is_approx_equals_mat(rmatb, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+rmat->rows_add_mlpp_matrix(rv);
+is_approx_equals_mat(rmatb, rmat, "rmat->rows_add_mlpp_matrix(rv);");

-rmat->add_rows_mlpp_matrix(rv);
-is_approx_equals_mat(rmatc, rmat, "rmat->add_rows_mlpp_matrix(rv);");
+rmat->rows_add_mlpp_matrix(rv);
+is_approx_equals_mat(rmatc, rmat, "rmat->rows_add_mlpp_matrix(rv);");
 }

-void MLPPMatrixTests::test_remove_row() {
+void MLPPMatrixTests::test_row_remove() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 { 5, 6, 7, 8 },
@@ -286,16 +286,16 @@ void MLPPMatrixTests::test_remove_row() {
 rmat.instance();
 rmat->set_from_std_vectors(D);

-rmat->remove_row(2);
-is_approx_equals_mat(rmat, rmata, "rmat->remove_row(2);");
+rmat->row_remove(2);
+is_approx_equals_mat(rmat, rmata, "rmat->row_remove(2);");

-rmat->remove_row(2);
-is_approx_equals_mat(rmat, rmatb, "rmat->remove_row(2);");
+rmat->row_remove(2);
+is_approx_equals_mat(rmat, rmatb, "rmat->row_remove(2);");

-rmat->remove_row(1);
-is_approx_equals_mat(rmat, rmatc, "rmat->remove_row(1);");
+rmat->row_remove(1);
+is_approx_equals_mat(rmat, rmatc, "rmat->row_remove(1);");
 }
-void MLPPMatrixTests::test_remove_row_unordered() {
+void MLPPMatrixTests::test_row_remove_unordered() {
 std::vector<std::vector<real_t>> A = {
 { 1, 2, 3, 4 },
 { 13, 14, 15, 16 },
@@ -334,14 +334,14 @@ void MLPPMatrixTests::test_remove_row_unordered() {
 rmat.instance();
 rmat->set_from_std_vectors(D);

-rmat->remove_row_unordered(1);
-is_approx_equals_mat(rmat, rmata, "rmat->remove_row_unordered(1);");
+rmat->row_remove_unordered(1);
+is_approx_equals_mat(rmat, rmata, "rmat->row_remove_unordered(1);");

-rmat->remove_row_unordered(0);
-is_approx_equals_mat(rmat, rmatb, "rmat->remove_row(0);");
+rmat->row_remove_unordered(0);
+is_approx_equals_mat(rmat, rmatb, "rmat->row_remove(0);");

-rmat->remove_row_unordered(1);
-is_approx_equals_mat(rmat, rmatc, "rmat->remove_row_unordered(1);");
+rmat->row_remove_unordered(1);
+is_approx_equals_mat(rmat, rmatc, "rmat->row_remove_unordered(1);");
 }

 void MLPPMatrixTests::test_mlpp_matrix_mul() {

@@ -25,13 +25,13 @@ public:

 void test_mlpp_matrix();

-void test_add_row();
-void test_add_row_pool_vector();
-void test_add_row_mlpp_vector();
-void test_add_rows_mlpp_matrix();
+void test_row_add();
+void test_row_add_pool_vector();
+void test_row_add_mlpp_vector();
+void test_rows_add_mlpp_matrix();

-void test_remove_row();
-void test_remove_row_unordered();
+void test_row_remove();
+void test_row_remove_unordered();


 void test_mlpp_matrix_mul();