Tensor3 api style rework. Also changed 2 methods in Matrix and Vector.

This commit is contained in:
Relintai 2023-04-29 13:44:18 +02:00
parent b0bd3344ad
commit e84e45f6e8
34 changed files with 431 additions and 431 deletions

View File

@ -318,14 +318,14 @@
<description>
</description>
</method>
<method name="get_element" qualifiers="const">
<method name="element_get" qualifiers="const">
<return type="float" />
<argument index="0" name="index_y" type="int" />
<argument index="1" name="index_x" type="int" />
<description>
</description>
</method>
<method name="get_element_index" qualifiers="const">
<method name="element_get_index" qualifiers="const">
<return type="float" />
<argument index="0" name="index" type="int" />
<description>
@ -616,7 +616,7 @@
<description>
</description>
</method>
<method name="set_element">
<method name="element_set">
<return type="void" />
<argument index="0" name="index_y" type="int" />
<argument index="1" name="index_x" type="int" />
@ -624,7 +624,7 @@
<description>
</description>
</method>
<method name="set_element_index">
<method name="element_set_index">
<return type="void" />
<argument index="0" name="index" type="int" />
<argument index="1" name="val" type="float" />

View File

@ -156,7 +156,7 @@
<description>
</description>
</method>
<method name="get_element" qualifiers="const">
<method name="element_get" qualifiers="const">
<return type="float" />
<argument index="0" name="index_y" type="int" />
<argument index="1" name="index_x" type="int" />
@ -164,7 +164,7 @@
<description>
</description>
</method>
<method name="get_element_index" qualifiers="const">
<method name="element_get_index" qualifiers="const">
<return type="float" />
<argument index="0" name="index" type="int" />
<description>
@ -417,7 +417,7 @@
<description>
</description>
</method>
<method name="set_element">
<method name="element_set">
<return type="void" />
<argument index="0" name="index_y" type="int" />
<argument index="1" name="index_x" type="int" />
@ -426,7 +426,7 @@
<description>
</description>
</method>
<method name="set_element_index">
<method name="element_set_index">
<return type="void" />
<argument index="0" name="index" type="int" />
<argument index="1" name="val" type="float" />

View File

@ -220,7 +220,7 @@
<description>
</description>
</method>
<method name="get_element" qualifiers="const">
<method name="element_get" qualifiers="const">
<return type="float" />
<argument index="0" name="index" type="int" />
<description>
@ -415,7 +415,7 @@
<description>
</description>
</method>
<method name="set_element">
<method name="element_set">
<return type="void" />
<argument index="0" name="index" type="int" />
<argument index="1" name="val" type="float" />

View File

@ -1093,9 +1093,9 @@ Ref<MLPPMatrix> MLPPActivation::softmax_deriv_normv(const Ref<MLPPVector> &z) {
for (int i = 0; i < z_size; ++i) {
for (int j = 0; j < z_size; ++j) {
if (i == j) {
deriv->set_element(i, j, a_ptr[i] * (1 - a_ptr[i]));
deriv->element_set(i, j, a_ptr[i] * (1 - a_ptr[i]));
} else {
deriv->set_element(i, j, -a_ptr[i] * a_ptr[j]);
deriv->element_set(i, j, -a_ptr[i] * a_ptr[j]);
}
}
}
@ -1161,9 +1161,9 @@ Ref<MLPPMatrix> MLPPActivation::softmax_deriv_derivv(const Ref<MLPPVector> &z) {
for (int i = 0; i < z_size; ++i) {
for (int j = 0; j < z_size; ++j) {
if (i == j) {
deriv->set_element(i, j, a_ptr[i] * (1 - a_ptr[i]));
deriv->element_set(i, j, a_ptr[i] * (1 - a_ptr[i]));
} else {
deriv->set_element(i, j, -a_ptr[i] * a_ptr[j]);
deriv->element_set(i, j, -a_ptr[i] * a_ptr[j]);
}
}
}

View File

@ -126,11 +126,11 @@ void MLPPANN::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_element = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_element);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
real_t y_hat = model_test(input_set_row_tmp);
y_hat_row_tmp->set_element(0, y_hat);
y_hat_row_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);

View File

@ -24,7 +24,7 @@ Ref<MLPPVector> MLPPBernoulliNB::model_set_test(const Ref<MLPPMatrix> &X) {
for (int i = 0; i < X->size().y; i++) {
X->get_row_into_mlpp_vector(i, x_row_tmp);
y_hat->set_element(i, model_test(x_row_tmp));
y_hat->element_set(i, model_test(x_row_tmp));
}
return y_hat;
@ -38,9 +38,9 @@ real_t MLPPBernoulliNB::model_test(const Ref<MLPPVector> &x) {
for (int j = 0; j < x->size(); j++) {
for (int k = 0; k < _vocab->size(); k++) {
if (x->get_element(j) == _vocab->get_element(k)) {
score_0 *= _theta[0][_vocab->get_element(k)];
score_1 *= _theta[1][_vocab->get_element(k)];
if (x->element_get(j) == _vocab->element_get(k)) {
score_0 *= _theta[0][_vocab->element_get(k)];
score_1 *= _theta[1][_vocab->element_get(k)];
found_indices.push_back(k);
}
@ -50,13 +50,13 @@ real_t MLPPBernoulliNB::model_test(const Ref<MLPPVector> &x) {
for (int i = 0; i < _vocab->size(); i++) {
bool found = false;
for (int j = 0; j < found_indices.size(); j++) {
if (_vocab->get_element(i) == _vocab->get_element(found_indices[j])) {
if (_vocab->element_get(i) == _vocab->element_get(found_indices[j])) {
found = true;
}
}
if (!found) {
score_0 *= 1 - _theta[0][_vocab->get_element(i)];
score_1 *= 1 - _theta[1][_vocab->get_element(i)];
score_0 *= 1 - _theta[0][_vocab->element_get(i)];
score_1 *= 1 - _theta[1][_vocab->element_get(i)];
}
}
@ -113,13 +113,13 @@ void MLPPBernoulliNB::compute_theta() {
// Setting all values in the hashmap by default to 0.
for (int i = _class_num - 1; i >= 0; i--) {
for (int j = 0; j < _vocab->size(); j++) {
_theta.write[i][_vocab->get_element(j)] = 0;
_theta.write[i][_vocab->element_get(j)] = 0;
}
}
for (int i = 0; i < _input_set->size().y; i++) {
for (int j = 0; j < _input_set->size().x; j++) {
_theta.write[_output_set->get_element(i)][_input_set->get_element(i, j)]++;
_theta.write[_output_set->element_get(i)][_input_set->element_get(i, j)]++;
}
}
@ -142,7 +142,7 @@ void MLPPBernoulliNB::evaluate() {
real_t sum = 0;
for (int ii = 0; ii < _output_set->size(); ii++) {
if (_output_set->get_element(ii) == 1) {
if (_output_set->element_get(ii) == 1) {
sum += 1;
}
}
@ -161,9 +161,9 @@ void MLPPBernoulliNB::evaluate() {
for (int j = 0; j < _input_set->size().y; j++) {
for (int k = 0; k < _vocab->size(); k++) {
if (_input_set->get_element(i, j) == _vocab->get_element(k)) {
score_0 += Math::log(static_cast<real_t>(_theta[0][_vocab->get_element(k)]));
score_1 += Math::log(static_cast<real_t>(_theta[1][_vocab->get_element(k)]));
if (_input_set->element_get(i, j) == _vocab->element_get(k)) {
score_0 += Math::log(static_cast<real_t>(_theta[0][_vocab->element_get(k)]));
score_1 += Math::log(static_cast<real_t>(_theta[1][_vocab->element_get(k)]));
found_indices.push_back(k);
}
@ -173,13 +173,13 @@ void MLPPBernoulliNB::evaluate() {
for (int ii = 0; ii < _vocab->size(); ii++) {
bool found = false;
for (int j = 0; j < found_indices.size(); j++) {
if (_vocab->get_element(ii) == _vocab->get_element(found_indices[j])) {
if (_vocab->element_get(ii) == _vocab->element_get(found_indices[j])) {
found = true;
}
}
if (!found) {
score_0 += Math::log(1.0 - _theta[0][_vocab->get_element(ii)]);
score_1 += Math::log(1.0 - _theta[1][_vocab->get_element(ii)]);
score_0 += Math::log(1.0 - _theta[0][_vocab->element_get(ii)]);
score_1 += Math::log(1.0 - _theta[1][_vocab->element_get(ii)]);
}
}
@ -192,9 +192,9 @@ void MLPPBernoulliNB::evaluate() {
// Assigning the training example to a class
if (score_0 > score_1) {
_y_hat->set_element(i, 0);
_y_hat->element_set(i, 0);
} else {
_y_hat->set_element(i, 1);
_y_hat->element_set(i, 1);
}
}
}

View File

@ -123,17 +123,17 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_element = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_element);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
real_t y_hat = evaluatev(input_set_row_tmp);
y_hat_row_tmp->set_element(0, y_hat);
y_hat_row_tmp->element_set(0, y_hat);
real_t z = propagatev(input_set_row_tmp);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
real_t error = y_hat - output_set_element;
real_t error = y_hat - output_element_set;
// Weight Updation
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * Math::exp(z - Math::exp(z)), input_set_row_tmp));

View File

@ -565,7 +565,7 @@ real_t MLPPCost::dual_form_svm(const Ref<MLPPVector> &alpha, const Ref<MLPPMatri
Ref<MLPPMatrix> alpha_m_res = alg.matmultnm(alg.matmultnm(alpha_m, Q), alg.transposenm(alpha_m));
real_t alphaQ = alpha_m_res->get_element(0, 0);
real_t alphaQ = alpha_m_res->element_get(0, 0);
Ref<MLPPVector> one = alg.onevecnv(alpha->size());
return -alg.dotnv(one, alpha) + 0.5 * alphaQ;

View File

@ -1285,7 +1285,7 @@ Ref<MLPPMatrix> MLPPData::mean_centering(const Ref<MLPPMatrix> &p_X) {
real_t mean_i = stat.meanv(x_row_tmp);
for (int j = 0; j < x_size.x; ++j) {
X->set_element(i, j, p_X->get_element(i, j) - mean_i);
X->element_set(i, j, p_X->element_get(i, j) - mean_i);
}
}
@ -1306,9 +1306,9 @@ Ref<MLPPMatrix> MLPPData::one_hot_rep(const Ref<MLPPVector> &temp_output_set, in
for (int i = 0; i < temp_output_set_size; ++i) {
for (int j = 0; j <= n_class - 1; ++j) {
if (static_cast<int>(temp_output_set_ptr[i]) == j) {
output_set->set_element(i, j, 1);
output_set->element_set(i, j, 1);
} else {
output_set->set_element(i, j, 0);
output_set->element_set(i, j, 0);
}
}
}

View File

@ -222,13 +222,13 @@ public:
bool new_element = true;
for (int j = 0; j < set_input_set.size(); j++) {
if (set_input_set[j] == input_set->get_element(i)) {
if (set_input_set[j] == input_set->element_get(i)) {
new_element = false;
}
}
if (new_element) {
set_input_set.push_back(input_set->get_element(i));
set_input_set.push_back(input_set->element_get(i));
}
}

View File

@ -50,18 +50,18 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
real_t biasGradient = 0;
for (int i = 0; i < _alpha->size(); i++) {
real_t sum = 0;
if (_alpha->get_element(i) < _C && _alpha->get_element(i) > 0) {
if (_alpha->element_get(i) < _C && _alpha->element_get(i) > 0) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->get_element(j) > 0) {
if (_alpha->element_get(j) > 0) {
_input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
_input_set->get_row_into_mlpp_vector(j, input_set_j_row_tmp);
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
}
biasGradient = (1 - _output_set->get_element(i) * sum) / _output_set->get_element(i);
biasGradient = (1 - _output_set->element_get(i) * sum) / _output_set->element_get(i);
break;
}
@ -215,9 +215,9 @@ real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
input_set_row_tmp->resize(_input_set->size().x);
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->get_element(j) != 0) {
if (_alpha->element_get(j) != 0) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
z += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
z += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
z += _bias;
@ -248,17 +248,17 @@ Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {
real_t sum = 0;
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->get_element(j) != 0) {
if (_alpha->element_get(j) != 0) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
X->get_row_into_mlpp_vector(i, x_row_tmp);
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
sum += _bias;
z->set_element(i, sum);
z->element_set(i, sum);
}
return z;
}
@ -272,10 +272,10 @@ void MLPPDualSVC::forward_pass() {
void MLPPDualSVC::alpha_projection() {
for (int i = 0; i < _alpha->size(); i++) {
if (_alpha->get_element(i) > _C) {
_alpha->set_element(i, _C);
} else if (_alpha->get_element(i) < 0) {
_alpha->set_element(i, 0);
if (_alpha->element_get(i) > _C) {
_alpha->element_set(i, _C);
} else if (_alpha->element_get(i) < 0) {
_alpha->element_set(i, 0);
}
}
}

View File

@ -40,21 +40,21 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
// Calculating the weight gradient
real_t sum = 0;
for (int j = 0; j < _n; j++) {
sum += error->get_element(j) * _input_set->get_element(j, i) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i) - 1);
sum += error->element_get(j) * _input_set->element_get(j, i) * Math::pow(_weights->element_get(i), _input_set->element_get(j, i) - 1);
}
real_t w_gradient = sum / _n;
// Calculating the initial gradient
real_t sum2 = 0;
for (int j = 0; j < _n; j++) {
sum2 += error->get_element(j) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i));
sum2 += error->element_get(j) * Math::pow(_weights->element_get(i), _input_set->element_get(j, i));
}
real_t i_gradient = sum2 / _n;
// Weight/initial updation
_weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
_initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
_weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
_initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
@ -62,7 +62,7 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
// Calculating the bias gradient
real_t sum = 0;
for (int j = 0; j < _n; j++) {
sum += (_y_hat->get_element(j) - _output_set->get_element(j));
sum += (_y_hat->element_get(j) - _output_set->element_get(j));
}
real_t b_gradient = sum / _n;
@ -110,29 +110,29 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_element = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_element);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
real_t y_hat = evaluatev(input_set_row_tmp);
y_hat_row_tmp->set_element(0, y_hat);
y_hat_row_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
for (int i = 0; i < _k; i++) {
// Calculating the weight gradients
real_t w_gradient = (y_hat - output_set_element) * input_set_row_tmp->get_element(i) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i) - 1);
real_t i_gradient = (y_hat - output_set_element) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i));
real_t w_gradient = (y_hat - output_element_set) * input_set_row_tmp->element_get(i) * Math::pow(_weights->element_get(i), _input_set->element_get(output_index, i) - 1);
real_t i_gradient = (y_hat - output_element_set) * Math::pow(_weights->element_get(i), _input_set->element_get(output_index, i));
// Weight/initial updation
_weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
_initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
_weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
_initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
real_t b_gradient = (y_hat - output_set_element);
real_t b_gradient = (y_hat - output_element_set);
// Bias updation
_bias -= learning_rate * b_gradient;
@ -177,21 +177,21 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
// Calculating the weight gradient
real_t sum = 0;
for (int k = 0; k < current_output_batch->size(); k++) {
sum += error->get_element(k) * current_input_batch->get_element(k, j) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j) - 1);
sum += error->element_get(k) * current_input_batch->element_get(k, j) * Math::pow(_weights->element_get(j), current_input_batch->element_get(k, j) - 1);
}
real_t w_gradient = sum / current_output_batch->size();
// Calculating the initial gradient
real_t sum2 = 0;
for (int k = 0; k < current_output_batch->size(); k++) {
sum2 += error->get_element(k) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j));
sum2 += error->element_get(k) * Math::pow(_weights->element_get(j), current_input_batch->element_get(k, j));
}
real_t i_gradient = sum2 / current_output_batch->size();
// Weight/initial updation
_weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
_initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
_weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
_initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
@ -199,7 +199,7 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
// Calculating the bias gradient
//real_t sum = 0;
//for (int j = 0; j < current_output_batch->size(); j++) {
// sum += (y_hat->get_element(j) - current_output_batch->get_element(j));
// sum += (y_hat->element_get(j) - current_output_batch->element_get(j));
//}
//real_t b_gradient = sum / output_mini_batches[i].size();
@ -276,7 +276,7 @@ real_t MLPPExpReg::evaluatev(const Ref<MLPPVector> &x) {
real_t y_hat = 0;
for (int i = 0; i < x->size(); i++) {
y_hat += _initial->get_element(i) * Math::pow(_weights->get_element(i), x->get_element(i));
y_hat += _initial->element_get(i) * Math::pow(_weights->element_get(i), x->element_get(i));
}
return y_hat + _bias;
@ -291,12 +291,12 @@ Ref<MLPPVector> MLPPExpReg::evaluatem(const Ref<MLPPMatrix> &X) {
real_t y = 0;
for (int j = 0; j < X->size().x; j++) {
y += _initial->get_element(j) * Math::pow(_weights->get_element(j), X->get_element(i, j));
y += _initial->element_get(j) * Math::pow(_weights->element_get(j), X->element_get(i, j));
}
y += _bias;
y_hat->set_element(i, y);
y_hat->element_set(i, y);
}
return y_hat;

View File

@ -47,7 +47,7 @@ Ref<MLPPVector> MLPPGaussianNB::model_set_test(const Ref<MLPPMatrix> &X) {
for (int i = 0; i < X->size().y; i++) {
X->get_row_into_mlpp_vector(i, x_row_tmp);
y_hat->set_element(i, model_test(x_row_tmp));
y_hat->element_set(i, model_test(x_row_tmp));
}
return y_hat;
@ -60,11 +60,11 @@ real_t MLPPGaussianNB::model_test(const Ref<MLPPVector> &x) {
real_t y_hat_i = 1;
for (int i = _class_num - 1; i >= 0; i--) {
real_t sigma_i = _sigma->get_element(i);
real_t x_i = x->get_element(i);
real_t mu_i = _mu->get_element(i);
real_t sigma_i = _sigma->element_get(i);
real_t x_i = x->element_get(i);
real_t mu_i = _mu->element_get(i);
y_hat_i += Math::log(_priors->get_element(i) * (1 / Math::sqrt(2 * Math_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));
y_hat_i += Math::log(_priors->element_get(i) * (1 / Math::sqrt(2 * Math_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));
score[i] = Math::exp(y_hat_i);
}
@ -140,24 +140,24 @@ void MLPPGaussianNB::evaluate() {
for (int j = 0; j < _input_set->size().y; j++) {
for (int k = 0; k < _input_set->size().x; k++) {
if (_output_set->get_element(j) == i) {
set.push_back(_input_set->get_element(j, k));
if (_output_set->element_get(j) == i) {
set.push_back(_input_set->element_get(j, k));
}
}
}
set_vec->set_from_pool_vector(set);
_mu->set_element(i, stat.meanv(set_vec));
_sigma->set_element(i, stat.standard_deviationv(set_vec));
_mu->element_set(i, stat.meanv(set_vec));
_sigma->element_set(i, stat.standard_deviationv(set_vec));
}
// Priors
_priors->resize(_class_num);
_priors->fill(0);
for (int i = 0; i < _output_set->size(); i++) {
int indx = static_cast<int>(_output_set->get_element(i));
_priors->set_element(indx, _priors->get_element(indx));
int indx = static_cast<int>(_output_set->element_get(i));
_priors->element_set(indx, _priors->element_get(indx));
}
_priors = alg.scalar_multiplynv(real_t(1) / real_t(_output_set->size()), _priors);
@ -170,11 +170,11 @@ void MLPPGaussianNB::evaluate() {
for (int j = _class_num - 1; j >= 0; j--) {
for (int k = 0; k < _input_set->size().x; k++) {
real_t sigma_j = _sigma->get_element(j);
real_t mu_j = _mu->get_element(j);
real_t input_set_i_k = _input_set->get_element(i, k);
real_t sigma_j = _sigma->element_get(j);
real_t mu_j = _mu->element_get(j);
real_t input_set_i_k = _input_set->element_get(i, k);
y_hat_i += Math::log(_priors->get_element(j) * (1 / Math::sqrt(2 * Math_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
y_hat_i += Math::log(_priors->element_get(j) * (1 / Math::sqrt(2 * Math_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
}
score[j] = Math::exp(y_hat_i);
@ -192,7 +192,7 @@ void MLPPGaussianNB::evaluate() {
}
}
_y_hat->set_element(i, max_element_index);
_y_hat->element_set(i, max_element_index);
}
}

View File

@ -271,17 +271,17 @@ Ref<MLPPVector> MLPPKMeans::silhouette_scores() {
}
}
silhouette_scores->set_element(i, (b - a) / fmax(a, b));
silhouette_scores->element_set(i, (b - a) / fmax(a, b));
// Or the expanded version:
// if(a < b) {
// silhouette_scores->set_element(i, 1 - a/b);
// silhouette_scores->element_set(i, 1 - a/b);
// }
// else if(a == b){
// silhouette_scores->set_element(i, 0);
// silhouette_scores->element_set(i, 0);
// }
// else{
// silhouette_scores->set_element(i, b/a - 1);
// silhouette_scores->element_set(i, b/a - 1);
// }
}
@ -349,7 +349,7 @@ void MLPPKMeans::_evaluate() {
}
}
_r->set_element(i, closest_centroid_index, 1);
_r->element_set(i, closest_centroid_index, 1);
}
}
@ -383,9 +383,9 @@ void MLPPKMeans::_compute_mu() {
for (int j = 0; j < r_size_y; ++j) {
_input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
real_t r_j_i = _r->get_element(j, i);
real_t r_j_i = _r->element_get(j, i);
alg.scalar_multiplyv(_r->get_element(j, i), input_set_j_tempv, mat_tempv);
alg.scalar_multiplyv(_r->element_get(j, i), input_set_j_tempv, mat_tempv);
alg.additionv(num, mat_tempv, num);
den += r_j_i;
@ -501,7 +501,7 @@ real_t MLPPKMeans::_cost() {
_mu->get_row_into_mlpp_vector(j, mu_j_tempv);
alg.subtractionv(input_set_i_tempv, mu_j_tempv, sub_tempv);
sum += _r->get_element(i, j) * alg.norm_sqv(sub_tempv);
sum += _r->element_get(i, j) * alg.norm_sqv(sub_tempv);
}
}

View File

@ -117,7 +117,7 @@ Ref<MLPPMatrix> MLPPLinAlg::matmultnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMa
c_ptr[ind_i_j] += a_ptr[ind_i_k] * b_ptr[ind_k_j];
//C->set_element(i, j, C->get_element(i, j) + A->get_element(i, k) * B->get_element(k, j
//C->element_set(i, j, C->element_get(i, j) + A->element_get(i, k) * B->element_get(k, j
}
}
}
@ -421,7 +421,7 @@ real_t MLPPLinAlg::detm(const Ref<MLPPMatrix> &A, int d) {
Recursion is performed unless and until we reach this base case,
such that we receive a scalar as the result. */
if (d == 2) {
return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0);
return A->element_get(0, 0) * A->element_get(1, 1) - A->element_get(0, 1) * A->element_get(1, 0);
} else {
for (int i = 0; i < d; i++) {
int sub_i = 0;
@ -432,13 +432,13 @@ real_t MLPPLinAlg::detm(const Ref<MLPPMatrix> &A, int d) {
continue;
}
B->set_element(sub_i, sub_j, A->get_element(j, k));
B->element_set(sub_i, sub_j, A->element_get(j, k));
sub_j++;
}
sub_i++;
}
deter += Math::pow(static_cast<real_t>(-1), static_cast<real_t>(i)) * A->get_element(0, i) * detm(B, d - 1);
deter += Math::pow(static_cast<real_t>(-1), static_cast<real_t>(i)) * A->element_get(0, i) * detm(B, d - 1);
}
}
@ -466,7 +466,7 @@ Ref<MLPPMatrix> MLPPLinAlg::cofactornm(const Ref<MLPPMatrix> &A, int n, int i, i
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
cof->set_element(sub_i, sub_j++, A->get_element(row, col));
cof->element_set(sub_i, sub_j++, A->element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@ -494,16 +494,16 @@ Ref<MLPPMatrix> MLPPLinAlg::adjointnm(const Ref<MLPPMatrix> &A) {
// Checking for the case where the given N x N matrix is a scalar
if (a_size.y == 1) {
adj->set_element(0, 0, 1);
adj->element_set(0, 0, 1);
return adj;
}
if (a_size.y == 2) {
adj->set_element(0, 0, A->get_element(1, 1));
adj->set_element(1, 1, A->get_element(0, 0));
adj->element_set(0, 0, A->element_get(1, 1));
adj->element_set(1, 1, A->element_get(0, 0));
adj->set_element(0, 1, -A->get_element(0, 1));
adj->set_element(1, 0, -A->get_element(1, 0));
adj->element_set(0, 1, -A->element_get(0, 1));
adj->element_set(1, 0, -A->element_get(1, 0));
return adj;
}
@ -513,7 +513,7 @@ Ref<MLPPMatrix> MLPPLinAlg::adjointnm(const Ref<MLPPMatrix> &A) {
Ref<MLPPMatrix> cof = cofactornm(A, a_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1));
adj->element_set(j, i, sign * detm(cof, int(a_size.y) - 1));
}
}
return adj;
@ -694,7 +694,7 @@ Ref<MLPPMatrix> MLPPLinAlg::covnm(const Ref<MLPPMatrix> &A) {
for (int j = 0; j < a_size.x; ++j) {
A->get_row_into_mlpp_vector(j, a_j_row_tmp);
cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
@ -720,12 +720,12 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
Size2i a_size = A->size();
do {
real_t a_ij = A->get_element(0, 1);
real_t a_ij = A->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
real_t ca_ij = A->get_element(i, j);
real_t ca_ij = A->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@ -742,9 +742,9 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
}
}
real_t a_ii = A->get_element(sub_i, sub_i);
real_t a_jj = A->get_element(sub_j, sub_j);
//real_t a_ji = A->get_element(sub_j, sub_i);
real_t a_ii = A->element_get(sub_i, sub_i);
real_t a_jj = A->element_get(sub_j, sub_j);
//real_t a_ji = A->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@ -754,10 +754,10 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
}
Ref<MLPPMatrix> P = identitym(A->size().y);
P->set_element(sub_i, sub_j, -Math::sin(theta));
P->set_element(sub_i, sub_i, Math::cos(theta));
P->set_element(sub_j, sub_j, Math::cos(theta));
P->set_element(sub_j, sub_i, Math::sin(theta));
P->element_set(sub_i, sub_j, -Math::sin(theta));
P->element_set(sub_i, sub_i, Math::cos(theta));
P->element_set(sub_j, sub_j, Math::cos(theta));
P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = matmultnm(matmultnm(inversenm(P), A), P);
@ -765,8 +765,8 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
a_new->set_element(i, j, 0);
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
a_new->element_set(i, j, 0);
}
}
}
@ -774,7 +774,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@ -791,7 +791,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
a_new->set_element(i, j, 0);
a_new->element_set(i, j, 0);
}
}
}
@ -809,17 +809,17 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
real_t temp = a_new->get_element(j + 1, j + 1);
a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
a_new->set_element(j, j, temp);
if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
real_t temp = a_new->element_get(j + 1, j + 1);
a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@ -831,7 +831,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@ -858,7 +858,7 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref<MLPPMatrix> &A) {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
sigma->set_element(i, j, singularvals->get_element(i, j));
sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@ -1720,7 +1720,7 @@ Ref<MLPPMatrix> MLPPLinAlg::outer_product(const Ref<MLPPVector> &a, const Ref<ML
real_t curr_a = a_ptr[i];
for (int j = 0; j < size.x; ++j) {
C->set_element(i, j, curr_a * b_ptr[j]);
C->element_set(i, j, curr_a * b_ptr[j]);
}
}

View File

@ -1524,7 +1524,7 @@ real_t MLPPMatrix::detb(const Ref<MLPPMatrix> &A, int d) const {
Recursion is performed unless and until we reach this base case,
such that we receive a scalar as the result. */
if (d == 2) {
return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0);
return A->element_get(0, 0) * A->element_get(1, 1) - A->element_get(0, 1) * A->element_get(1, 0);
} else {
for (int i = 0; i < d; i++) {
int sub_i = 0;
@ -1535,13 +1535,13 @@ real_t MLPPMatrix::detb(const Ref<MLPPMatrix> &A, int d) const {
continue;
}
B->set_element(sub_i, sub_j, A->get_element(j, k));
B->element_set(sub_i, sub_j, A->element_get(j, k));
sub_j++;
}
sub_i++;
}
deter += Math::pow(static_cast<real_t>(-1), static_cast<real_t>(i)) * A->get_element(0, i) * B->det(d - 1);
deter += Math::pow(static_cast<real_t>(-1), static_cast<real_t>(i)) * A->element_get(0, i) * B->det(d - 1);
}
}
@ -1569,7 +1569,7 @@ Ref<MLPPMatrix> MLPPMatrix::cofactor(int n, int i, int j) const {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
cof->set_element(sub_i, sub_j++, get_element(row, col));
cof->element_set(sub_i, sub_j++, element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@ -1594,7 +1594,7 @@ void MLPPMatrix::cofactoro(int n, int i, int j, Ref<MLPPMatrix> out) const {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
out->set_element(sub_i, sub_j++, get_element(row, col));
out->element_set(sub_i, sub_j++, element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@ -1617,16 +1617,16 @@ Ref<MLPPMatrix> MLPPMatrix::adjoint() const {
// Checking for the case where the given N x N matrix is a scalar
if (_size.y == 1) {
adj->set_element(0, 0, 1);
adj->element_set(0, 0, 1);
return adj;
}
if (_size.y == 2) {
adj->set_element(0, 0, get_element(1, 1));
adj->set_element(1, 1, get_element(0, 0));
adj->element_set(0, 0, element_get(1, 1));
adj->element_set(1, 1, element_get(0, 0));
adj->set_element(0, 1, -get_element(0, 1));
adj->set_element(1, 0, -get_element(1, 0));
adj->element_set(0, 1, -element_get(0, 1));
adj->element_set(1, 0, -element_get(1, 0));
return adj;
}
@ -1636,7 +1636,7 @@ Ref<MLPPMatrix> MLPPMatrix::adjoint() const {
Ref<MLPPMatrix> cof = cofactor(_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
adj->set_element(j, i, sign * cof->det(int(_size.y) - 1));
adj->element_set(j, i, sign * cof->det(int(_size.y) - 1));
}
}
return adj;
@ -1654,16 +1654,16 @@ void MLPPMatrix::adjointo(Ref<MLPPMatrix> out) const {
// Checking for the case where the given N x N matrix is a scalar
if (_size.y == 1) {
out->set_element(0, 0, 1);
out->element_set(0, 0, 1);
return;
}
if (_size.y == 2) {
out->set_element(0, 0, get_element(1, 1));
out->set_element(1, 1, get_element(0, 0));
out->element_set(0, 0, element_get(1, 1));
out->element_set(1, 1, element_get(0, 0));
out->set_element(0, 1, -get_element(0, 1));
out->set_element(1, 0, -get_element(1, 0));
out->element_set(0, 1, -element_get(0, 1));
out->element_set(1, 0, -element_get(1, 0));
return;
}
@ -1673,7 +1673,7 @@ void MLPPMatrix::adjointo(Ref<MLPPMatrix> out) const {
Ref<MLPPMatrix> cof = cofactor(_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
out->set_element(j, i, sign * cof->det(int(_size.y) - 1));
out->element_set(j, i, sign * cof->det(int(_size.y) - 1));
}
}
}
@ -1968,7 +1968,7 @@ Ref<MLPPMatrix> MLPPMatrix::cov() const {
for (int j = 0; j < _size.x; ++j) {
get_row_into_mlpp_vector(j, a_j_row_tmp);
cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
@ -1997,7 +1997,7 @@ void MLPPMatrix::covo(Ref<MLPPMatrix> out) const {
for (int j = 0; j < _size.x; ++j) {
get_row_into_mlpp_vector(j, a_j_row_tmp);
out->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
out->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
}
@ -2020,12 +2020,12 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
Size2i a_size = a_mat->size();
do {
real_t a_ij = a_mat->get_element(0, 1);
real_t a_ij = a_mat->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
real_t ca_ij = a_mat->get_element(i, j);
real_t ca_ij = a_mat->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@ -2042,9 +2042,9 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
}
}
real_t a_ii = a_mat->get_element(sub_i, sub_i);
real_t a_jj = a_mat->get_element(sub_j, sub_j);
//real_t a_ji = a_mat->get_element(sub_j, sub_i);
real_t a_ii = a_mat->element_get(sub_i, sub_i);
real_t a_jj = a_mat->element_get(sub_j, sub_j);
//real_t a_ji = a_mat->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@ -2054,10 +2054,10 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
}
Ref<MLPPMatrix> P = identity_mat(a_mat->size().y);
P->set_element(sub_i, sub_j, -Math::sin(theta));
P->set_element(sub_i, sub_i, Math::cos(theta));
P->set_element(sub_j, sub_j, Math::cos(theta));
P->set_element(sub_j, sub_i, Math::sin(theta));
P->element_set(sub_i, sub_j, -Math::sin(theta));
P->element_set(sub_i, sub_i, Math::cos(theta));
P->element_set(sub_j, sub_j, Math::cos(theta));
P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = P->inverse()->multn(a_mat)->multn(P);
@ -2065,8 +2065,8 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
a_new->set_element(i, j, 0);
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
a_new->element_set(i, j, 0);
}
}
}
@ -2074,7 +2074,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@ -2091,7 +2091,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
a_new->set_element(i, j, 0);
a_new->element_set(i, j, 0);
}
}
}
@ -2109,17 +2109,17 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
real_t temp = a_new->get_element(j + 1, j + 1);
a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
a_new->set_element(j, j, temp);
if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
real_t temp = a_new->element_get(j + 1, j + 1);
a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@ -2131,7 +2131,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@ -2160,12 +2160,12 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
Size2i a_size = a_mat->size();
do {
real_t a_ij = a_mat->get_element(0, 1);
real_t a_ij = a_mat->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
real_t ca_ij = a_mat->get_element(i, j);
real_t ca_ij = a_mat->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@ -2182,9 +2182,9 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
}
}
real_t a_ii = a_mat->get_element(sub_i, sub_i);
real_t a_jj = a_mat->get_element(sub_j, sub_j);
//real_t a_ji = a_mat->get_element(sub_j, sub_i);
real_t a_ii = a_mat->element_get(sub_i, sub_i);
real_t a_jj = a_mat->element_get(sub_j, sub_j);
//real_t a_ji = a_mat->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@ -2194,10 +2194,10 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
}
Ref<MLPPMatrix> P = identity_mat(a_mat->size().y);
P->set_element(sub_i, sub_j, -Math::sin(theta));
P->set_element(sub_i, sub_i, Math::cos(theta));
P->set_element(sub_j, sub_j, Math::cos(theta));
P->set_element(sub_j, sub_i, Math::sin(theta));
P->element_set(sub_i, sub_j, -Math::sin(theta));
P->element_set(sub_i, sub_i, Math::cos(theta));
P->element_set(sub_j, sub_j, Math::cos(theta));
P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = P->inverse()->multn(a_mat)->multn(P);
@ -2205,8 +2205,8 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
a_new->set_element(i, j, 0);
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
a_new->element_set(i, j, 0);
}
}
}
@ -2214,7 +2214,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@ -2231,7 +2231,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
a_new->set_element(i, j, 0);
a_new->element_set(i, j, 0);
}
}
}
@ -2249,17 +2249,17 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
real_t temp = a_new->get_element(j + 1, j + 1);
a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
a_new->set_element(j, j, temp);
if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
real_t temp = a_new->element_get(j + 1, j + 1);
a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@ -2271,7 +2271,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@ -2316,7 +2316,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svd() const {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
sigma->set_element(i, j, singularvals->get_element(i, j));
sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@ -2344,7 +2344,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svdb(const Ref<MLPPMatrix> &A) const {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
sigma->set_element(i, j, singularvals->get_element(i, j));
sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@ -2679,7 +2679,7 @@ void MLPPMatrix::outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &
real_t curr_a = a_ptr[i];
for (int j = 0; j < s.x; ++j) {
set_element(i, j, curr_a * b_ptr[j]);
element_set(i, j, curr_a * b_ptr[j]);
}
}
}
@ -2699,7 +2699,7 @@ Ref<MLPPMatrix> MLPPMatrix::outer_productn(const Ref<MLPPVector> &a, const Ref<M
real_t curr_a = a_ptr[i];
for (int j = 0; j < s.x; ++j) {
C->set_element(i, j, curr_a * b_ptr[j]);
C->element_set(i, j, curr_a * b_ptr[j]);
}
}
@ -2865,7 +2865,7 @@ void MLPPMatrix::set_from_image(const Ref<Image> &p_img, const int p_image_chann
for (int x = 0; x < _size.x; ++x) {
Color c = img->get_pixel(x, y);
set_element(y, x, c[p_image_channel]);
element_set(y, x, c[p_image_channel]);
}
}
@ -3018,11 +3018,11 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("resize", "size"), &MLPPMatrix::resize);
ClassDB::bind_method(D_METHOD("get_element_index", "index"), &MLPPMatrix::get_element_index);
ClassDB::bind_method(D_METHOD("set_element_index", "index", "val"), &MLPPMatrix::set_element_index);
ClassDB::bind_method(D_METHOD("element_get_index", "index"), &MLPPMatrix::element_get_index);
ClassDB::bind_method(D_METHOD("element_set_index", "index", "val"), &MLPPMatrix::element_set_index);
ClassDB::bind_method(D_METHOD("get_element", "index_y", "index_x"), &MLPPMatrix::get_element);
ClassDB::bind_method(D_METHOD("set_element", "index_y", "index_x", "val"), &MLPPMatrix::set_element);
ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x"), &MLPPMatrix::element_get);
ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "val"), &MLPPMatrix::element_set);
ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y"), &MLPPMatrix::get_row_pool_vector);
ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y"), &MLPPMatrix::get_row_mlpp_vector);

View File

@ -72,26 +72,26 @@ public:
return _data[p_index];
}
_FORCE_INLINE_ real_t get_element_index(int p_index) const {
_FORCE_INLINE_ real_t element_get_index(int p_index) const {
ERR_FAIL_INDEX_V(p_index, data_size(), 0);
return _data[p_index];
}
_FORCE_INLINE_ void set_element_index(int p_index, real_t p_val) {
_FORCE_INLINE_ void element_set_index(int p_index, real_t p_val) {
ERR_FAIL_INDEX(p_index, data_size());
_data[p_index] = p_val;
}
_FORCE_INLINE_ real_t get_element(int p_index_y, int p_index_x) const {
_FORCE_INLINE_ real_t element_get(int p_index_y, int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, 0);
ERR_FAIL_INDEX_V(p_index_y, _size.y, 0);
return _data[p_index_y * _size.x + p_index_x];
}
_FORCE_INLINE_ void set_element(int p_index_y, int p_index_x, real_t p_val) {
_FORCE_INLINE_ void element_set(int p_index_y, int p_index_x, real_t p_val) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);

View File

@ -48,7 +48,7 @@ void MLPPTensor3::set_data(const Array &p_from) {
}
}
void MLPPTensor3::add_z_slice(const Vector<real_t> &p_row) {
void MLPPTensor3::z_slice_add(const Vector<real_t> &p_row) {
if (p_row.size() == 0) {
return;
}
@ -71,7 +71,7 @@ void MLPPTensor3::add_z_slice(const Vector<real_t> &p_row) {
}
}
void MLPPTensor3::add_z_slice_pool_vector(const PoolRealArray &p_row) {
void MLPPTensor3::z_slice_add_pool_vector(const PoolRealArray &p_row) {
if (p_row.size() == 0) {
return;
}
@ -95,7 +95,7 @@ void MLPPTensor3::add_z_slice_pool_vector(const PoolRealArray &p_row) {
}
}
void MLPPTensor3::add_z_slice_mlpp_vector(const Ref<MLPPVector> &p_row) {
void MLPPTensor3::z_slice_add_mlpp_vector(const Ref<MLPPVector> &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
int p_row_size = p_row->size();
@ -122,7 +122,7 @@ void MLPPTensor3::add_z_slice_mlpp_vector(const Ref<MLPPVector> &p_row) {
}
}
void MLPPTensor3::add_z_slice_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix) {
void MLPPTensor3::z_slice_add_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix) {
ERR_FAIL_COND(!p_matrix.is_valid());
int other_data_size = p_matrix->data_size();
@ -150,7 +150,7 @@ void MLPPTensor3::add_z_slice_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix) {
}
}
void MLPPTensor3::remove_z_slice(int p_index) {
void MLPPTensor3::z_slice_remove(int p_index) {
ERR_FAIL_INDEX(p_index, _size.z);
--_size.z;
@ -175,7 +175,7 @@ void MLPPTensor3::remove_z_slice(int p_index) {
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
void MLPPTensor3::remove_z_slice_unordered(int p_index) {
void MLPPTensor3::z_slice_remove_unordered(int p_index) {
ERR_FAIL_INDEX(p_index, _size.z);
--_size.z;
@ -199,7 +199,7 @@ void MLPPTensor3::remove_z_slice_unordered(int p_index) {
CRASH_COND_MSG(!_data, "Out of memory");
}
void MLPPTensor3::swap_z_slice(int p_index_1, int p_index_2) {
void MLPPTensor3::z_slice_swap(int p_index_1, int p_index_2) {
ERR_FAIL_INDEX(p_index_1, _size.z);
ERR_FAIL_INDEX(p_index_2, _size.z);
@ -240,7 +240,7 @@ void MLPPTensor3::set_shape(const Size3i &p_size) {
_size = p_size;
}
Vector<real_t> MLPPTensor3::get_row_vector(int p_index_y, int p_index_z) const {
Vector<real_t> MLPPTensor3::row_get_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Vector<real_t>());
ERR_FAIL_INDEX_V(p_index_z, _size.z, Vector<real_t>());
@ -263,7 +263,7 @@ Vector<real_t> MLPPTensor3::get_row_vector(int p_index_y, int p_index_z) const {
return ret;
}
PoolRealArray MLPPTensor3::get_row_pool_vector(int p_index_y, int p_index_z) const {
PoolRealArray MLPPTensor3::row_get_pool_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, PoolRealArray());
ERR_FAIL_INDEX_V(p_index_z, _size.z, PoolRealArray());
@ -287,7 +287,7 @@ PoolRealArray MLPPTensor3::get_row_pool_vector(int p_index_y, int p_index_z) con
return ret;
}
Ref<MLPPVector> MLPPTensor3::get_row_mlpp_vector(int p_index_y, int p_index_z) const {
Ref<MLPPVector> MLPPTensor3::row_get_mlpp_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<MLPPVector>());
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref<MLPPVector>());
@ -311,7 +311,7 @@ Ref<MLPPVector> MLPPTensor3::get_row_mlpp_vector(int p_index_y, int p_index_z) c
return ret;
}
void MLPPTensor3::get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const {
void MLPPTensor3::row_get_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const {
ERR_FAIL_COND(!target.is_valid());
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@ -329,7 +329,7 @@ void MLPPTensor3::get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLP
}
}
void MLPPTensor3::set_row_vector(int p_index_y, int p_index_z, const Vector<real_t> &p_row) {
void MLPPTensor3::row_set_vector(int p_index_y, int p_index_z, const Vector<real_t> &p_row) {
ERR_FAIL_COND(p_row.size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@ -343,7 +343,7 @@ void MLPPTensor3::set_row_vector(int p_index_y, int p_index_z, const Vector<real
}
}
void MLPPTensor3::set_row_pool_vector(int p_index_y, int p_index_z, const PoolRealArray &p_row) {
void MLPPTensor3::row_set_pool_vector(int p_index_y, int p_index_z, const PoolRealArray &p_row) {
ERR_FAIL_COND(p_row.size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@ -358,7 +358,7 @@ void MLPPTensor3::set_row_pool_vector(int p_index_y, int p_index_z, const PoolRe
}
}
void MLPPTensor3::set_row_mlpp_vector(int p_index_y, int p_index_z, const Ref<MLPPVector> &p_row) {
void MLPPTensor3::row_set_mlpp_vector(int p_index_y, int p_index_z, const Ref<MLPPVector> &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
ERR_FAIL_COND(p_row->size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
@ -373,7 +373,7 @@ void MLPPTensor3::set_row_mlpp_vector(int p_index_y, int p_index_z, const Ref<ML
}
}
Vector<real_t> MLPPTensor3::get_z_slice_vector(int p_index_z) const {
Vector<real_t> MLPPTensor3::z_slice_get_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Vector<real_t>());
Vector<real_t> ret;
@ -397,7 +397,7 @@ Vector<real_t> MLPPTensor3::get_z_slice_vector(int p_index_z) const {
return ret;
}
PoolRealArray MLPPTensor3::get_z_slice_pool_vector(int p_index_z) const {
PoolRealArray MLPPTensor3::z_slice_get_pool_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, PoolRealArray());
PoolRealArray ret;
@ -422,7 +422,7 @@ PoolRealArray MLPPTensor3::get_z_slice_pool_vector(int p_index_z) const {
return ret;
}
Ref<MLPPVector> MLPPTensor3::get_z_slice_mlpp_vector(int p_index_z) const {
Ref<MLPPVector> MLPPTensor3::z_slice_get_mlpp_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref<MLPPVector>());
Ref<MLPPVector> ret;
@ -447,7 +447,7 @@ Ref<MLPPVector> MLPPTensor3::get_z_slice_mlpp_vector(int p_index_z) const {
return ret;
}
void MLPPTensor3::get_z_slice_into_mlpp_vector(int p_index_z, Ref<MLPPVector> target) const {
void MLPPTensor3::z_slice_get_into_mlpp_vector(int p_index_z, Ref<MLPPVector> target) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@ -465,7 +465,7 @@ void MLPPTensor3::get_z_slice_into_mlpp_vector(int p_index_z, Ref<MLPPVector> ta
}
}
Ref<MLPPMatrix> MLPPTensor3::get_z_slice_mlpp_matrix(int p_index_z) const {
Ref<MLPPMatrix> MLPPTensor3::z_slice_get_mlpp_matrix(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref<MLPPMatrix>());
Ref<MLPPMatrix> ret;
@ -490,7 +490,7 @@ Ref<MLPPMatrix> MLPPTensor3::get_z_slice_mlpp_matrix(int p_index_z) const {
return ret;
}
void MLPPTensor3::get_z_slice_into_mlpp_matrix(int p_index_z, Ref<MLPPMatrix> target) const {
void MLPPTensor3::z_slice_get_into_mlpp_matrix(int p_index_z, Ref<MLPPMatrix> target) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@ -509,7 +509,7 @@ void MLPPTensor3::get_z_slice_into_mlpp_matrix(int p_index_z, Ref<MLPPMatrix> ta
}
}
void MLPPTensor3::set_z_slice_vector(int p_index_z, const Vector<real_t> &p_row) {
void MLPPTensor3::z_slice_set_vector(int p_index_z, const Vector<real_t> &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@ -525,7 +525,7 @@ void MLPPTensor3::set_z_slice_vector(int p_index_z, const Vector<real_t> &p_row)
}
}
void MLPPTensor3::set_z_slice_pool_vector(int p_index_z, const PoolRealArray &p_row) {
void MLPPTensor3::z_slice_set_pool_vector(int p_index_z, const PoolRealArray &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@ -542,7 +542,7 @@ void MLPPTensor3::set_z_slice_pool_vector(int p_index_z, const PoolRealArray &p_
}
}
void MLPPTensor3::set_z_slice_mlpp_vector(int p_index_z, const Ref<MLPPVector> &p_row) {
void MLPPTensor3::z_slice_set_mlpp_vector(int p_index_z, const Ref<MLPPVector> &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_row.is_valid());
@ -559,7 +559,7 @@ void MLPPTensor3::set_z_slice_mlpp_vector(int p_index_z, const Ref<MLPPVector> &
}
}
void MLPPTensor3::set_z_slice_mlpp_matrix(int p_index_z, const Ref<MLPPMatrix> &p_mat) {
void MLPPTensor3::z_slice_set_mlpp_matrix(int p_index_z, const Ref<MLPPMatrix> &p_mat) {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_mat.is_valid());
@ -576,7 +576,7 @@ void MLPPTensor3::set_z_slice_mlpp_matrix(int p_index_z, const Ref<MLPPMatrix> &
}
}
void MLPPTensor3::get_x_slice_into(int p_index_x, Ref<MLPPMatrix> target) const {
void MLPPTensor3::x_slice_get_into(int p_index_x, Ref<MLPPMatrix> target) const {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!target.is_valid());
@ -586,33 +586,33 @@ void MLPPTensor3::get_x_slice_into(int p_index_x, Ref<MLPPMatrix> target) const
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
target->set_element(z, y, get_element(p_index_x, y, z));
target->element_set(z, y, element_get(p_index_x, y, z));
}
}
}
Ref<MLPPMatrix> MLPPTensor3::get_x_slice(int p_index_x) const {
Ref<MLPPMatrix> MLPPTensor3::x_slice_get(int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, Ref<MLPPMatrix>());
Ref<MLPPMatrix> m;
m.instance();
get_x_slice_into(p_index_x, m);
x_slice_get_into(p_index_x, m);
return m;
}
void MLPPTensor3::set_x_slice(int p_index_x, const Ref<MLPPMatrix> &p_mat) {
void MLPPTensor3::x_slice_set(int p_index_x, const Ref<MLPPMatrix> &p_mat) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!p_mat.is_valid());
ERR_FAIL_COND(p_mat->size() != Size2i(_size.y, _size.z));
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
set_element(p_index_x, y, z, p_mat->get_element(z, y));
element_set(p_index_x, y, z, p_mat->element_get(z, y));
}
}
}
void MLPPTensor3::get_y_slice_into(int p_index_y, Ref<MLPPMatrix> target) const {
void MLPPTensor3::y_slice_get_into(int p_index_y, Ref<MLPPMatrix> target) const {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!target.is_valid());
@ -622,33 +622,33 @@ void MLPPTensor3::get_y_slice_into(int p_index_y, Ref<MLPPMatrix> target) const
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
target->set_element(z, x, get_element(x, p_index_y, z));
target->element_set(z, x, element_get(x, p_index_y, z));
}
}
}
Ref<MLPPMatrix> MLPPTensor3::get_y_slice(int p_index_y) const {
Ref<MLPPMatrix> MLPPTensor3::y_slice_get(int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<MLPPMatrix>());
Ref<MLPPMatrix> m;
m.instance();
get_y_slice_into(p_index_y, m);
y_slice_get_into(p_index_y, m);
return m;
}
void MLPPTensor3::set_y_slice(int p_index_y, const Ref<MLPPMatrix> &p_mat) {
void MLPPTensor3::y_slice_set(int p_index_y, const Ref<MLPPMatrix> &p_mat) {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!p_mat.is_valid());
ERR_FAIL_COND(p_mat->size() != Size2i(_size.y, _size.z));
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
set_element(x, p_index_y, z, p_mat->get_element(z, x));
element_set(x, p_index_y, z, p_mat->element_get(z, x));
}
}
}
void MLPPTensor3::add_z_slices_image(const Ref<Image> &p_img, const int p_channels) {
void MLPPTensor3::z_slices_add_image(const Ref<Image> &p_img, const int p_channels) {
ERR_FAIL_COND(!p_img.is_valid());
Size2i img_size = Size2i(p_img->get_width(), p_img->get_height());
@ -701,7 +701,7 @@ void MLPPTensor3::add_z_slices_image(const Ref<Image> &p_img, const int p_channe
Color c = img->get_pixel(x, y);
for (int i = 0; i < channel_count; ++i) {
set_element(y, x, start_channel + i, c[channels[i]]);
element_set(y, x, start_channel + i, c[channels[i]]);
}
}
}
@ -709,7 +709,7 @@ void MLPPTensor3::add_z_slices_image(const Ref<Image> &p_img, const int p_channe
img->unlock();
}
Ref<Image> MLPPTensor3::get_z_slice_image(const int p_index_z) const {
Ref<Image> MLPPTensor3::z_slice_get_image(const int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref<Image>());
Ref<Image> image;
@ -737,7 +737,7 @@ Ref<Image> MLPPTensor3::get_z_slice_image(const int p_index_z) const {
return image;
}
Ref<Image> MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
Ref<Image> MLPPTensor3::z_slices_get_image(const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
if (p_index_r != -1) {
ERR_FAIL_INDEX_V(p_index_r, _size.z, Ref<Image>());
}
@ -772,19 +772,19 @@ Ref<Image> MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_inde
Color c;
if (p_index_r != -1) {
c.r = get_element(y, x, p_index_r);
c.r = element_get(y, x, p_index_r);
}
if (p_index_g != -1) {
c.g = get_element(y, x, p_index_g);
c.g = element_get(y, x, p_index_g);
}
if (p_index_b != -1) {
c.b = get_element(y, x, p_index_b);
c.b = element_get(y, x, p_index_b);
}
if (p_index_a != -1) {
c.a = get_element(y, x, p_index_a);
c.a = element_get(y, x, p_index_a);
}
image->set_pixel(x, y, c);
@ -796,7 +796,7 @@ Ref<Image> MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_inde
return image;
}
void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels) const {
void MLPPTensor3::z_slice_get_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_target.is_valid());
@ -851,7 +851,7 @@ void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_
for (int x = 0; x < fms.x; ++x) {
Color c;
float e = get_element(y, x, p_index_z);
float e = element_get(y, x, p_index_z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@ -863,7 +863,7 @@ void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
void MLPPTensor3::z_slices_get_into_image(Ref<Image> p_target, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
ERR_FAIL_COND(!p_target.is_valid());
if (p_index_r != -1) {
@ -909,19 +909,19 @@ void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index
Color c;
if (p_index_r != -1) {
c.r = get_element(y, x, p_index_r);
c.r = element_get(y, x, p_index_r);
}
if (p_index_g != -1) {
c.g = get_element(y, x, p_index_g);
c.g = element_get(y, x, p_index_g);
}
if (p_index_b != -1) {
c.b = get_element(y, x, p_index_b);
c.b = element_get(y, x, p_index_b);
}
if (p_index_a != -1) {
c.a = get_element(y, x, p_index_a);
c.a = element_get(y, x, p_index_a);
}
p_target->set_pixel(x, y, c);
@ -931,7 +931,7 @@ void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index
p_target->unlock();
}
void MLPPTensor3::set_z_slice_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag) {
void MLPPTensor3::z_slice_set_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_z, _size.z);
@ -959,13 +959,13 @@ void MLPPTensor3::set_z_slice_image(const Ref<Image> &p_img, const int p_index_z
for (int x = 0; x < fms.x; ++x) {
Color c = img->get_pixel(x, y);
set_element(y, x, p_index_z, c[channel_index]);
element_set(y, x, p_index_z, c[channel_index]);
}
}
img->unlock();
}
void MLPPTensor3::set_z_slices_image(const Ref<Image> &p_img, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) {
void MLPPTensor3::z_slices_set_image(const Ref<Image> &p_img, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) {
ERR_FAIL_COND(!p_img.is_valid());
if (p_index_r != -1) {
@ -998,19 +998,19 @@ void MLPPTensor3::set_z_slices_image(const Ref<Image> &p_img, const int p_index_
Color c = img->get_pixel(x, y);
if (p_index_r != -1) {
set_element(y, x, p_index_r, c.r);
element_set(y, x, p_index_r, c.r);
}
if (p_index_g != -1) {
set_element(y, x, p_index_g, c.g);
element_set(y, x, p_index_g, c.g);
}
if (p_index_b != -1) {
set_element(y, x, p_index_b, c.b);
element_set(y, x, p_index_b, c.b);
}
if (p_index_a != -1) {
set_element(y, x, p_index_a, c.a);
element_set(y, x, p_index_a, c.a);
}
}
}
@ -1061,7 +1061,7 @@ void MLPPTensor3::set_from_image(const Ref<Image> &p_img, const int p_channels)
Color c = img->get_pixel(x, y);
for (int i = 0; i < channel_count; ++i) {
set_element(y, x, i, c[channels[i]]);
element_set(y, x, i, c[channels[i]]);
}
}
}
@ -1069,7 +1069,7 @@ void MLPPTensor3::set_from_image(const Ref<Image> &p_img, const int p_channels)
img->unlock();
}
Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
Ref<Image> MLPPTensor3::x_slice_get_image(const int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, Ref<Image>());
Ref<Image> image;
@ -1088,7 +1088,7 @@ Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
wptr[i] = static_cast<uint8_t>(get_element(p_index_x, y, z) * 255.0);
wptr[i] = static_cast<uint8_t>(element_get(p_index_x, y, z) * 255.0);
++i;
}
@ -1098,7 +1098,7 @@ Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
return image;
}
void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels) const {
void MLPPTensor3::x_slice_get_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!p_target.is_valid());
@ -1153,7 +1153,7 @@ void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_
for (int z = 0; z < fms.x; ++z) {
Color c;
float e = get_element(y, p_index_x, z);
float e = element_get(y, p_index_x, z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@ -1165,7 +1165,7 @@ void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
void MLPPTensor3::set_x_slice_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag) {
void MLPPTensor3::x_slice_set_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_x, _size.x);
@ -1193,14 +1193,14 @@ void MLPPTensor3::set_x_slice_image(const Ref<Image> &p_img, const int p_index_x
for (int z = 0; z < fms.x; ++z) {
Color c = img->get_pixel(z, y);
set_element(y, p_index_x, z, c[channel_index]);
element_set(y, p_index_x, z, c[channel_index]);
}
}
img->unlock();
}
Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
Ref<Image> MLPPTensor3::y_slice_get_image(const int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<Image>());
Ref<Image> image;
@ -1219,7 +1219,7 @@ Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
wptr[i] = static_cast<uint8_t>(get_element(x, p_index_y, z) * 255.0);
wptr[i] = static_cast<uint8_t>(element_get(x, p_index_y, z) * 255.0);
++i;
}
@ -1229,7 +1229,7 @@ Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
return image;
}
void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels) const {
void MLPPTensor3::y_slice_get_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!p_target.is_valid());
@ -1284,7 +1284,7 @@ void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_
for (int z = 0; z < fms.x; ++z) {
Color c;
float e = get_element(p_index_y, x, z);
float e = element_get(p_index_y, x, z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@ -1296,7 +1296,7 @@ void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
void MLPPTensor3::set_y_slice_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag) {
void MLPPTensor3::y_slice_set_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_y, _size.y);
@ -1324,7 +1324,7 @@ void MLPPTensor3::set_y_slice_image(const Ref<Image> &p_img, const int p_index_y
for (int x = 0; x < fms.x; ++x) {
Color c = img->get_pixel(x, z);
set_element(p_index_y, x, z, c[channel_index]);
element_set(p_index_y, x, z, c[channel_index]);
}
}
@ -2277,14 +2277,14 @@ void MLPPTensor3::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_data", "data"), &MLPPTensor3::set_data);
ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");
ClassDB::bind_method(D_METHOD("add_z_slice_pool_vector", "row"), &MLPPTensor3::add_z_slice_pool_vector);
ClassDB::bind_method(D_METHOD("add_z_slice_mlpp_vector", "row"), &MLPPTensor3::add_z_slice_mlpp_vector);
ClassDB::bind_method(D_METHOD("add_z_slice_mlpp_matrix", "matrix"), &MLPPTensor3::add_z_slice_mlpp_matrix);
ClassDB::bind_method(D_METHOD("z_slice_add_pool_vector", "row"), &MLPPTensor3::z_slice_add_pool_vector);
ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_matrix", "matrix"), &MLPPTensor3::z_slice_add_mlpp_matrix);
ClassDB::bind_method(D_METHOD("remove_z_slice", "index"), &MLPPTensor3::remove_z_slice);
ClassDB::bind_method(D_METHOD("remove_z_slice_unordered", "index"), &MLPPTensor3::remove_z_slice_unordered);
ClassDB::bind_method(D_METHOD("z_slice_remove", "index"), &MLPPTensor3::z_slice_remove);
ClassDB::bind_method(D_METHOD("z_slice_remove_unordered", "index"), &MLPPTensor3::z_slice_remove_unordered);
ClassDB::bind_method(D_METHOD("swap_z_slice", "index_1", "index_2"), &MLPPTensor3::swap_z_slice);
ClassDB::bind_method(D_METHOD("z_slice_swap", "index_1", "index_2"), &MLPPTensor3::z_slice_swap);
ClassDB::bind_method(D_METHOD("clear"), &MLPPTensor3::clear);
ClassDB::bind_method(D_METHOD("reset"), &MLPPTensor3::reset);
@ -2302,58 +2302,58 @@ void MLPPTensor3::_bind_methods() {
ClassDB::bind_method(D_METHOD("calculate_index", "index_y", "index_x", "index_z"), &MLPPTensor3::calculate_index);
ClassDB::bind_method(D_METHOD("calculate_z_slice_index", "index_z"), &MLPPTensor3::calculate_z_slice_index);
ClassDB::bind_method(D_METHOD("get_element_index", "index"), &MLPPTensor3::get_element_index);
ClassDB::bind_method(D_METHOD("set_element_index", "index", "val"), &MLPPTensor3::set_element_index);
ClassDB::bind_method(D_METHOD("element_get_index", "index"), &MLPPTensor3::element_get_index);
ClassDB::bind_method(D_METHOD("element_set_index", "index", "val"), &MLPPTensor3::element_set_index);
ClassDB::bind_method(D_METHOD("get_element", "index_y", "index_x", "index_z"), &MLPPTensor3::get_element);
ClassDB::bind_method(D_METHOD("set_element", "index_y", "index_x", "index_z", "val"), &MLPPTensor3::set_element);
ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x", "index_z"), &MLPPTensor3::element_get);
ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "index_z", "val"), &MLPPTensor3::element_set);
ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y", "index_z"), &MLPPTensor3::get_row_pool_vector);
ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y", "index_z"), &MLPPTensor3::get_row_mlpp_vector);
ClassDB::bind_method(D_METHOD("get_row_into_mlpp_vector", "index_y", "index_z", "target"), &MLPPTensor3::get_row_into_mlpp_vector);
ClassDB::bind_method(D_METHOD("row_get_pool_vector", "index_y", "index_z"), &MLPPTensor3::row_get_pool_vector);
ClassDB::bind_method(D_METHOD("row_get_mlpp_vector", "index_y", "index_z"), &MLPPTensor3::row_get_mlpp_vector);
ClassDB::bind_method(D_METHOD("row_get_into_mlpp_vector", "index_y", "index_z", "target"), &MLPPTensor3::row_get_into_mlpp_vector);
ClassDB::bind_method(D_METHOD("set_row_pool_vector", "index_y", "index_z", "row"), &MLPPTensor3::set_row_pool_vector);
ClassDB::bind_method(D_METHOD("set_row_mlpp_vector", "index_y", "index_z", "row"), &MLPPTensor3::set_row_mlpp_vector);
ClassDB::bind_method(D_METHOD("row_set_pool_vector", "index_y", "index_z", "row"), &MLPPTensor3::row_set_pool_vector);
ClassDB::bind_method(D_METHOD("row_set_mlpp_vector", "index_y", "index_z", "row"), &MLPPTensor3::row_set_mlpp_vector);
ClassDB::bind_method(D_METHOD("get_z_slice_pool_vector", "index_z"), &MLPPTensor3::get_z_slice_pool_vector);
ClassDB::bind_method(D_METHOD("get_z_slice_mlpp_vector", "index_z"), &MLPPTensor3::get_z_slice_mlpp_vector);
ClassDB::bind_method(D_METHOD("get_z_slice_into_mlpp_vector", "index_z", "target"), &MLPPTensor3::get_z_slice_into_mlpp_vector);
ClassDB::bind_method(D_METHOD("z_slice_get_pool_vector", "index_z"), &MLPPTensor3::z_slice_get_pool_vector);
ClassDB::bind_method(D_METHOD("z_slice_get_mlpp_vector", "index_z"), &MLPPTensor3::z_slice_get_mlpp_vector);
ClassDB::bind_method(D_METHOD("z_slice_get_into_mlpp_vector", "index_z", "target"), &MLPPTensor3::z_slice_get_into_mlpp_vector);
ClassDB::bind_method(D_METHOD("get_z_slice_mlpp_matrix", "index_z"), &MLPPTensor3::get_z_slice_mlpp_matrix);
ClassDB::bind_method(D_METHOD("get_z_slice_into_mlpp_matrix", "index_z", "target"), &MLPPTensor3::get_z_slice_into_mlpp_matrix);
ClassDB::bind_method(D_METHOD("z_slice_get_mlpp_matrix", "index_z"), &MLPPTensor3::z_slice_get_mlpp_matrix);
ClassDB::bind_method(D_METHOD("z_slice_get_into_mlpp_matrix", "index_z", "target"), &MLPPTensor3::z_slice_get_into_mlpp_matrix);
ClassDB::bind_method(D_METHOD("set_z_slice_pool_vector", "index_z", "row"), &MLPPTensor3::set_z_slice_pool_vector);
ClassDB::bind_method(D_METHOD("set_z_slice_mlpp_vector", "index_z", "row"), &MLPPTensor3::set_z_slice_mlpp_vector);
ClassDB::bind_method(D_METHOD("set_z_slice_mlpp_matrix", "index_z", "mat"), &MLPPTensor3::set_z_slice_mlpp_matrix);
ClassDB::bind_method(D_METHOD("z_slice_set_pool_vector", "index_z", "row"), &MLPPTensor3::z_slice_set_pool_vector);
ClassDB::bind_method(D_METHOD("z_slice_set_mlpp_vector", "index_z", "row"), &MLPPTensor3::z_slice_set_mlpp_vector);
ClassDB::bind_method(D_METHOD("z_slice_set_mlpp_matrix", "index_z", "mat"), &MLPPTensor3::z_slice_set_mlpp_matrix);
ClassDB::bind_method(D_METHOD("get_x_slice_into", "index_x", "target"), &MLPPTensor3::get_x_slice_into);
ClassDB::bind_method(D_METHOD("get_x_slice", "index_x"), &MLPPTensor3::get_x_slice);
ClassDB::bind_method(D_METHOD("set_x_slice", "index_x", "mat"), &MLPPTensor3::set_x_slice);
ClassDB::bind_method(D_METHOD("x_slice_get_into", "index_x", "target"), &MLPPTensor3::x_slice_get_into);
ClassDB::bind_method(D_METHOD("x_slice_get", "index_x"), &MLPPTensor3::x_slice_get);
ClassDB::bind_method(D_METHOD("x_slice_set", "index_x", "mat"), &MLPPTensor3::x_slice_set);
ClassDB::bind_method(D_METHOD("get_y_slice_into", "index_y", "target"), &MLPPTensor3::get_y_slice_into);
ClassDB::bind_method(D_METHOD("get_y_slice", "index_y"), &MLPPTensor3::get_y_slice);
ClassDB::bind_method(D_METHOD("set_y_slice", "index_y", "mat"), &MLPPTensor3::set_y_slice);
ClassDB::bind_method(D_METHOD("y_slice_get_into", "index_y", "target"), &MLPPTensor3::y_slice_get_into);
ClassDB::bind_method(D_METHOD("y_slice_get", "index_y"), &MLPPTensor3::y_slice_get);
ClassDB::bind_method(D_METHOD("y_slice_set", "index_y", "mat"), &MLPPTensor3::y_slice_set);
ClassDB::bind_method(D_METHOD("add_z_slices_image", "img", "channels"), &MLPPTensor3::add_z_slices_image, IMAGE_CHANNEL_FLAG_RGBA);
ClassDB::bind_method(D_METHOD("z_slices_add_image", "img", "channels"), &MLPPTensor3::z_slices_add_image, IMAGE_CHANNEL_FLAG_RGBA);
ClassDB::bind_method(D_METHOD("get_z_slice_image", "index_z"), &MLPPTensor3::get_z_slice_image);
ClassDB::bind_method(D_METHOD("get_z_slices_image", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::get_z_slices_image, -1, -1, -1, -1);
ClassDB::bind_method(D_METHOD("z_slice_get_image", "index_z"), &MLPPTensor3::z_slice_get_image);
ClassDB::bind_method(D_METHOD("z_slices_get_image", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_get_image, -1, -1, -1, -1);
ClassDB::bind_method(D_METHOD("get_z_slice_into_image", "target", "index_z", "target_channels"), &MLPPTensor3::get_z_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("get_z_slices_into_image", "target", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::get_z_slices_into_image, -1, -1, -1, -1);
ClassDB::bind_method(D_METHOD("z_slice_get_into_image", "target", "index_z", "target_channels"), &MLPPTensor3::z_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("z_slices_get_into_image", "target", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_get_into_image, -1, -1, -1, -1);
ClassDB::bind_method(D_METHOD("set_z_slice_image", "img", "index_z", "image_channel_flag"), &MLPPTensor3::set_z_slice_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("set_z_slices_image", "img", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::set_z_slices_image);
ClassDB::bind_method(D_METHOD("z_slice_set_image", "img", "index_z", "image_channel_flag"), &MLPPTensor3::z_slice_set_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("z_slices_set_image", "img", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_set_image);
ClassDB::bind_method(D_METHOD("set_from_image", "img", "channels"), &MLPPTensor3::set_from_image, IMAGE_CHANNEL_FLAG_RGBA);
ClassDB::bind_method(D_METHOD("get_x_slice_image", "index_x"), &MLPPTensor3::get_x_slice_image);
ClassDB::bind_method(D_METHOD("get_x_slice_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::get_x_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("set_x_slice_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::set_x_slice_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("x_slice_get_image", "index_x"), &MLPPTensor3::x_slice_get_image);
ClassDB::bind_method(D_METHOD("x_slice_get_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::x_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("x_slice_set_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::x_slice_set_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("get_y_slice_image", "index_x"), &MLPPTensor3::get_y_slice_image);
ClassDB::bind_method(D_METHOD("get_y_slice_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::get_y_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("set_y_slice_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::set_y_slice_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("y_slice_get_image", "index_y"), &MLPPTensor3::y_slice_get_image);
ClassDB::bind_method(D_METHOD("y_slice_get_into_image", "target", "index_y", "target_channels"), &MLPPTensor3::y_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
ClassDB::bind_method(D_METHOD("y_slice_set_image", "img", "index_y", "image_channel_flag"), &MLPPTensor3::y_slice_set_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("fill", "val"), &MLPPTensor3::fill);

View File

@ -23,7 +23,7 @@ class MLPPTensor3 : public Resource {
public:
Array get_data();
void set_data(const Array &p_from);
_FORCE_INLINE_ real_t *ptrw() {
return _data;
}
@ -32,17 +32,17 @@ public:
return _data;
}
void add_z_slice(const Vector<real_t> &p_row);
void add_z_slice_pool_vector(const PoolRealArray &p_row);
void add_z_slice_mlpp_vector(const Ref<MLPPVector> &p_row);
void add_z_slice_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix);
void remove_z_slice(int p_index);
void z_slice_add(const Vector<real_t> &p_row);
void z_slice_add_pool_vector(const PoolRealArray &p_row);
void z_slice_add_mlpp_vector(const Ref<MLPPVector> &p_row);
void z_slice_add_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix);
void z_slice_remove(int p_index);
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
void remove_z_slice_unordered(int p_index);
void z_slice_remove_unordered(int p_index);
void swap_z_slice(int p_index_1, int p_index_2);
void z_slice_swap(int p_index_1, int p_index_2);
_FORCE_INLINE_ void clear() { resize(Size3i()); }
_FORCE_INLINE_ void reset() {
@ -79,19 +79,19 @@ public:
return _data[p_index];
}
_FORCE_INLINE_ real_t get_element_index(int p_index) const {
_FORCE_INLINE_ real_t element_get_index(int p_index) const {
ERR_FAIL_INDEX_V(p_index, data_size(), 0);
return _data[p_index];
}
_FORCE_INLINE_ void set_element_index(int p_index, real_t p_val) {
_FORCE_INLINE_ void element_set_index(int p_index, real_t p_val) {
ERR_FAIL_INDEX(p_index, data_size());
_data[p_index] = p_val;
}
_FORCE_INLINE_ real_t get_element(int p_index_y, int p_index_x, int p_index_z) const {
_FORCE_INLINE_ real_t element_get(int p_index_y, int p_index_x, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, 0);
ERR_FAIL_INDEX_V(p_index_y, _size.y, 0);
ERR_FAIL_INDEX_V(p_index_z, _size.z, 0);
@ -99,7 +99,7 @@ public:
return _data[p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z];
}
_FORCE_INLINE_ void set_element(int p_index_y, int p_index_x, int p_index_z, real_t p_val) {
_FORCE_INLINE_ void element_set(int p_index_y, int p_index_x, int p_index_z, real_t p_val) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@ -107,39 +107,39 @@ public:
_data[p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z] = p_val;
}
Vector<real_t> get_row_vector(int p_index_y, int p_index_z) const;
PoolRealArray get_row_pool_vector(int p_index_y, int p_index_z) const;
Ref<MLPPVector> get_row_mlpp_vector(int p_index_y, int p_index_z) const;
void get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const;
Vector<real_t> row_get_vector(int p_index_y, int p_index_z) const;
PoolRealArray row_get_pool_vector(int p_index_y, int p_index_z) const;
Ref<MLPPVector> row_get_mlpp_vector(int p_index_y, int p_index_z) const;
void row_get_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const;
void set_row_vector(int p_index_y, int p_index_z, const Vector<real_t> &p_row);
void set_row_pool_vector(int p_index_y, int p_index_z, const PoolRealArray &p_row);
void set_row_mlpp_vector(int p_index_y, int p_index_z, const Ref<MLPPVector> &p_row);
void row_set_vector(int p_index_y, int p_index_z, const Vector<real_t> &p_row);
void row_set_pool_vector(int p_index_y, int p_index_z, const PoolRealArray &p_row);
void row_set_mlpp_vector(int p_index_y, int p_index_z, const Ref<MLPPVector> &p_row);
Vector<real_t> get_z_slice_vector(int p_index_z) const;
PoolRealArray get_z_slice_pool_vector(int p_index_z) const;
Ref<MLPPVector> get_z_slice_mlpp_vector(int p_index_z) const;
void get_z_slice_into_mlpp_vector(int p_index_z, Ref<MLPPVector> target) const;
Ref<MLPPMatrix> get_z_slice_mlpp_matrix(int p_index_z) const;
void get_z_slice_into_mlpp_matrix(int p_index_z, Ref<MLPPMatrix> target) const;
Vector<real_t> z_slice_get_vector(int p_index_z) const;
PoolRealArray z_slice_get_pool_vector(int p_index_z) const;
Ref<MLPPVector> z_slice_get_mlpp_vector(int p_index_z) const;
void z_slice_get_into_mlpp_vector(int p_index_z, Ref<MLPPVector> target) const;
Ref<MLPPMatrix> z_slice_get_mlpp_matrix(int p_index_z) const;
void z_slice_get_into_mlpp_matrix(int p_index_z, Ref<MLPPMatrix> target) const;
void set_z_slice_vector(int p_index_z, const Vector<real_t> &p_row);
void set_z_slice_pool_vector(int p_index_z, const PoolRealArray &p_row);
void set_z_slice_mlpp_vector(int p_index_z, const Ref<MLPPVector> &p_row);
void set_z_slice_mlpp_matrix(int p_index_z, const Ref<MLPPMatrix> &p_mat);
void z_slice_set_vector(int p_index_z, const Vector<real_t> &p_row);
void z_slice_set_pool_vector(int p_index_z, const PoolRealArray &p_row);
void z_slice_set_mlpp_vector(int p_index_z, const Ref<MLPPVector> &p_row);
void z_slice_set_mlpp_matrix(int p_index_z, const Ref<MLPPMatrix> &p_mat);
//TODO resize() need to be reworked for add and remove to work, in any other direction than z
//void add_x_slice(const Ref<MLPPMatrix> &p_matrix);
//void remove_x_slice(int p_index);
void get_x_slice_into(int p_index_x, Ref<MLPPMatrix> target) const;
Ref<MLPPMatrix> get_x_slice(int p_index_x) const;
void set_x_slice(int p_index_x, const Ref<MLPPMatrix> &p_mat);
//void x_slice_add(const Ref<MLPPMatrix> &p_matrix);
//void x_slice_remove(int p_index);
void x_slice_get_into(int p_index_x, Ref<MLPPMatrix> target) const;
Ref<MLPPMatrix> x_slice_get(int p_index_x) const;
void x_slice_set(int p_index_x, const Ref<MLPPMatrix> &p_mat);
//void add_y_slice(const Ref<MLPPMatrix> &p_matrix);
//void remove_y_slice(int p_index);
void get_y_slice_into(int p_index_y, Ref<MLPPMatrix> target) const;
Ref<MLPPMatrix> get_y_slice(int p_index_y) const;
void set_y_slice(int p_index_y, const Ref<MLPPMatrix> &p_mat);
//void y_slice_add(const Ref<MLPPMatrix> &p_matrix);
//void y_slice_remove(int p_index);
void y_slice_get_into(int p_index_y, Ref<MLPPMatrix> target) const;
Ref<MLPPMatrix> y_slice_get(int p_index_y) const;
void y_slice_set(int p_index_y, const Ref<MLPPMatrix> &p_mat);
public:
//Image api
@ -159,28 +159,28 @@ public:
IMAGE_CHANNEL_FLAG_RGBA = IMAGE_CHANNEL_FLAG_R | IMAGE_CHANNEL_FLAG_G | IMAGE_CHANNEL_FLAG_B | IMAGE_CHANNEL_FLAG_A,
};
void add_z_slices_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
void z_slices_add_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
Ref<Image> get_z_slice_image(const int p_index_z) const;
Ref<Image> get_z_slices_image(const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1) const;
Ref<Image> z_slice_get_image(const int p_index_z) const;
Ref<Image> z_slices_get_image(const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1) const;
void get_z_slice_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void get_z_slices_into_image(Ref<Image> p_target, const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1) const;
void z_slice_get_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void z_slices_get_into_image(Ref<Image> p_target, const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1) const;
void set_z_slice_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
void set_z_slices_image(const Ref<Image> &p_img, const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1);
void z_slice_set_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
void z_slices_set_image(const Ref<Image> &p_img, const int p_index_r = -1, const int p_index_g = -1, const int p_index_b = -1, const int p_index_a = -1);
void set_from_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
//void add_x_slices_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
Ref<Image> get_x_slice_image(const int p_index_x) const;
void get_x_slice_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void set_x_slice_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
//void x_slices_add_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
Ref<Image> x_slice_get_image(const int p_index_x) const;
void x_slice_get_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void x_slice_set_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
//void add_y_slices_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
Ref<Image> get_y_slice_image(const int p_index_y) const;
void get_y_slice_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void set_y_slice_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
//void y_slices_add_image(const Ref<Image> &p_img, const int p_channels = IMAGE_CHANNEL_FLAG_RGBA);
Ref<Image> y_slice_get_image(const int p_index_y) const;
void y_slice_get_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels = IMAGE_CHANNEL_FLAG_RGB) const;
void y_slice_set_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag = IMAGE_CHANNEL_FLAG_R);
public:
//math api

View File

@ -1249,7 +1249,7 @@ Ref<MLPPMatrix> MLPPVector::outer_product(const Ref<MLPPVector> &b) const {
real_t curr_a = a_ptr[i];
for (int j = 0; j < sm.x; ++j) {
C->set_element(i, j, curr_a * b_ptr[j]);
C->element_set(i, j, curr_a * b_ptr[j]);
}
}
@ -1373,8 +1373,8 @@ void MLPPVector::_bind_methods() {
ClassDB::bind_method(D_METHOD("size"), &MLPPVector::size);
ClassDB::bind_method(D_METHOD("resize", "size"), &MLPPVector::resize);
ClassDB::bind_method(D_METHOD("get_element", "index"), &MLPPVector::get_element);
ClassDB::bind_method(D_METHOD("set_element", "index", "val"), &MLPPVector::set_element);
ClassDB::bind_method(D_METHOD("element_get", "index"), &MLPPVector::element_get);
ClassDB::bind_method(D_METHOD("element_set", "index", "val"), &MLPPVector::element_set);
ClassDB::bind_method(D_METHOD("fill", "val"), &MLPPVector::fill);
ClassDB::bind_method(D_METHOD("insert", "pos", "val"), &MLPPVector::insert);

View File

@ -69,12 +69,12 @@ public:
return _data[p_index];
}
_FORCE_INLINE_ real_t get_element(int p_index) const {
_FORCE_INLINE_ real_t element_get(int p_index) const {
ERR_FAIL_INDEX_V(p_index, _size, 0);
return _data[p_index];
}
_FORCE_INLINE_ void set_element(int p_index, real_t p_val) {
_FORCE_INLINE_ void element_set(int p_index, real_t p_val) {
ERR_FAIL_INDEX(p_index, _size);
_data[p_index] = p_val;
}

View File

@ -182,15 +182,15 @@ void MLPPLinReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_element = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_element);
real_t output_element_set = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_element_set);
real_t y_hat = evaluatev(input_set_row_tmp);
y_hat_tmp->set_element(0, output_set_element);
y_hat_tmp->element_set(0, output_element_set);
cost_prev = cost(y_hat_tmp, output_set_row_tmp);
real_t error = y_hat - output_set_element;
real_t error = y_hat - output_element_set;
// Weight updation
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error, input_set_row_tmp));
@ -678,14 +678,14 @@ void MLPPLinReg::normal_equation() {
for (int i = 0; i < input_set_t->size().y; i++) {
input_set_t->get_row_into_mlpp_vector(i, input_set_t_row_tmp);
x_means->set_element(i, stat.meanv(input_set_t_row_tmp));
x_means->element_set(i, stat.meanv(input_set_t_row_tmp));
}
Ref<MLPPVector> temp;
//temp.resize(_k);
temp = alg.mat_vec_multnv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multnv(alg.transposenm(_input_set), _output_set));
ERR_FAIL_COND_MSG(Math::is_nan(temp->get_element(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent.");
ERR_FAIL_COND_MSG(Math::is_nan(temp->element_get(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent.");
if (_reg == MLPPReg::REGULARIZATION_TYPE_RIDGE) {
_weights = alg.mat_vec_multnv(alg.inversenm(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multnv(alg.transposenm(_input_set), _output_set));

View File

@ -170,23 +170,23 @@ void MLPPLogReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
y_hat_tmp.instance();
y_hat_tmp->resize(1);
Ref<MLPPVector> output_set_element_tmp;
output_set_element_tmp.instance();
output_set_element_tmp->resize(1);
Ref<MLPPVector> output_element_set_tmp;
output_element_set_tmp.instance();
output_element_set_tmp->resize(1);
while (true) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_row_tmp);
real_t output_set_element = _output_set->get_element(output_index);
output_set_element_tmp->set_element(0, output_set_element);
real_t output_element_set = _output_set->element_get(output_index);
output_element_set_tmp->element_set(0, output_element_set);
real_t y_hat = evaluatev(input_row_tmp);
y_hat_tmp->set_element(0, y_hat);
y_hat_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_tmp, output_set_element_tmp);
cost_prev = cost(y_hat_tmp, output_element_set_tmp);
real_t error = y_hat - output_set_element;
real_t error = y_hat - output_element_set;
// Weight updation
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error, input_row_tmp));
@ -198,7 +198,7 @@ void MLPPLogReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
y_hat = evaluatev(input_row_tmp);
if (ui) {
MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat_tmp, output_set_element_tmp));
MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat_tmp, output_element_set_tmp));
MLPPUtilities::print_ui_vb(_weights, _bias);
}

View File

@ -173,11 +173,11 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
int output_Index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_Index, input_set_row_tmp);
real_t output_element = _output_set->get_element(output_Index);
output_set_row_tmp->set_element(0, output_element);
real_t output_element = _output_set->element_get(output_Index);
output_set_row_tmp->element_set(0, output_element);
real_t ly_hat = evaluatev(input_set_row_tmp);
y_hat_row_tmp->set_element(0, ly_hat);
y_hat_row_tmp->element_set(0, ly_hat);
propagatev(input_set_row_tmp, lz2, la2);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
real_t error = ly_hat - output_element;

View File

@ -58,7 +58,7 @@ Ref<MLPPVector> MLPPMultinomialNB::model_set_test(const Ref<MLPPMatrix> &X) {
for (int i = 0; i < x_size.y; i++) {
X->get_row_into_mlpp_vector(i, x_row_tmp);
y_hat->set_element(i, model_test(x_row_tmp));
y_hat->element_set(i, model_test(x_row_tmp));
}
return y_hat;
@ -78,8 +78,8 @@ real_t MLPPMultinomialNB::model_test(const Ref<MLPPVector> &x) {
for (int j = 0; j < x_size; j++) {
for (int k = 0; k < vocab_size; k++) {
real_t x_j = x->get_element(j);
real_t vocab_k = _vocab->get_element(k);
real_t x_j = x->element_get(j);
real_t vocab_k = _vocab->element_get(k);
if (Math::is_equal_approx(x_j, vocab_k)) {
for (int p = _class_num - 1; p >= 0; p--) {
@ -92,7 +92,7 @@ real_t MLPPMultinomialNB::model_test(const Ref<MLPPVector> &x) {
}
for (int i = 0; i < _priors->size(); i++) {
score[i] += std::log(_priors->get_element(i));
score[i] += std::log(_priors->element_get(i));
}
int max_index = 0;
@ -159,7 +159,7 @@ void MLPPMultinomialNB::compute_theta() {
// Setting all values in the hasmap by default to 0.
for (int i = _class_num - 1; i >= 0; i--) {
for (int j = 0; j < vocab_size; j++) {
_theta.write[i][_vocab->get_element(j)] = 0;
_theta.write[i][_vocab->element_get(j)] = 0;
}
}
@ -167,7 +167,7 @@ void MLPPMultinomialNB::compute_theta() {
for (int i = 0; i < input_set_size.y; i++) {
for (int j = 0; j < input_set_size.x; j++) {
_theta.write[_output_set->get_element(i)][_input_set->get_element(i, j)]++;
_theta.write[_output_set->element_get(i)][_input_set->element_get(i, j)]++;
}
}
@ -175,7 +175,7 @@ void MLPPMultinomialNB::compute_theta() {
uint32_t theta_i_size = _theta[i].size();
for (uint32_t j = 0; j < theta_i_size; j++) {
_theta.write[i][j] /= _priors->get_element(i) * _y_hat->size();
_theta.write[i][j] /= _priors->element_get(i) * _y_hat->size();
}
}
}
@ -194,8 +194,8 @@ void MLPPMultinomialNB::evaluate() {
// Easy computation of priors, i.e. Pr(C_k)
_priors->resize(_class_num);
for (int ii = 0; ii < _output_set->size(); ii++) {
int osii = static_cast<int>(_output_set->get_element(ii));
_priors->set_element(osii, _priors->get_element(osii) + 1);
int osii = static_cast<int>(_output_set->element_get(ii));
_priors->element_set(osii, _priors->element_get(osii) + 1);
}
_priors = alg.scalar_multiplynv(real_t(1) / real_t(output_set_size), _priors);
@ -205,8 +205,8 @@ void MLPPMultinomialNB::evaluate() {
for (int j = 0; j < input_set_size.y; j++) {
for (int k = 0; k < _vocab->size(); k++) {
real_t input_set_i_j = _input_set->get_element(i, j);
real_t vocab_k = _vocab->get_element(k);
real_t input_set_i_j = _input_set->element_get(i, j);
real_t vocab_k = _vocab->element_get(k);
if (Math::is_equal_approx(input_set_i_j, vocab_k)) {
real_t theta_i_k = _theta[i][vocab_k];
@ -222,7 +222,7 @@ void MLPPMultinomialNB::evaluate() {
int priors_size = _priors->size();
for (int ii = 0; ii < priors_size; ii++) {
score[ii] += Math::log(_priors->get_element(ii));
score[ii] += Math::log(_priors->element_get(ii));
score[ii] = Math::exp(score[ii]);
}
@ -240,7 +240,7 @@ void MLPPMultinomialNB::evaluate() {
}
}
_y_hat->set_element(i, max_index);
_y_hat->element_set(i, max_index);
}
}

View File

@ -35,7 +35,7 @@ Vector<Vector<real_t>> MLPPOutlierFinder::model_set_test(const Ref<MLPPMatrix> &
real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);
for (int j = 0; j < input_set_size.x; ++j) {
real_t input_set_i_j = input_set->get_element(i, j);
real_t input_set_i_j = input_set->element_get(i, j);
real_t z = (input_set_i_j - meanv) / s_dev_v;
@ -80,7 +80,7 @@ PoolVector2iArray MLPPOutlierFinder::model_set_test_indices(const Ref<MLPPMatrix
real_t s_dev_v = stat.standard_deviationv(input_set_i_row_tmp);
for (int j = 0; j < input_set_size.x; ++j) {
real_t z = (input_set->get_element(i, j) - meanv) / s_dev_v;
real_t z = (input_set->element_get(i, j) - meanv) / s_dev_v;
if (ABS(z) > _threshold) {
outliers.push_back(Vector2i(j, i));

View File

@ -37,7 +37,7 @@ Ref<MLPPMatrix> MLPPPCA::principal_components() {
for (int i = 0; i < _k; ++i) {
for (int j = 0; j < svr_res_u_size.y; ++j) {
_u_reduce->set_element(j, i, svr_res.U->get_element(j, i));
_u_reduce->element_set(j, i, svr_res.U->element_get(j, i));
}
}

View File

@ -173,13 +173,13 @@ void MLPPProbitReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_entry = _output_set->get_element(output_index);
real_t output_set_entry = _output_set->element_get(output_index);
real_t y_hat = evaluatev(input_set_row_tmp);
real_t z = propagatev(input_set_row_tmp);
y_hat_tmp->set_element(0, y_hat);
output_set_tmp->set_element(0, output_set_entry);
y_hat_tmp->element_set(0, y_hat);
output_set_tmp->element_set(0, output_set_entry);
cost_prev = cost(y_hat_tmp, output_set_tmp);
@ -235,7 +235,7 @@ void MLPPProbitReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_siz
Ref<MLPPVector> y_hat = evaluatem(current_input);
real_t z = propagatev(current_output);
z_tmp->set_element(0, z);
z_tmp->element_set(0, z);
cost_prev = cost(y_hat, current_output);

View File

@ -164,7 +164,7 @@ void MLPPReg::_bind_methods() {
real_t MLPPReg::reg_deriv_termvr(const Ref<MLPPVector> &weights, real_t lambda, real_t alpha, MLPPReg::RegularizationType reg, int j) {
MLPPActivation act;
real_t wj = weights->get_element(j);
real_t wj = weights->element_get(j);
if (reg == REGULARIZATION_TYPE_RIDGE) {
return lambda * wj;
@ -189,7 +189,7 @@ real_t MLPPReg::reg_deriv_termvr(const Ref<MLPPVector> &weights, real_t lambda,
real_t MLPPReg::reg_deriv_termmr(const Ref<MLPPMatrix> &weights, real_t lambda, real_t alpha, MLPPReg::RegularizationType reg, int i, int j) {
MLPPActivation act;
real_t wj = weights->get_element(i, j);
real_t wj = weights->element_get(i, j);
if (reg == REGULARIZATION_TYPE_RIDGE) {
return lambda * wj;

View File

@ -151,19 +151,19 @@ void MLPPSVC::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_indx = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_indx);
real_t output_set_indx = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_set_indx);
//real_t y_hat = Evaluate(input_set_row_tmp);
real_t z = propagatev(input_set_row_tmp);
z_row_tmp->set_element(0, z);
z_row_tmp->element_set(0, z);
cost_prev = cost(z_row_tmp, output_set_row_tmp, _weights, _c);
Ref<MLPPVector> cost_deriv_vec = mlpp_cost.hinge_loss_derivwv(z_row_tmp, output_set_row_tmp, _c);
real_t cost_deriv = cost_deriv_vec->get_element(0);
real_t cost_deriv = cost_deriv_vec->element_get(0);
// Weight Updation
_weights->sub(input_set_row_tmp->scalar_multiplyn(learning_rate * cost_deriv));

View File

@ -198,11 +198,11 @@ void MLPPTanhReg::train_sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
real_t output_set_entry = _output_set->get_element(output_index);
output_set_row_tmp->set_element(0, output_set_entry);
real_t output_set_entry = _output_set->element_get(output_index);
output_set_row_tmp->element_set(0, output_set_entry);
real_t y_hat = evaluatev(input_set_row_tmp);
y_hat_row_tmp->set_element(0, y_hat);
y_hat_row_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);

View File

@ -311,7 +311,7 @@ real_t MLPPUtilities::performance_vec(const Ref<MLPPVector> &y_hat, const Ref<ML
real_t correct = 0;
for (int i = 0; i < y_hat->size(); i++) {
if (Math::is_equal_approx(y_hat->get_element(i), output_set->get_element(i))) {
if (Math::is_equal_approx(y_hat->element_get(i), output_set->element_get(i))) {
correct++;
}
}
@ -326,7 +326,7 @@ real_t MLPPUtilities::performance_mat(const Ref<MLPPMatrix> &y_hat, const Ref<ML
int sub_correct = 0;
for (int j = 0; j < y_hat->size().x; j++) {
if (Math::round(y_hat->get_element(i, j)) == y->get_element(i, j)) {
if (Math::round(y_hat->element_get(i, j)) == y->element_get(i, j)) {
sub_correct++;
}
@ -342,7 +342,7 @@ real_t MLPPUtilities::performance_pool_int_array_vec(PoolIntArray y_hat, const R
real_t correct = 0;
for (int i = 0; i < y_hat.size(); i++) {
if (y_hat[i] == Math::round(output_set->get_element(i))) {
if (y_hat[i] == Math::round(output_set->element_get(i))) {
correct++;
}
}
@ -663,7 +663,7 @@ MLPPUtilities::CreateMiniBatchMVBatch MLPPUtilities::create_mini_batchesmv(const
input_set->get_row_into_mlpp_vector(main_indx, row_tmp);
current_input_set->set_row_mlpp_vector(j, row_tmp);
current_output_set->set_element(j, output_set->get_element(j));
current_output_set->element_set(j, output_set->element_get(j));
}
ret.input_sets.push_back(current_input_set);

View File

@ -1259,7 +1259,7 @@ void MLPPTests::is_approx_equals_vec(Ref<MLPPVector> a, Ref<MLPPVector> b, const
}
for (int i = 0; i < a->size(); ++i) {
if (!Math::is_equal_approx(a->get_element(i), b->get_element(i))) {
if (!Math::is_equal_approx(a->element_get(i), b->element_get(i))) {
goto IAEDVEC_FAILED;
}
}

View File

@ -1133,7 +1133,7 @@ void MLPPTestsOld::is_approx_equals_vec(Ref<MLPPVector> a, Ref<MLPPVector> b, co
}
for (int i = 0; i < a->size(); ++i) {
if (!Math::is_equal_approx(a->get_element(i), b->get_element(i))) {
if (!Math::is_equal_approx(a->element_get(i), b->element_get(i))) {
goto IAEDVEC_FAILED;
}
}