diff --git a/doc_classes/MLPPMatrix.xml b/doc_classes/MLPPMatrix.xml
index 30e0582..a6ee3ff 100644
--- a/doc_classes/MLPPMatrix.xml
+++ b/doc_classes/MLPPMatrix.xml
@@ -318,14 +318,14 @@
-
+
-
+
@@ -616,7 +616,7 @@
-
+
@@ -624,7 +624,7 @@
-
+
diff --git a/doc_classes/MLPPTensor3.xml b/doc_classes/MLPPTensor3.xml
index bed3689..9b6c1c5 100644
--- a/doc_classes/MLPPTensor3.xml
+++ b/doc_classes/MLPPTensor3.xml
@@ -156,7 +156,7 @@
-
+
@@ -164,7 +164,7 @@
-
+
@@ -417,7 +417,7 @@
-
+
@@ -426,7 +426,7 @@
-
+
diff --git a/doc_classes/MLPPVector.xml b/doc_classes/MLPPVector.xml
index 42384d4..db51b2f 100644
--- a/doc_classes/MLPPVector.xml
+++ b/doc_classes/MLPPVector.xml
@@ -220,7 +220,7 @@
-
+
@@ -415,7 +415,7 @@
-
+
diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 5645999..8defe7f 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -1093,9 +1093,9 @@ Ref MLPPActivation::softmax_deriv_normv(const Ref &z) {
for (int i = 0; i < z_size; ++i) {
for (int j = 0; j < z_size; ++j) {
if (i == j) {
- deriv->set_element(i, j, a_ptr[i] * (1 - a_ptr[i]));
+ deriv->element_set(i, j, a_ptr[i] * (1 - a_ptr[i]));
} else {
- deriv->set_element(i, j, -a_ptr[i] * a_ptr[j]);
+ deriv->element_set(i, j, -a_ptr[i] * a_ptr[j]);
}
}
}
@@ -1161,9 +1161,9 @@ Ref MLPPActivation::softmax_deriv_derivv(const Ref &z) {
for (int i = 0; i < z_size; ++i) {
for (int j = 0; j < z_size; ++j) {
if (i == j) {
- deriv->set_element(i, j, a_ptr[i] * (1 - a_ptr[i]));
+ deriv->element_set(i, j, a_ptr[i] * (1 - a_ptr[i]));
} else {
- deriv->set_element(i, j, -a_ptr[i] * a_ptr[j]);
+ deriv->element_set(i, j, -a_ptr[i] * a_ptr[j]);
}
}
}
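
Both loops above fill in the standard softmax Jacobian: with a = softmax(z), the value written at (i, j) is

    \frac{\partial a_i}{\partial z_j} = a_i \,(\delta_{ij} - a_j)

i.e. a_i (1 - a_i) on the diagonal and -a_i a_j off it, which is exactly what the renamed element_set calls store.
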
diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 1f751f1..5499f54 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -126,11 +126,11 @@ void MLPPANN::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
- real_t output_set_element = _output_set->get_element(output_index);
- output_set_row_tmp->set_element(0, output_set_element);
+ real_t output_set_element = _output_set->element_get(output_index);
+ output_set_row_tmp->element_set(0, output_set_element);
real_t y_hat = model_test(input_set_row_tmp);
- y_hat_row_tmp->set_element(0, y_hat);
+ y_hat_row_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
diff --git a/mlpp/bernoulli_nb/bernoulli_nb.cpp b/mlpp/bernoulli_nb/bernoulli_nb.cpp
index 856277a..05dd90d 100644
--- a/mlpp/bernoulli_nb/bernoulli_nb.cpp
+++ b/mlpp/bernoulli_nb/bernoulli_nb.cpp
@@ -24,7 +24,7 @@ Ref MLPPBernoulliNB::model_set_test(const Ref &X) {
for (int i = 0; i < X->size().y; i++) {
X->get_row_into_mlpp_vector(i, x_row_tmp);
- y_hat->set_element(i, model_test(x_row_tmp));
+ y_hat->element_set(i, model_test(x_row_tmp));
}
return y_hat;
@@ -38,9 +38,9 @@ real_t MLPPBernoulliNB::model_test(const Ref &x) {
for (int j = 0; j < x->size(); j++) {
for (int k = 0; k < _vocab->size(); k++) {
- if (x->get_element(j) == _vocab->get_element(k)) {
- score_0 *= _theta[0][_vocab->get_element(k)];
- score_1 *= _theta[1][_vocab->get_element(k)];
+ if (x->element_get(j) == _vocab->element_get(k)) {
+ score_0 *= _theta[0][_vocab->element_get(k)];
+ score_1 *= _theta[1][_vocab->element_get(k)];
found_indices.push_back(k);
}
@@ -50,13 +50,13 @@ real_t MLPPBernoulliNB::model_test(const Ref &x) {
for (int i = 0; i < _vocab->size(); i++) {
bool found = false;
for (int j = 0; j < found_indices.size(); j++) {
- if (_vocab->get_element(i) == _vocab->get_element(found_indices[j])) {
+ if (_vocab->element_get(i) == _vocab->element_get(found_indices[j])) {
found = true;
}
}
if (!found) {
- score_0 *= 1 - _theta[0][_vocab->get_element(i)];
- score_1 *= 1 - _theta[1][_vocab->get_element(i)];
+ score_0 *= 1 - _theta[0][_vocab->element_get(i)];
+ score_1 *= 1 - _theta[1][_vocab->element_get(i)];
}
}
@@ -113,13 +113,13 @@ void MLPPBernoulliNB::compute_theta() {
// Setting all values in the hasmap by default to 0.
for (int i = _class_num - 1; i >= 0; i--) {
for (int j = 0; j < _vocab->size(); j++) {
- _theta.write[i][_vocab->get_element(j)] = 0;
+ _theta.write[i][_vocab->element_get(j)] = 0;
}
}
for (int i = 0; i < _input_set->size().y; i++) {
for (int j = 0; j < _input_set->size().x; j++) {
- _theta.write[_output_set->get_element(i)][_input_set->get_element(i, j)]++;
+ _theta.write[_output_set->element_get(i)][_input_set->element_get(i, j)]++;
}
}
@@ -142,7 +142,7 @@ void MLPPBernoulliNB::evaluate() {
real_t sum = 0;
for (int ii = 0; ii < _output_set->size(); ii++) {
- if (_output_set->get_element(ii) == 1) {
+ if (_output_set->element_get(ii) == 1) {
sum += 1;
}
}
@@ -161,9 +161,9 @@ void MLPPBernoulliNB::evaluate() {
for (int j = 0; j < _input_set->size().y; j++) {
for (int k = 0; k < _vocab->size(); k++) {
- if (_input_set->get_element(i, j) == _vocab->get_element(k)) {
- score_0 += Math::log(static_cast(_theta[0][_vocab->get_element(k)]));
- score_1 += Math::log(static_cast(_theta[1][_vocab->get_element(k)]));
+ if (_input_set->element_get(i, j) == _vocab->element_get(k)) {
+ score_0 += Math::log(static_cast(_theta[0][_vocab->element_get(k)]));
+ score_1 += Math::log(static_cast(_theta[1][_vocab->element_get(k)]));
found_indices.push_back(k);
}
@@ -173,13 +173,13 @@ void MLPPBernoulliNB::evaluate() {
for (int ii = 0; ii < _vocab->size(); ii++) {
bool found = false;
for (int j = 0; j < found_indices.size(); j++) {
- if (_vocab->get_element(ii) == _vocab->get_element(found_indices[j])) {
+ if (_vocab->element_get(ii) == _vocab->element_get(found_indices[j])) {
found = true;
}
}
if (!found) {
- score_0 += Math::log(1.0 - _theta[0][_vocab->get_element(ii)]);
- score_1 += Math::log(1.0 - _theta[1][_vocab->get_element(ii)]);
+ score_0 += Math::log(1.0 - _theta[0][_vocab->element_get(ii)]);
+ score_1 += Math::log(1.0 - _theta[1][_vocab->element_get(ii)]);
}
}
@@ -192,9 +192,9 @@ void MLPPBernoulliNB::evaluate() {
// Assigning the traning example to a class
if (score_0 > score_1) {
- _y_hat->set_element(i, 0);
+ _y_hat->element_set(i, 0);
} else {
- _y_hat->set_element(i, 1);
+ _y_hat->element_set(i, 1);
}
}
}
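
For reference, model_test() multiplies and evaluate() log-sums the Bernoulli naive Bayes likelihood these lookups index into; writing theta_{c,w} for the per-class word probabilities held in _theta,

    P(x \mid y = c) \propto \prod_{w \in \text{vocab}} \theta_{c,w}^{x_w}\,(1 - \theta_{c,w})^{1 - x_w}

where x_w indicates whether vocab word w occurs in the example.
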
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp
index 859af1c..21f9bd9 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.cpp
+++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp
@@ -123,17 +123,17 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
- real_t output_set_element = _output_set->get_element(output_index);
- output_set_row_tmp->set_element(0, output_set_element);
+ real_t output_set_element = _output_set->element_get(output_index);
+ output_set_row_tmp->element_set(0, output_set_element);
real_t y_hat = evaluatev(input_set_row_tmp);
- y_hat_row_tmp->set_element(0, y_hat);
+ y_hat_row_tmp->element_set(0, y_hat);
real_t z = propagatev(input_set_row_tmp);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
- real_t error = y_hat - output_set_element;
+ real_t error = y_hat - output_set_element;
// Weight Updation
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * Math::exp(z - Math::exp(z)), input_set_row_tmp));
diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp
index 083f507..5c351fa 100644
--- a/mlpp/cost/cost.cpp
+++ b/mlpp/cost/cost.cpp
@@ -565,7 +565,7 @@ real_t MLPPCost::dual_form_svm(const Ref &alpha, const Ref
Ref alpha_m_res = alg.matmultnm(alg.matmultnm(alpha_m, Q), alg.transposenm(alpha_m));
- real_t alphaQ = alpha_m_res->get_element(0, 0);
+ real_t alphaQ = alpha_m_res->element_get(0, 0);
Ref one = alg.onevecnv(alpha->size());
return -alg.dotnv(one, alpha) + 0.5 * alphaQ;
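
With alphaQ = (alpha_m * Q * alpha_m^T)_{0,0} as computed just above, the returned value is the SVM dual objective

    -\mathbf{1}^{\top}\alpha + \tfrac{1}{2}\,\alpha^{\top} Q\,\alpha

(how Q is assembled from the labels and the kernel is outside this hunk).
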
diff --git a/mlpp/data/data.cpp b/mlpp/data/data.cpp
index 63342b2..da0c92b 100644
--- a/mlpp/data/data.cpp
+++ b/mlpp/data/data.cpp
@@ -1285,7 +1285,7 @@ Ref MLPPData::mean_centering(const Ref &p_X) {
real_t mean_i = stat.meanv(x_row_tmp);
for (int j = 0; j < x_size.x; ++j) {
- X->set_element(i, j, p_X->get_element(i, j) - mean_i);
+ X->element_set(i, j, p_X->element_get(i, j) - mean_i);
}
}
@@ -1306,9 +1306,9 @@ Ref MLPPData::one_hot_rep(const Ref &temp_output_set, in
for (int i = 0; i < temp_output_set_size; ++i) {
for (int j = 0; j <= n_class - 1; ++j) {
if (static_cast(temp_output_set_ptr[i]) == j) {
- output_set->set_element(i, j, 1);
+ output_set->element_set(i, j, 1);
} else {
- output_set->set_element(i, j, 0);
+ output_set->element_set(i, j, 0);
}
}
}
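
Both touched helpers are plain elementwise fills: mean_centering subtracts each row's own mean and one_hot_rep writes a per-class indicator,

    X_{ij} \leftarrow X_{ij} - \bar{x}_i, \qquad (Y_{\text{one-hot}})_{ij} = [\,y_i = j\,]
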
diff --git a/mlpp/data/data.h b/mlpp/data/data.h
index af3e338..dac293e 100644
--- a/mlpp/data/data.h
+++ b/mlpp/data/data.h
@@ -222,13 +222,13 @@ public:
bool new_element = true;
for (int j = 0; j < set_input_set.size(); j++) {
- if (set_input_set[j] == input_set->get_element(i)) {
+ if (set_input_set[j] == input_set->element_get(i)) {
new_element = false;
}
}
if (new_element) {
- set_input_set.push_back(input_set->get_element(i));
+ set_input_set.push_back(input_set->element_get(i));
}
}
diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp
index 209ab1c..b0b6ce7 100644
--- a/mlpp/dual_svc/dual_svc.cpp
+++ b/mlpp/dual_svc/dual_svc.cpp
@@ -50,18 +50,18 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
real_t biasGradient = 0;
for (int i = 0; i < _alpha->size(); i++) {
real_t sum = 0;
- if (_alpha->get_element(i) < _C && _alpha->get_element(i) > 0) {
+ if (_alpha->element_get(i) < _C && _alpha->element_get(i) > 0) {
for (int j = 0; j < _alpha->size(); j++) {
- if (_alpha->get_element(j) > 0) {
+ if (_alpha->element_get(j) > 0) {
_input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
_input_set->get_row_into_mlpp_vector(j, input_set_j_row_tmp);
- sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
+ sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
}
- biasGradient = (1 - _output_set->get_element(i) * sum) / _output_set->get_element(i);
+ biasGradient = (1 - _output_set->element_get(i) * sum) / _output_set->element_get(i);
break;
}
@@ -215,9 +215,9 @@ real_t MLPPDualSVC::propagatev(const Ref &x) {
input_set_row_tmp->resize(_input_set->size().x);
for (int j = 0; j < _alpha->size(); j++) {
- if (_alpha->get_element(j) != 0) {
+ if (_alpha->element_get(j) != 0) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
- z += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
+ z += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
z += _bias;
@@ -248,17 +248,17 @@ Ref MLPPDualSVC::propagatem(const Ref &X) {
real_t sum = 0;
for (int j = 0; j < _alpha->size(); j++) {
- if (_alpha->get_element(j) != 0) {
+ if (_alpha->element_get(j) != 0) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
X->get_row_into_mlpp_vector(i, x_row_tmp);
- sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
+ sum += _alpha->element_get(j) * _output_set->element_get(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
sum += _bias;
- z->set_element(i, sum);
+ z->element_set(i, sum);
}
return z;
}
@@ -272,10 +272,10 @@ void MLPPDualSVC::forward_pass() {
void MLPPDualSVC::alpha_projection() {
for (int i = 0; i < _alpha->size(); i++) {
- if (_alpha->get_element(i) > _C) {
- _alpha->set_element(i, _C);
- } else if (_alpha->get_element(i) < 0) {
- _alpha->set_element(i, 0);
+ if (_alpha->element_get(i) > _C) {
+ _alpha->element_set(i, _C);
+ } else if (_alpha->element_get(i) < 0) {
+ _alpha->element_set(i, 0);
}
}
}
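
For context, propagatev/propagatem accumulate the (currently linear-kernel) dual SVM decision value and alpha_projection enforces the box constraint of the dual problem:

    f(x) = \sum_{j:\,\alpha_j \neq 0} \alpha_j\, y_j\, \langle x_j, x \rangle + b, \qquad 0 \le \alpha_j \le C
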
diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp
index 191819a..c20bcec 100644
--- a/mlpp/exp_reg/exp_reg.cpp
+++ b/mlpp/exp_reg/exp_reg.cpp
@@ -40,21 +40,21 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
// Calculating the weight gradient
real_t sum = 0;
for (int j = 0; j < _n; j++) {
- sum += error->get_element(j) * _input_set->get_element(j, i) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i) - 1);
+ sum += error->element_get(j) * _input_set->element_get(j, i) * Math::pow(_weights->element_get(i), _input_set->element_get(j, i) - 1);
}
real_t w_gradient = sum / _n;
// Calculating the initial gradient
real_t sum2 = 0;
for (int j = 0; j < _n; j++) {
- sum2 += error->get_element(j) * Math::pow(_weights->get_element(i), _input_set->get_element(j, i));
+ sum2 += error->element_get(j) * Math::pow(_weights->element_get(i), _input_set->element_get(j, i));
}
real_t i_gradient = sum2 / _n;
// Weight/initial updation
- _weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
- _initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
+ _weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
+ _initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
@@ -62,7 +62,7 @@ void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
// Calculating the bias gradient
real_t sum = 0;
for (int j = 0; j < _n; j++) {
- sum += (_y_hat->get_element(j) - _output_set->get_element(j));
+ sum += (_y_hat->element_get(j) - _output_set->element_get(j));
}
real_t b_gradient = sum / _n;
@@ -110,29 +110,29 @@ void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
int output_index = distribution(generator);
_input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
- real_t output_set_element = _output_set->get_element(output_index);
- output_set_row_tmp->set_element(0, output_set_element);
+ real_t output_set_element = _output_set->element_get(output_index);
+ output_set_row_tmp->element_set(0, output_set_element);
real_t y_hat = evaluatev(input_set_row_tmp);
- y_hat_row_tmp->set_element(0, y_hat);
+ y_hat_row_tmp->element_set(0, y_hat);
cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
for (int i = 0; i < _k; i++) {
// Calculating the weight gradients
- real_t w_gradient = (y_hat - output_set_element) * input_set_row_tmp->get_element(i) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i) - 1);
- real_t i_gradient = (y_hat - output_set_element) * Math::pow(_weights->get_element(i), _input_set->get_element(output_index, i));
+ real_t w_gradient = (y_hat - output_set_element) * input_set_row_tmp->element_get(i) * Math::pow(_weights->element_get(i), _input_set->element_get(output_index, i) - 1);
+ real_t i_gradient = (y_hat - output_set_element) * Math::pow(_weights->element_get(i), _input_set->element_get(output_index, i));
// Weight/initial updation
- _weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
- _initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
+ _weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
+ _initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
- real_t b_gradient = (y_hat - output_set_element);
+ real_t b_gradient = (y_hat - output_set_element);
// Bias updation
_bias -= learning_rate * b_gradient;
@@ -177,21 +177,21 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
// Calculating the weight gradient
real_t sum = 0;
for (int k = 0; k < current_output_batch->size(); k++) {
- sum += error->get_element(k) * current_input_batch->get_element(k, j) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j) - 1);
+ sum += error->element_get(k) * current_input_batch->element_get(k, j) * Math::pow(_weights->element_get(j), current_input_batch->element_get(k, j) - 1);
}
real_t w_gradient = sum / current_output_batch->size();
// Calculating the initial gradient
real_t sum2 = 0;
for (int k = 0; k < current_output_batch->size(); k++) {
- sum2 += error->get_element(k) * Math::pow(_weights->get_element(j), current_input_batch->get_element(k, j));
+ sum2 += error->element_get(k) * Math::pow(_weights->element_get(j), current_input_batch->element_get(k, j));
}
real_t i_gradient = sum2 / current_output_batch->size();
// Weight/initial updation
- _weights->set_element(i, _weights->get_element(i) - learning_rate * w_gradient);
- _initial->set_element(i, _initial->get_element(i) - learning_rate * i_gradient);
+ _weights->element_set(i, _weights->element_get(i) - learning_rate * w_gradient);
+ _initial->element_set(i, _initial->element_get(i) - learning_rate * i_gradient);
}
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
@@ -199,7 +199,7 @@ void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
// Calculating the bias gradient
//real_t sum = 0;
//for (int j = 0; j < current_output_batch->size(); j++) {
- // sum += (y_hat->get_element(j) - current_output_batch->get_element(j));
+ // sum += (y_hat->element_get(j) - current_output_batch->element_get(j));
//}
//real_t b_gradient = sum / output_mini_batches[i].size();
@@ -276,7 +276,7 @@ real_t MLPPExpReg::evaluatev(const Ref &x) {
real_t y_hat = 0;
for (int i = 0; i < x->size(); i++) {
- y_hat += _initial->get_element(i) * Math::pow(_weights->get_element(i), x->get_element(i));
+ y_hat += _initial->element_get(i) * Math::pow(_weights->element_get(i), x->element_get(i));
}
return y_hat + _bias;
@@ -291,12 +291,12 @@ Ref MLPPExpReg::evaluatem(const Ref &X) {
real_t y = 0;
for (int j = 0; j < X->size().x; j++) {
- y += _initial->get_element(j) * Math::pow(_weights->get_element(j), X->get_element(i, j));
+ y += _initial->element_get(j) * Math::pow(_weights->element_get(j), X->element_get(i, j));
}
y += _bias;
- y_hat->set_element(i, y);
+ y_hat->element_set(i, y);
}
return y_hat;
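
Writing c_i for _initial and w_i for _weights, the model evaluated above is \hat{y} = \sum_i c_i\, w_i^{x_i} + b, and the per-sample updates in sgd() are exactly what the renamed accessors spell out:

    w_i \leftarrow w_i - \eta\,(\hat{y} - y)\, x_i\, w_i^{x_i - 1}, \qquad c_i \leftarrow c_i - \eta\,(\hat{y} - y)\, w_i^{x_i}
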
diff --git a/mlpp/gaussian_nb/gaussian_nb.cpp b/mlpp/gaussian_nb/gaussian_nb.cpp
index a232dcd..2e14d3a 100644
--- a/mlpp/gaussian_nb/gaussian_nb.cpp
+++ b/mlpp/gaussian_nb/gaussian_nb.cpp
@@ -47,7 +47,7 @@ Ref MLPPGaussianNB::model_set_test(const Ref &X) {
for (int i = 0; i < X->size().y; i++) {
X->get_row_into_mlpp_vector(i, x_row_tmp);
- y_hat->set_element(i, model_test(x_row_tmp));
+ y_hat->element_set(i, model_test(x_row_tmp));
}
return y_hat;
@@ -60,11 +60,11 @@ real_t MLPPGaussianNB::model_test(const Ref &x) {
real_t y_hat_i = 1;
for (int i = _class_num - 1; i >= 0; i--) {
- real_t sigma_i = _sigma->get_element(i);
- real_t x_i = x->get_element(i);
- real_t mu_i = _mu->get_element(i);
+ real_t sigma_i = _sigma->element_get(i);
+ real_t x_i = x->element_get(i);
+ real_t mu_i = _mu->element_get(i);
- y_hat_i += Math::log(_priors->get_element(i) * (1 / Math::sqrt(2 * Math_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));
+ y_hat_i += Math::log(_priors->element_get(i) * (1 / Math::sqrt(2 * Math_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));
score[i] = Math::exp(y_hat_i);
}
@@ -140,24 +140,24 @@ void MLPPGaussianNB::evaluate() {
for (int j = 0; j < _input_set->size().y; j++) {
for (int k = 0; k < _input_set->size().x; k++) {
- if (_output_set->get_element(j) == i) {
- set.push_back(_input_set->get_element(j, k));
+ if (_output_set->element_get(j) == i) {
+ set.push_back(_input_set->element_get(j, k));
}
}
}
set_vec->set_from_pool_vector(set);
- _mu->set_element(i, stat.meanv(set_vec));
- _sigma->set_element(i, stat.standard_deviationv(set_vec));
+ _mu->element_set(i, stat.meanv(set_vec));
+ _sigma->element_set(i, stat.standard_deviationv(set_vec));
}
// Priors
_priors->resize(_class_num);
_priors->fill(0);
for (int i = 0; i < _output_set->size(); i++) {
- int indx = static_cast(_output_set->get_element(i));
- _priors->set_element(indx, _priors->get_element(indx));
+ int indx = static_cast(_output_set->element_get(i));
+ _priors->element_set(indx, _priors->element_get(indx));
}
_priors = alg.scalar_multiplynv(real_t(1) / real_t(_output_set->size()), _priors);
@@ -170,11 +170,11 @@ void MLPPGaussianNB::evaluate() {
for (int j = _class_num - 1; j >= 0; j--) {
for (int k = 0; k < _input_set->size().x; k++) {
- real_t sigma_j = _sigma->get_element(j);
- real_t mu_j = _mu->get_element(j);
- real_t input_set_i_k = _input_set->get_element(i, k);
+ real_t sigma_j = _sigma->element_get(j);
+ real_t mu_j = _mu->element_get(j);
+ real_t input_set_i_k = _input_set->element_get(i, k);
- y_hat_i += Math::log(_priors->get_element(j) * (1 / Math::sqrt(2 * Math_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
+ y_hat_i += Math::log(_priors->element_get(j) * (1 / Math::sqrt(2 * Math_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
}
score[j] = Math::exp(y_hat_i);
@@ -192,7 +192,7 @@ void MLPPGaussianNB::evaluate() {
}
}
- _y_hat->set_element(i, max_element_index);
+ _y_hat->element_set(i, max_element_index);
}
}
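
As a reference point for the math in model_test()/evaluate(): Gaussian naive Bayes scores class c with its prior times a normal density,

    \log\!\left(P(y = c)\,\frac{1}{\sqrt{2\pi\sigma_c^2}}\,\exp\!\left(-\frac{(x - \mu_c)^2}{2\sigma_c^2}\right)\right)

where \mu_c and \sigma_c are the per-class mean and standard deviation computed above, and the prediction is the arg max over the per-class scores (the final element_set writes that index).
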
diff --git a/mlpp/kmeans/kmeans.cpp b/mlpp/kmeans/kmeans.cpp
index fe0f0a9..99fca87 100644
--- a/mlpp/kmeans/kmeans.cpp
+++ b/mlpp/kmeans/kmeans.cpp
@@ -271,17 +271,17 @@ Ref MLPPKMeans::silhouette_scores() {
}
}
- silhouette_scores->set_element(i, (b - a) / fmax(a, b));
+ silhouette_scores->element_set(i, (b - a) / fmax(a, b));
// Or the expanded version:
// if(a < b) {
- // silhouette_scores->set_element(i, 1 - a/b);
+ // silhouette_scores->element_set(i, 1 - a/b);
// }
// else if(a == b){
- // silhouette_scores->set_element(i, 0);
+ // silhouette_scores->element_set(i, 0);
// }
// else{
- // silhouette_scores->set_element(i, b/a - 1);
+ // silhouette_scores->element_set(i, b/a - 1);
// }
}
@@ -349,7 +349,7 @@ void MLPPKMeans::_evaluate() {
}
}
- _r->set_element(i, closest_centroid_index, 1);
+ _r->element_set(i, closest_centroid_index, 1);
}
}
@@ -383,9 +383,9 @@ void MLPPKMeans::_compute_mu() {
for (int j = 0; j < r_size_y; ++j) {
_input_set->get_row_into_mlpp_vector(j, input_set_j_tempv);
- real_t r_j_i = _r->get_element(j, i);
+ real_t r_j_i = _r->element_get(j, i);
- alg.scalar_multiplyv(_r->get_element(j, i), input_set_j_tempv, mat_tempv);
+ alg.scalar_multiplyv(_r->element_get(j, i), input_set_j_tempv, mat_tempv);
alg.additionv(num, mat_tempv, num);
den += r_j_i;
@@ -501,7 +501,7 @@ real_t MLPPKMeans::_cost() {
_mu->get_row_into_mlpp_vector(j, mu_j_tempv);
alg.subtractionv(input_set_i_tempv, mu_j_tempv, sub_tempv);
- sum += _r->get_element(i, j) * alg.norm_sqv(sub_tempv);
+ sum += _r->element_get(i, j) * alg.norm_sqv(sub_tempv);
}
}
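
The cost accumulated at the end is the usual k-means objective over the responsibility matrix _r,

    J = \sum_i \sum_j r_{ij}\, \lVert x_i - \mu_j \rVert^2

and _compute_mu rebuilds each centroid as the r-weighted average of its assigned rows (num / den in the hunk above).
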
diff --git a/mlpp/lin_alg/lin_alg.cpp b/mlpp/lin_alg/lin_alg.cpp
index 79cdbef..a964892 100644
--- a/mlpp/lin_alg/lin_alg.cpp
+++ b/mlpp/lin_alg/lin_alg.cpp
@@ -117,7 +117,7 @@ Ref MLPPLinAlg::matmultnm(const Ref &A, const Ref &B) {
- //C->set_element(i, j, C->get_element(i, j) + A->get_element(i, k) * B->get_element(k, j
+ //C->element_set(i, j, C->element_get(i, j) + A->element_get(i, k) * B->element_get(k, j
}
}
}
@@ -421,7 +421,7 @@ real_t MLPPLinAlg::detm(const Ref &A, int d) {
Recursion is performed unless and until we reach this base case,
such that we recieve a scalar as the result. */
if (d == 2) {
- return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0);
+ return A->element_get(0, 0) * A->element_get(1, 1) - A->element_get(0, 1) * A->element_get(1, 0);
} else {
for (int i = 0; i < d; i++) {
int sub_i = 0;
@@ -432,13 +432,13 @@ real_t MLPPLinAlg::detm(const Ref &A, int d) {
continue;
}
- B->set_element(sub_i, sub_j, A->get_element(j, k));
+ B->element_set(sub_i, sub_j, A->element_get(j, k));
sub_j++;
}
sub_i++;
}
- deter += Math::pow(static_cast(-1), static_cast(i)) * A->get_element(0, i) * detm(B, d - 1);
+ deter += Math::pow(static_cast(-1), static_cast(i)) * A->element_get(0, i) * detm(B, d - 1);
}
}
@@ -466,7 +466,7 @@ Ref MLPPLinAlg::cofactornm(const Ref &A, int n, int i, i
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
- cof->set_element(sub_i, sub_j++, A->get_element(row, col));
+ cof->element_set(sub_i, sub_j++, A->element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@@ -494,16 +494,16 @@ Ref MLPPLinAlg::adjointnm(const Ref &A) {
// Checking for the case where the given N x N matrix is a scalar
if (a_size.y == 1) {
- adj->set_element(0, 0, 1);
+ adj->element_set(0, 0, 1);
return adj;
}
if (a_size.y == 2) {
- adj->set_element(0, 0, A->get_element(1, 1));
- adj->set_element(1, 1, A->get_element(0, 0));
+ adj->element_set(0, 0, A->element_get(1, 1));
+ adj->element_set(1, 1, A->element_get(0, 0));
- adj->set_element(0, 1, -A->get_element(0, 1));
- adj->set_element(1, 0, -A->get_element(1, 0));
+ adj->element_set(0, 1, -A->element_get(0, 1));
+ adj->element_set(1, 0, -A->element_get(1, 0));
return adj;
}
@@ -513,7 +513,7 @@ Ref MLPPLinAlg::adjointnm(const Ref &A) {
Ref cof = cofactornm(A, a_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
- adj->set_element(j, i, sign * detm(cof, int(a_size.y) - 1));
+ adj->element_set(j, i, sign * detm(cof, int(a_size.y) - 1));
}
}
return adj;
@@ -694,7 +694,7 @@ Ref MLPPLinAlg::covnm(const Ref &A) {
for (int j = 0; j < a_size.x; ++j) {
A->get_row_into_mlpp_vector(j, a_j_row_tmp);
- cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
+ cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
@@ -720,12 +720,12 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
Size2i a_size = A->size();
do {
- real_t a_ij = A->get_element(0, 1);
+ real_t a_ij = A->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
- real_t ca_ij = A->get_element(i, j);
+ real_t ca_ij = A->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@@ -742,9 +742,9 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
}
}
- real_t a_ii = A->get_element(sub_i, sub_i);
- real_t a_jj = A->get_element(sub_j, sub_j);
- //real_t a_ji = A->get_element(sub_j, sub_i);
+ real_t a_ii = A->element_get(sub_i, sub_i);
+ real_t a_jj = A->element_get(sub_j, sub_j);
+ //real_t a_ji = A->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@@ -754,10 +754,10 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
}
Ref P = identitym(A->size().y);
- P->set_element(sub_i, sub_j, -Math::sin(theta));
- P->set_element(sub_i, sub_i, Math::cos(theta));
- P->set_element(sub_j, sub_j, Math::cos(theta));
- P->set_element(sub_j, sub_i, Math::sin(theta));
+ P->element_set(sub_i, sub_j, -Math::sin(theta));
+ P->element_set(sub_i, sub_i, Math::cos(theta));
+ P->element_set(sub_j, sub_j, Math::cos(theta));
+ P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = matmultnm(matmultnm(inversenm(P), A), P);
@@ -765,8 +765,8 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
- a_new->set_element(i, j, 0);
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
+ a_new->element_set(i, j, 0);
}
}
}
@@ -774,7 +774,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@@ -791,7 +791,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
- a_new->set_element(i, j, 0);
+ a_new->element_set(i, j, 0);
}
}
}
@@ -809,17 +809,17 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
- if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
- real_t temp = a_new->get_element(j + 1, j + 1);
- a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
- a_new->set_element(j, j, temp);
+ if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
+ real_t temp = a_new->element_get(j + 1, j + 1);
+ a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
+ a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
+ if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@@ -831,7 +831,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref A) {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
- eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
+ eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@@ -858,7 +858,7 @@ MLPPLinAlg::SVDResult MLPPLinAlg::svd(const Ref &A) {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
- sigma->set_element(i, j, singularvals->get_element(i, j));
+ sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@@ -1720,7 +1720,7 @@ Ref MLPPLinAlg::outer_product(const Ref &a, const Ref &b) {
- C->set_element(i, j, curr_a * b_ptr[j]);
+ C->element_set(i, j, curr_a * b_ptr[j]);
}
}
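
The eigen() routine touched above is a Jacobi eigenvalue iteration: find the largest off-diagonal entry a_{ij}, build a rotation P that differs from the identity only at positions (i, i), (i, j), (j, i), (j, j) using \cos\theta and \pm\sin\theta as set above, update A \leftarrow P^{-1} A P, and repeat until the off-diagonal entries round to zero. The classical rotation angle is

    \theta = \tfrac{1}{2}\arctan\!\left(\frac{2\,a_{ij}}{a_{ii} - a_{jj}}\right)

degenerating to \pi/4 when the two diagonal entries coincide (the a_ii == a_jj special case visible in the hunk). The eigenvalues end up on the diagonal and the accumulated rotations give the eigenvectors.
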
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index 003a064..dd9dbd4 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -1524,7 +1524,7 @@ real_t MLPPMatrix::detb(const Ref &A, int d) const {
Recursion is performed unless and until we reach this base case,
such that we recieve a scalar as the result. */
if (d == 2) {
- return A->get_element(0, 0) * A->get_element(1, 1) - A->get_element(0, 1) * A->get_element(1, 0);
+ return A->element_get(0, 0) * A->element_get(1, 1) - A->element_get(0, 1) * A->element_get(1, 0);
} else {
for (int i = 0; i < d; i++) {
int sub_i = 0;
@@ -1535,13 +1535,13 @@ real_t MLPPMatrix::detb(const Ref &A, int d) const {
continue;
}
- B->set_element(sub_i, sub_j, A->get_element(j, k));
+ B->element_set(sub_i, sub_j, A->element_get(j, k));
sub_j++;
}
sub_i++;
}
- deter += Math::pow(static_cast(-1), static_cast(i)) * A->get_element(0, i) * B->det(d - 1);
+ deter += Math::pow(static_cast(-1), static_cast(i)) * A->element_get(0, i) * B->det(d - 1);
}
}
@@ -1569,7 +1569,7 @@ Ref MLPPMatrix::cofactor(int n, int i, int j) const {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
- cof->set_element(sub_i, sub_j++, get_element(row, col));
+ cof->element_set(sub_i, sub_j++, element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@@ -1594,7 +1594,7 @@ void MLPPMatrix::cofactoro(int n, int i, int j, Ref out) const {
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
if (row != i && col != j) {
- out->set_element(sub_i, sub_j++, get_element(row, col));
+ out->element_set(sub_i, sub_j++, element_get(row, col));
if (sub_j == n - 1) {
sub_j = 0;
@@ -1617,16 +1617,16 @@ Ref MLPPMatrix::adjoint() const {
// Checking for the case where the given N x N matrix is a scalar
if (_size.y == 1) {
- adj->set_element(0, 0, 1);
+ adj->element_set(0, 0, 1);
return adj;
}
if (_size.y == 2) {
- adj->set_element(0, 0, get_element(1, 1));
- adj->set_element(1, 1, get_element(0, 0));
+ adj->element_set(0, 0, element_get(1, 1));
+ adj->element_set(1, 1, element_get(0, 0));
- adj->set_element(0, 1, -get_element(0, 1));
- adj->set_element(1, 0, -get_element(1, 0));
+ adj->element_set(0, 1, -element_get(0, 1));
+ adj->element_set(1, 0, -element_get(1, 0));
return adj;
}
@@ -1636,7 +1636,7 @@ Ref MLPPMatrix::adjoint() const {
Ref cof = cofactor(_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
- adj->set_element(j, i, sign * cof->det(int(_size.y) - 1));
+ adj->element_set(j, i, sign * cof->det(int(_size.y) - 1));
}
}
return adj;
@@ -1654,16 +1654,16 @@ void MLPPMatrix::adjointo(Ref out) const {
// Checking for the case where the given N x N matrix is a scalar
if (_size.y == 1) {
- out->set_element(0, 0, 1);
+ out->element_set(0, 0, 1);
return;
}
if (_size.y == 2) {
- out->set_element(0, 0, get_element(1, 1));
- out->set_element(1, 1, get_element(0, 0));
+ out->element_set(0, 0, element_get(1, 1));
+ out->element_set(1, 1, element_get(0, 0));
- out->set_element(0, 1, -get_element(0, 1));
- out->set_element(1, 0, -get_element(1, 0));
+ out->element_set(0, 1, -element_get(0, 1));
+ out->element_set(1, 0, -element_get(1, 0));
return;
}
@@ -1673,7 +1673,7 @@ void MLPPMatrix::adjointo(Ref out) const {
Ref cof = cofactor(_size.y, i, j);
// 1 if even, -1 if odd
int sign = (i + j) % 2 == 0 ? 1 : -1;
- out->set_element(j, i, sign * cof->det(int(_size.y) - 1));
+ out->element_set(j, i, sign * cof->det(int(_size.y) - 1));
}
}
}
@@ -1968,7 +1968,7 @@ Ref MLPPMatrix::cov() const {
for (int j = 0; j < _size.x; ++j) {
get_row_into_mlpp_vector(j, a_j_row_tmp);
- cov_mat->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
+ cov_mat->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
@@ -1997,7 +1997,7 @@ void MLPPMatrix::covo(Ref out) const {
for (int j = 0; j < _size.x; ++j) {
get_row_into_mlpp_vector(j, a_j_row_tmp);
- out->set_element(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
+ out->element_set(i, j, stat.covariancev(a_i_row_tmp, a_j_row_tmp));
}
}
}
@@ -2020,12 +2020,12 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
Size2i a_size = a_mat->size();
do {
- real_t a_ij = a_mat->get_element(0, 1);
+ real_t a_ij = a_mat->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
- real_t ca_ij = a_mat->get_element(i, j);
+ real_t ca_ij = a_mat->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@@ -2042,9 +2042,9 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
}
}
- real_t a_ii = a_mat->get_element(sub_i, sub_i);
- real_t a_jj = a_mat->get_element(sub_j, sub_j);
- //real_t a_ji = a_mat->get_element(sub_j, sub_i);
+ real_t a_ii = a_mat->element_get(sub_i, sub_i);
+ real_t a_jj = a_mat->element_get(sub_j, sub_j);
+ //real_t a_ji = a_mat->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@@ -2054,10 +2054,10 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
}
Ref P = identity_mat(a_mat->size().y);
- P->set_element(sub_i, sub_j, -Math::sin(theta));
- P->set_element(sub_i, sub_i, Math::cos(theta));
- P->set_element(sub_j, sub_j, Math::cos(theta));
- P->set_element(sub_j, sub_i, Math::sin(theta));
+ P->element_set(sub_i, sub_j, -Math::sin(theta));
+ P->element_set(sub_i, sub_i, Math::cos(theta));
+ P->element_set(sub_j, sub_j, Math::cos(theta));
+ P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = P->inverse()->multn(a_mat)->multn(P);
@@ -2065,8 +2065,8 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
- a_new->set_element(i, j, 0);
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
+ a_new->element_set(i, j, 0);
}
}
}
@@ -2074,7 +2074,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@@ -2091,7 +2091,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
- a_new->set_element(i, j, 0);
+ a_new->element_set(i, j, 0);
}
}
}
@@ -2109,17 +2109,17 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
- if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
- real_t temp = a_new->get_element(j + 1, j + 1);
- a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
- a_new->set_element(j, j, temp);
+ if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
+ real_t temp = a_new->element_get(j + 1, j + 1);
+ a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
+ a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
+ if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@@ -2131,7 +2131,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
- eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
+ eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@@ -2160,12 +2160,12 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
Size2i a_size = a_mat->size();
do {
- real_t a_ij = a_mat->get_element(0, 1);
+ real_t a_ij = a_mat->element_get(0, 1);
real_t sub_i = 0;
real_t sub_j = 1;
for (int i = 0; i < a_size.y; ++i) {
for (int j = 0; j < a_size.x; ++j) {
- real_t ca_ij = a_mat->get_element(i, j);
+ real_t ca_ij = a_mat->element_get(i, j);
real_t abs_ca_ij = ABS(ca_ij);
if (i != j && abs_ca_ij > a_ij) {
@@ -2182,9 +2182,9 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
}
}
- real_t a_ii = a_mat->get_element(sub_i, sub_i);
- real_t a_jj = a_mat->get_element(sub_j, sub_j);
- //real_t a_ji = a_mat->get_element(sub_j, sub_i);
+ real_t a_ii = a_mat->element_get(sub_i, sub_i);
+ real_t a_jj = a_mat->element_get(sub_j, sub_j);
+ //real_t a_ji = a_mat->element_get(sub_j, sub_i);
real_t theta;
if (a_ii == a_jj) {
@@ -2194,10 +2194,10 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
}
Ref P = identity_mat(a_mat->size().y);
- P->set_element(sub_i, sub_j, -Math::sin(theta));
- P->set_element(sub_i, sub_i, Math::cos(theta));
- P->set_element(sub_j, sub_j, Math::cos(theta));
- P->set_element(sub_j, sub_i, Math::sin(theta));
+ P->element_set(sub_i, sub_j, -Math::sin(theta));
+ P->element_set(sub_i, sub_i, Math::cos(theta));
+ P->element_set(sub_j, sub_j, Math::cos(theta));
+ P->element_set(sub_j, sub_i, Math::sin(theta));
a_new = P->inverse()->multn(a_mat)->multn(P);
@@ -2205,8 +2205,8 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
- a_new->set_element(i, j, 0);
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
+ a_new->element_set(i, j, 0);
}
}
}
@@ -2214,7 +2214,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
bool non_zero = false;
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (i != j && Math::is_zero_approx(Math::round(a_new->get_element(i, j)))) {
+ if (i != j && Math::is_zero_approx(Math::round(a_new->element_get(i, j)))) {
non_zero = true;
}
}
@@ -2231,7 +2231,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
if (i != j) {
- a_new->set_element(i, j, 0);
+ a_new->element_set(i, j, 0);
}
}
}
@@ -2249,17 +2249,17 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
// Bubble Sort. Should change this later.
for (int i = 0; i < a_new_size.y - 1; ++i) {
for (int j = 0; j < a_new_size.x - 1 - i; ++j) {
- if (a_new->get_element(j, j) < a_new->get_element(j + 1, j + 1)) {
- real_t temp = a_new->get_element(j + 1, j + 1);
- a_new->set_element(j + 1, j + 1, a_new->get_element(j, j));
- a_new->set_element(j, j, temp);
+ if (a_new->element_get(j, j) < a_new->element_get(j + 1, j + 1)) {
+ real_t temp = a_new->element_get(j + 1, j + 1);
+ a_new->element_set(j + 1, j + 1, a_new->element_get(j, j));
+ a_new->element_set(j, j, temp);
}
}
}
for (int i = 0; i < a_new_size.y; ++i) {
for (int j = 0; j < a_new_size.x; ++j) {
- if (a_new->get_element(i, i) == a_new_prior->get_element(j, j)) {
+ if (a_new->element_get(i, i) == a_new_prior->element_get(j, j)) {
val_to_vec[i] = j;
}
}
@@ -2271,7 +2271,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref &A) const {
for (int i = 0; i < eigenvectors_size.y; ++i) {
for (int j = 0; j < eigenvectors_size.x; ++j) {
- eigenvectors->set_element(i, j, eigen_temp->get_element(i, val_to_vec[j]));
+ eigenvectors->element_set(i, j, eigen_temp->element_get(i, val_to_vec[j]));
}
}
@@ -2316,7 +2316,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svd() const {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
- sigma->set_element(i, j, singularvals->get_element(i, j));
+ sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@@ -2344,7 +2344,7 @@ MLPPMatrix::SVDResult MLPPMatrix::svdb(const Ref &A) const {
for (int i = 0; i < singularvals_size.y; ++i) {
for (int j = 0; j < singularvals_size.x; ++j) {
- sigma->set_element(i, j, singularvals->get_element(i, j));
+ sigma->element_set(i, j, singularvals->element_get(i, j));
}
}
@@ -2679,7 +2679,7 @@ void MLPPMatrix::outer_product(const Ref &a, const Ref &
real_t curr_a = a_ptr[i];
for (int j = 0; j < s.x; ++j) {
- set_element(i, j, curr_a * b_ptr[j]);
+ element_set(i, j, curr_a * b_ptr[j]);
}
}
}
@@ -2699,7 +2699,7 @@ Ref MLPPMatrix::outer_productn(const Ref &a, const Ref &b) {
- C->set_element(i, j, curr_a * b_ptr[j]);
+ C->element_set(i, j, curr_a * b_ptr[j]);
}
}
@@ -2865,7 +2865,7 @@ void MLPPMatrix::set_from_image(const Ref &p_img, const int p_image_chann
for (int x = 0; x < _size.x; ++x) {
Color c = img->get_pixel(x, y);
- set_element(y, x, c[p_image_channel]);
+ element_set(y, x, c[p_image_channel]);
}
}
@@ -3018,11 +3018,11 @@ void MLPPMatrix::_bind_methods() {
ClassDB::bind_method(D_METHOD("resize", "size"), &MLPPMatrix::resize);
- ClassDB::bind_method(D_METHOD("get_element_index", "index"), &MLPPMatrix::get_element_index);
- ClassDB::bind_method(D_METHOD("set_element_index", "index", "val"), &MLPPMatrix::set_element_index);
+ ClassDB::bind_method(D_METHOD("element_get_index", "index"), &MLPPMatrix::element_get_index);
+ ClassDB::bind_method(D_METHOD("element_set_index", "index", "val"), &MLPPMatrix::element_set_index);
- ClassDB::bind_method(D_METHOD("get_element", "index_y", "index_x"), &MLPPMatrix::get_element);
- ClassDB::bind_method(D_METHOD("set_element", "index_y", "index_x", "val"), &MLPPMatrix::set_element);
+ ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x"), &MLPPMatrix::element_get);
+ ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "val"), &MLPPMatrix::element_set);
ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y"), &MLPPMatrix::get_row_pool_vector);
ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y"), &MLPPMatrix::get_row_mlpp_vector);
diff --git a/mlpp/lin_alg/mlpp_matrix.h b/mlpp/lin_alg/mlpp_matrix.h
index 4192ade..c743e68 100644
--- a/mlpp/lin_alg/mlpp_matrix.h
+++ b/mlpp/lin_alg/mlpp_matrix.h
@@ -72,26 +72,26 @@ public:
return _data[p_index];
}
- _FORCE_INLINE_ real_t get_element_index(int p_index) const {
+ _FORCE_INLINE_ real_t element_get_index(int p_index) const {
ERR_FAIL_INDEX_V(p_index, data_size(), 0);
return _data[p_index];
}
- _FORCE_INLINE_ void set_element_index(int p_index, real_t p_val) {
+ _FORCE_INLINE_ void element_set_index(int p_index, real_t p_val) {
ERR_FAIL_INDEX(p_index, data_size());
_data[p_index] = p_val;
}
- _FORCE_INLINE_ real_t get_element(int p_index_y, int p_index_x) const {
+ _FORCE_INLINE_ real_t element_get(int p_index_y, int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, 0);
ERR_FAIL_INDEX_V(p_index_y, _size.y, 0);
return _data[p_index_y * _size.x + p_index_x];
}
- _FORCE_INLINE_ void set_element(int p_index_y, int p_index_x, real_t p_val) {
+ _FORCE_INLINE_ void element_set(int p_index_y, int p_index_x, real_t p_val) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
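
To make the renamed accessor surface concrete, a minimal usage sketch (illustrative only: it assumes the module is built into the engine, that resize() takes the same Size2i used for _size, and reuses the Ref/instance() pattern that appears elsewhere in this patch):

    #include "mlpp/lin_alg/mlpp_matrix.h"

    void element_accessor_example() {
        Ref<MLPPMatrix> m;
        m.instance();
        m->resize(Size2i(2, 2)); // x = columns, y = rows

        // The 2D accessors keep the (index_y, index_x) argument order; only the names changed.
        m->element_set(0, 0, 1.0);
        m->element_set(1, 1, 4.0);
        real_t d = m->element_get(1, 1); // 4.0

        // The *_index variants address the row-major backing buffer directly.
        m->element_set_index(1, 2.0);       // row 0, column 1
        real_t e = m->element_get_index(1); // 2.0

        (void)d;
        (void)e;
    }

The bounds checks (ERR_FAIL_INDEX / ERR_FAIL_INDEX_V) are untouched; the rename only moves the verb after the noun.
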
diff --git a/mlpp/lin_alg/mlpp_tensor3.cpp b/mlpp/lin_alg/mlpp_tensor3.cpp
index fdddde6..b674e95 100644
--- a/mlpp/lin_alg/mlpp_tensor3.cpp
+++ b/mlpp/lin_alg/mlpp_tensor3.cpp
@@ -48,7 +48,7 @@ void MLPPTensor3::set_data(const Array &p_from) {
}
}
-void MLPPTensor3::add_z_slice(const Vector &p_row) {
+void MLPPTensor3::z_slice_add(const Vector &p_row) {
if (p_row.size() == 0) {
return;
}
@@ -71,7 +71,7 @@ void MLPPTensor3::add_z_slice(const Vector &p_row) {
}
}
-void MLPPTensor3::add_z_slice_pool_vector(const PoolRealArray &p_row) {
+void MLPPTensor3::z_slice_add_pool_vector(const PoolRealArray &p_row) {
if (p_row.size() == 0) {
return;
}
@@ -95,7 +95,7 @@ void MLPPTensor3::add_z_slice_pool_vector(const PoolRealArray &p_row) {
}
}
-void MLPPTensor3::add_z_slice_mlpp_vector(const Ref &p_row) {
+void MLPPTensor3::z_slice_add_mlpp_vector(const Ref &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
int p_row_size = p_row->size();
@@ -122,7 +122,7 @@ void MLPPTensor3::add_z_slice_mlpp_vector(const Ref &p_row) {
}
}
-void MLPPTensor3::add_z_slice_mlpp_matrix(const Ref &p_matrix) {
+void MLPPTensor3::z_slice_add_mlpp_matrix(const Ref &p_matrix) {
ERR_FAIL_COND(!p_matrix.is_valid());
int other_data_size = p_matrix->data_size();
@@ -150,7 +150,7 @@ void MLPPTensor3::add_z_slice_mlpp_matrix(const Ref &p_matrix) {
}
}
-void MLPPTensor3::remove_z_slice(int p_index) {
+void MLPPTensor3::z_slice_remove(int p_index) {
ERR_FAIL_INDEX(p_index, _size.z);
--_size.z;
@@ -175,7 +175,7 @@ void MLPPTensor3::remove_z_slice(int p_index) {
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
-void MLPPTensor3::remove_z_slice_unordered(int p_index) {
+void MLPPTensor3::z_slice_remove_unordered(int p_index) {
ERR_FAIL_INDEX(p_index, _size.z);
--_size.z;
@@ -199,7 +199,7 @@ void MLPPTensor3::remove_z_slice_unordered(int p_index) {
CRASH_COND_MSG(!_data, "Out of memory");
}
-void MLPPTensor3::swap_z_slice(int p_index_1, int p_index_2) {
+void MLPPTensor3::z_slice_swap(int p_index_1, int p_index_2) {
ERR_FAIL_INDEX(p_index_1, _size.z);
ERR_FAIL_INDEX(p_index_2, _size.z);
@@ -240,7 +240,7 @@ void MLPPTensor3::set_shape(const Size3i &p_size) {
_size = p_size;
}
-Vector MLPPTensor3::get_row_vector(int p_index_y, int p_index_z) const {
+Vector MLPPTensor3::row_get_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Vector());
ERR_FAIL_INDEX_V(p_index_z, _size.z, Vector());
@@ -263,7 +263,7 @@ Vector MLPPTensor3::get_row_vector(int p_index_y, int p_index_z) const {
return ret;
}
-PoolRealArray MLPPTensor3::get_row_pool_vector(int p_index_y, int p_index_z) const {
+PoolRealArray MLPPTensor3::row_get_pool_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, PoolRealArray());
ERR_FAIL_INDEX_V(p_index_z, _size.z, PoolRealArray());
@@ -287,7 +287,7 @@ PoolRealArray MLPPTensor3::get_row_pool_vector(int p_index_y, int p_index_z) con
return ret;
}
-Ref MLPPTensor3::get_row_mlpp_vector(int p_index_y, int p_index_z) const {
+Ref MLPPTensor3::row_get_mlpp_vector(int p_index_y, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref());
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref());
@@ -311,7 +311,7 @@ Ref MLPPTensor3::get_row_mlpp_vector(int p_index_y, int p_index_z) c
return ret;
}
-void MLPPTensor3::get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref target) const {
+void MLPPTensor3::row_get_into_mlpp_vector(int p_index_y, int p_index_z, Ref target) const {
ERR_FAIL_COND(!target.is_valid());
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@@ -329,7 +329,7 @@ void MLPPTensor3::get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref target) const {
-void MLPPTensor3::set_row_vector(int p_index_y, int p_index_z, const Vector &p_row) {
+void MLPPTensor3::row_set_vector(int p_index_y, int p_index_z, const Vector &p_row) {
ERR_FAIL_COND(p_row.size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@@ -343,7 +343,7 @@ void MLPPTensor3::set_row_vector(int p_index_y, int p_index_z, const Vector &p_row) {
+void MLPPTensor3::row_set_mlpp_vector(int p_index_y, int p_index_z, const Ref &p_row) {
ERR_FAIL_COND(!p_row.is_valid());
ERR_FAIL_COND(p_row->size() != _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -373,7 +373,7 @@ void MLPPTensor3::set_row_mlpp_vector(int p_index_y, int p_index_z, const Ref &p_row) {
-Vector MLPPTensor3::get_z_slice_vector(int p_index_z) const {
+Vector MLPPTensor3::z_slice_get_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Vector());
Vector ret;
@@ -397,7 +397,7 @@ Vector MLPPTensor3::get_z_slice_vector(int p_index_z) const {
return ret;
}
-PoolRealArray MLPPTensor3::get_z_slice_pool_vector(int p_index_z) const {
+PoolRealArray MLPPTensor3::z_slice_get_pool_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, PoolRealArray());
PoolRealArray ret;
@@ -422,7 +422,7 @@ PoolRealArray MLPPTensor3::get_z_slice_pool_vector(int p_index_z) const {
return ret;
}
-Ref MLPPTensor3::get_z_slice_mlpp_vector(int p_index_z) const {
+Ref MLPPTensor3::z_slice_get_mlpp_vector(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref());
Ref ret;
@@ -447,7 +447,7 @@ Ref MLPPTensor3::get_z_slice_mlpp_vector(int p_index_z) const {
return ret;
}
-void MLPPTensor3::get_z_slice_into_mlpp_vector(int p_index_z, Ref target) const {
+void MLPPTensor3::z_slice_get_into_mlpp_vector(int p_index_z, Ref target) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@@ -465,7 +465,7 @@ void MLPPTensor3::get_z_slice_into_mlpp_vector(int p_index_z, Ref ta
}
}
-Ref MLPPTensor3::get_z_slice_mlpp_matrix(int p_index_z) const {
+Ref MLPPTensor3::z_slice_get_mlpp_matrix(int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref());
Ref ret;
@@ -490,7 +490,7 @@ Ref MLPPTensor3::get_z_slice_mlpp_matrix(int p_index_z) const {
return ret;
}
-void MLPPTensor3::get_z_slice_into_mlpp_matrix(int p_index_z, Ref target) const {
+void MLPPTensor3::z_slice_get_into_mlpp_matrix(int p_index_z, Ref target) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@@ -509,7 +509,7 @@ void MLPPTensor3::get_z_slice_into_mlpp_matrix(int p_index_z, Ref ta
}
}
-void MLPPTensor3::set_z_slice_vector(int p_index_z, const Vector &p_row) {
+void MLPPTensor3::z_slice_set_vector(int p_index_z, const Vector &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@@ -525,7 +525,7 @@ void MLPPTensor3::set_z_slice_vector(int p_index_z, const Vector &p_row)
}
}
-void MLPPTensor3::set_z_slice_pool_vector(int p_index_z, const PoolRealArray &p_row) {
+void MLPPTensor3::z_slice_set_pool_vector(int p_index_z, const PoolRealArray &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
int fmds = z_slice_data_size();
@@ -542,7 +542,7 @@ void MLPPTensor3::set_z_slice_pool_vector(int p_index_z, const PoolRealArray &p_
}
}
-void MLPPTensor3::set_z_slice_mlpp_vector(int p_index_z, const Ref &p_row) {
+void MLPPTensor3::z_slice_set_mlpp_vector(int p_index_z, const Ref &p_row) {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_row.is_valid());
@@ -559,7 +559,7 @@ void MLPPTensor3::set_z_slice_mlpp_vector(int p_index_z, const Ref &
}
}
-void MLPPTensor3::set_z_slice_mlpp_matrix(int p_index_z, const Ref &p_mat) {
+void MLPPTensor3::z_slice_set_mlpp_matrix(int p_index_z, const Ref &p_mat) {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_mat.is_valid());
@@ -576,7 +576,7 @@ void MLPPTensor3::set_z_slice_mlpp_matrix(int p_index_z, const Ref &
}
}
-void MLPPTensor3::get_x_slice_into(int p_index_x, Ref target) const {
+void MLPPTensor3::x_slice_get_into(int p_index_x, Ref target) const {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!target.is_valid());
@@ -586,33 +586,33 @@ void MLPPTensor3::get_x_slice_into(int p_index_x, Ref target) const
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
- target->set_element(z, y, get_element(p_index_x, y, z));
+ target->element_set(z, y, element_get(p_index_x, y, z));
}
}
}
-Ref MLPPTensor3::get_x_slice(int p_index_x) const {
+Ref MLPPTensor3::x_slice_get(int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, Ref());
Ref m;
m.instance();
- get_x_slice_into(p_index_x, m);
+ x_slice_get_into(p_index_x, m);
return m;
}
-void MLPPTensor3::set_x_slice(int p_index_x, const Ref &p_mat) {
+void MLPPTensor3::x_slice_set(int p_index_x, const Ref &p_mat) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!p_mat.is_valid());
ERR_FAIL_COND(p_mat->size() != Size2i(_size.y, _size.z));
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
- set_element(p_index_x, y, z, p_mat->get_element(z, y));
+ element_set(p_index_x, y, z, p_mat->element_get(z, y));
}
}
}
-void MLPPTensor3::get_y_slice_into(int p_index_y, Ref target) const {
+void MLPPTensor3::y_slice_get_into(int p_index_y, Ref target) const {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!target.is_valid());
@@ -622,33 +622,33 @@ void MLPPTensor3::get_y_slice_into(int p_index_y, Ref target) const
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
- target->set_element(z, x, get_element(x, p_index_y, z));
+ target->element_set(z, x, element_get(x, p_index_y, z));
}
}
}
-Ref MLPPTensor3::get_y_slice(int p_index_y) const {
+Ref MLPPTensor3::y_slice_get(int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref());
Ref m;
m.instance();
- get_y_slice_into(p_index_y, m);
+ y_slice_get_into(p_index_y, m);
return m;
}
-void MLPPTensor3::set_y_slice(int p_index_y, const Ref &p_mat) {
+void MLPPTensor3::y_slice_set(int p_index_y, const Ref &p_mat) {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!p_mat.is_valid());
ERR_FAIL_COND(p_mat->size() != Size2i(_size.y, _size.z));
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
- set_element(x, p_index_y, z, p_mat->get_element(z, x));
+ element_set(x, p_index_y, z, p_mat->element_get(z, x));
}
}
}
-void MLPPTensor3::add_z_slices_image(const Ref &p_img, const int p_channels) {
+void MLPPTensor3::z_slices_add_image(const Ref &p_img, const int p_channels) {
ERR_FAIL_COND(!p_img.is_valid());
Size2i img_size = Size2i(p_img->get_width(), p_img->get_height());
@@ -701,7 +701,7 @@ void MLPPTensor3::add_z_slices_image(const Ref &p_img, const int p_channe
Color c = img->get_pixel(x, y);
for (int i = 0; i < channel_count; ++i) {
- set_element(y, x, start_channel + i, c[channels[i]]);
+ element_set(y, x, start_channel + i, c[channels[i]]);
}
}
}
@@ -709,7 +709,7 @@ void MLPPTensor3::add_z_slices_image(const Ref &p_img, const int p_channe
img->unlock();
}
-Ref MLPPTensor3::get_z_slice_image(const int p_index_z) const {
+Ref MLPPTensor3::z_slice_get_image(const int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_z, _size.z, Ref());
Ref image;
@@ -737,7 +737,7 @@ Ref MLPPTensor3::get_z_slice_image(const int p_index_z) const {
return image;
}
-Ref MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
+Ref MLPPTensor3::z_slices_get_image(const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
if (p_index_r != -1) {
ERR_FAIL_INDEX_V(p_index_r, _size.z, Ref());
}
@@ -772,19 +772,19 @@ Ref MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_inde
Color c;
if (p_index_r != -1) {
- c.r = get_element(y, x, p_index_r);
+ c.r = element_get(y, x, p_index_r);
}
if (p_index_g != -1) {
- c.g = get_element(y, x, p_index_g);
+ c.g = element_get(y, x, p_index_g);
}
if (p_index_b != -1) {
- c.b = get_element(y, x, p_index_b);
+ c.b = element_get(y, x, p_index_b);
}
if (p_index_a != -1) {
- c.a = get_element(y, x, p_index_a);
+ c.a = element_get(y, x, p_index_a);
}
image->set_pixel(x, y, c);
@@ -796,7 +796,7 @@ Ref<Image> MLPPTensor3::get_z_slices_image(const int p_index_r, const int p_inde
return image;
}
-void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels) const {
+void MLPPTensor3::z_slice_get_into_image(Ref<Image> p_target, const int p_index_z, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_z, _size.z);
ERR_FAIL_COND(!p_target.is_valid());
@@ -851,7 +851,7 @@ void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_
for (int x = 0; x < fms.x; ++x) {
Color c;
- float e = get_element(y, x, p_index_z);
+ float e = element_get(y, x, p_index_z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@@ -863,7 +863,7 @@ void MLPPTensor3::get_z_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
-void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
+void MLPPTensor3::z_slices_get_into_image(Ref<Image> p_target, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) const {
ERR_FAIL_COND(!p_target.is_valid());
if (p_index_r != -1) {
@@ -909,19 +909,19 @@ void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index
Color c;
if (p_index_r != -1) {
- c.r = get_element(y, x, p_index_r);
+ c.r = element_get(y, x, p_index_r);
}
if (p_index_g != -1) {
- c.g = get_element(y, x, p_index_g);
+ c.g = element_get(y, x, p_index_g);
}
if (p_index_b != -1) {
- c.b = get_element(y, x, p_index_b);
+ c.b = element_get(y, x, p_index_b);
}
if (p_index_a != -1) {
- c.a = get_element(y, x, p_index_a);
+ c.a = element_get(y, x, p_index_a);
}
p_target->set_pixel(x, y, c);
@@ -931,7 +931,7 @@ void MLPPTensor3::get_z_slices_into_image(Ref<Image> p_target, const int p_index
p_target->unlock();
}
-void MLPPTensor3::set_z_slice_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag) {
+void MLPPTensor3::z_slice_set_image(const Ref<Image> &p_img, const int p_index_z, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_z, _size.z);
@@ -959,13 +959,13 @@ void MLPPTensor3::set_z_slice_image(const Ref<Image> &p_img, const int p_index_z
for (int x = 0; x < fms.x; ++x) {
Color c = img->get_pixel(x, y);
- set_element(y, x, p_index_z, c[channel_index]);
+ element_set(y, x, p_index_z, c[channel_index]);
}
}
img->unlock();
}
-void MLPPTensor3::set_z_slices_image(const Ref<Image> &p_img, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) {
+void MLPPTensor3::z_slices_set_image(const Ref<Image> &p_img, const int p_index_r, const int p_index_g, const int p_index_b, const int p_index_a) {
ERR_FAIL_COND(!p_img.is_valid());
if (p_index_r != -1) {
@@ -998,19 +998,19 @@ void MLPPTensor3::set_z_slices_image(const Ref<Image> &p_img, const int p_index_
Color c = img->get_pixel(x, y);
if (p_index_r != -1) {
- set_element(y, x, p_index_r, c.r);
+ element_set(y, x, p_index_r, c.r);
}
if (p_index_g != -1) {
- set_element(y, x, p_index_g, c.g);
+ element_set(y, x, p_index_g, c.g);
}
if (p_index_b != -1) {
- set_element(y, x, p_index_b, c.b);
+ element_set(y, x, p_index_b, c.b);
}
if (p_index_a != -1) {
- set_element(y, x, p_index_a, c.a);
+ element_set(y, x, p_index_a, c.a);
}
}
}
@@ -1061,7 +1061,7 @@ void MLPPTensor3::set_from_image(const Ref<Image> &p_img, const int p_channels)
Color c = img->get_pixel(x, y);
for (int i = 0; i < channel_count; ++i) {
- set_element(y, x, i, c[channels[i]]);
+ element_set(y, x, i, c[channels[i]]);
}
}
}
@@ -1069,7 +1069,7 @@ void MLPPTensor3::set_from_image(const Ref<Image> &p_img, const int p_channels)
img->unlock();
}
-Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
+Ref<Image> MLPPTensor3::x_slice_get_image(const int p_index_x) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, Ref<Image>());
Ref<Image> image;
@@ -1088,7 +1088,7 @@ Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
for (int z = 0; z < _size.z; ++z) {
for (int y = 0; y < _size.y; ++y) {
- wptr[i] = static_cast<uint8_t>(get_element(p_index_x, y, z) * 255.0);
+ wptr[i] = static_cast<uint8_t>(element_get(p_index_x, y, z) * 255.0);
++i;
}
@@ -1098,7 +1098,7 @@ Ref<Image> MLPPTensor3::get_x_slice_image(const int p_index_x) const {
return image;
}
-void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels) const {
+void MLPPTensor3::x_slice_get_into_image(Ref<Image> p_target, const int p_index_x, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_COND(!p_target.is_valid());
@@ -1153,7 +1153,7 @@ void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_
for (int z = 0; z < fms.x; ++z) {
Color c;
- float e = get_element(y, p_index_x, z);
+ float e = element_get(y, p_index_x, z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@@ -1165,7 +1165,7 @@ void MLPPTensor3::get_x_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
-void MLPPTensor3::set_x_slice_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag) {
+void MLPPTensor3::x_slice_set_image(const Ref<Image> &p_img, const int p_index_x, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_x, _size.x);
@@ -1193,14 +1193,14 @@ void MLPPTensor3::set_x_slice_image(const Ref<Image> &p_img, const int p_index_x
for (int z = 0; z < fms.x; ++z) {
Color c = img->get_pixel(z, y);
- set_element(y, p_index_x, z, c[channel_index]);
+ element_set(y, p_index_x, z, c[channel_index]);
}
}
img->unlock();
}
-Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
+Ref<Image> MLPPTensor3::y_slice_get_image(const int p_index_y) const {
ERR_FAIL_INDEX_V(p_index_y, _size.y, Ref<Image>());
Ref<Image> image;
@@ -1219,7 +1219,7 @@ Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
for (int z = 0; z < _size.z; ++z) {
for (int x = 0; x < _size.x; ++x) {
- wptr[i] = static_cast<uint8_t>(get_element(x, p_index_y, z) * 255.0);
+ wptr[i] = static_cast<uint8_t>(element_get(x, p_index_y, z) * 255.0);
++i;
}
@@ -1229,7 +1229,7 @@ Ref<Image> MLPPTensor3::get_y_slice_image(const int p_index_y) const {
return image;
}
-void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels) const {
+void MLPPTensor3::y_slice_get_into_image(Ref<Image> p_target, const int p_index_y, const int p_target_channels) const {
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_COND(!p_target.is_valid());
@@ -1284,7 +1284,7 @@ void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_
for (int z = 0; z < fms.x; ++z) {
Color c;
- float e = get_element(p_index_y, x, z);
+ float e = element_get(p_index_y, x, z);
for (int i = 0; i < channel_count; ++i) {
c[channels[i]] = e;
@@ -1296,7 +1296,7 @@ void MLPPTensor3::get_y_slice_into_image(Ref<Image> p_target, const int p_index_
p_target->unlock();
}
-void MLPPTensor3::set_y_slice_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag) {
+void MLPPTensor3::y_slice_set_image(const Ref<Image> &p_img, const int p_index_y, const int p_image_channel_flag) {
ERR_FAIL_COND(!p_img.is_valid());
ERR_FAIL_INDEX(p_index_y, _size.y);
@@ -1324,7 +1324,7 @@ void MLPPTensor3::set_y_slice_image(const Ref<Image> &p_img, const int p_index_y
for (int x = 0; x < fms.x; ++x) {
Color c = img->get_pixel(x, z);
- set_element(p_index_y, x, z, c[channel_index]);
+ element_set(p_index_y, x, z, c[channel_index]);
}
}
@@ -2277,14 +2277,14 @@ void MLPPTensor3::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_data", "data"), &MLPPTensor3::set_data);
ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "data"), "set_data", "get_data");
- ClassDB::bind_method(D_METHOD("add_z_slice_pool_vector", "row"), &MLPPTensor3::add_z_slice_pool_vector);
- ClassDB::bind_method(D_METHOD("add_z_slice_mlpp_vector", "row"), &MLPPTensor3::add_z_slice_mlpp_vector);
- ClassDB::bind_method(D_METHOD("add_z_slice_mlpp_matrix", "matrix"), &MLPPTensor3::add_z_slice_mlpp_matrix);
+ ClassDB::bind_method(D_METHOD("z_slice_add_pool_vector", "row"), &MLPPTensor3::z_slice_add_pool_vector);
+ ClassDB::bind_method(D_METHOD("z_add_slice_mlpp_vector", "row"), &MLPPTensor3::z_slice_add_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_add_mlpp_matrix", "matrix"), &MLPPTensor3::z_slice_add_mlpp_matrix);
- ClassDB::bind_method(D_METHOD("remove_z_slice", "index"), &MLPPTensor3::remove_z_slice);
- ClassDB::bind_method(D_METHOD("remove_z_slice_unordered", "index"), &MLPPTensor3::remove_z_slice_unordered);
+ ClassDB::bind_method(D_METHOD("z_slice_remove", "index"), &MLPPTensor3::z_slice_remove);
+ ClassDB::bind_method(D_METHOD("z_slice_remove_unordered", "index"), &MLPPTensor3::z_slice_remove_unordered);
- ClassDB::bind_method(D_METHOD("swap_z_slice", "index_1", "index_2"), &MLPPTensor3::swap_z_slice);
+ ClassDB::bind_method(D_METHOD("z_slice_swap", "index_1", "index_2"), &MLPPTensor3::z_slice_swap);
ClassDB::bind_method(D_METHOD("clear"), &MLPPTensor3::clear);
ClassDB::bind_method(D_METHOD("reset"), &MLPPTensor3::reset);
@@ -2302,58 +2302,58 @@ void MLPPTensor3::_bind_methods() {
ClassDB::bind_method(D_METHOD("calculate_index", "index_y", "index_x", "index_z"), &MLPPTensor3::calculate_index);
ClassDB::bind_method(D_METHOD("calculate_z_slice_index", "index_z"), &MLPPTensor3::calculate_z_slice_index);
- ClassDB::bind_method(D_METHOD("get_element_index", "index"), &MLPPTensor3::get_element_index);
- ClassDB::bind_method(D_METHOD("set_element_index", "index", "val"), &MLPPTensor3::set_element_index);
+ ClassDB::bind_method(D_METHOD("element_get_index", "index"), &MLPPTensor3::element_get_index);
+ ClassDB::bind_method(D_METHOD("element_set_index", "index", "val"), &MLPPTensor3::element_set_index);
- ClassDB::bind_method(D_METHOD("get_element", "index_y", "index_x", "index_z"), &MLPPTensor3::get_element);
- ClassDB::bind_method(D_METHOD("set_element", "index_y", "index_x", "index_z", "val"), &MLPPTensor3::set_element);
+ ClassDB::bind_method(D_METHOD("element_get", "index_y", "index_x", "index_z"), &MLPPTensor3::element_get);
+ ClassDB::bind_method(D_METHOD("element_set", "index_y", "index_x", "index_z", "val"), &MLPPTensor3::element_set);
- ClassDB::bind_method(D_METHOD("get_row_pool_vector", "index_y", "index_z"), &MLPPTensor3::get_row_pool_vector);
- ClassDB::bind_method(D_METHOD("get_row_mlpp_vector", "index_y", "index_z"), &MLPPTensor3::get_row_mlpp_vector);
- ClassDB::bind_method(D_METHOD("get_row_into_mlpp_vector", "index_y", "index_z", "target"), &MLPPTensor3::get_row_into_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_get_pool_vector", "index_y", "index_z"), &MLPPTensor3::row_get_pool_vector);
+ ClassDB::bind_method(D_METHOD("row_get_mlpp_vector", "index_y", "index_z"), &MLPPTensor3::row_get_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_get_into_mlpp_vector", "index_y", "index_z", "target"), &MLPPTensor3::row_get_into_mlpp_vector);
- ClassDB::bind_method(D_METHOD("set_row_pool_vector", "index_y", "index_z", "row"), &MLPPTensor3::set_row_pool_vector);
- ClassDB::bind_method(D_METHOD("set_row_mlpp_vector", "index_y", "index_z", "row"), &MLPPTensor3::set_row_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("row_set_pool_vector", "index_y", "index_z", "row"), &MLPPTensor3::row_set_pool_vector);
+ ClassDB::bind_method(D_METHOD("row_set_mlpp_vector", "index_y", "index_z", "row"), &MLPPTensor3::row_set_mlpp_vector);
- ClassDB::bind_method(D_METHOD("get_z_slice_pool_vector", "index_z"), &MLPPTensor3::get_z_slice_pool_vector);
- ClassDB::bind_method(D_METHOD("get_z_slice_mlpp_vector", "index_z"), &MLPPTensor3::get_z_slice_mlpp_vector);
- ClassDB::bind_method(D_METHOD("get_z_slice_into_mlpp_vector", "index_z", "target"), &MLPPTensor3::get_z_slice_into_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_get_pool_vector", "index_z"), &MLPPTensor3::z_slice_get_pool_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_get_mlpp_vector", "index_z"), &MLPPTensor3::z_slice_get_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_get_into_mlpp_vector", "index_z", "target"), &MLPPTensor3::z_slice_get_into_mlpp_vector);
- ClassDB::bind_method(D_METHOD("get_z_slice_mlpp_matrix", "index_z"), &MLPPTensor3::get_z_slice_mlpp_matrix);
- ClassDB::bind_method(D_METHOD("get_z_slice_into_mlpp_matrix", "index_z", "target"), &MLPPTensor3::get_z_slice_into_mlpp_matrix);
+ ClassDB::bind_method(D_METHOD("z_slice_get_mlpp_matrix", "index_z"), &MLPPTensor3::z_slice_get_mlpp_matrix);
+ ClassDB::bind_method(D_METHOD("z_slice_get_into_mlpp_matrix", "index_z", "target"), &MLPPTensor3::z_slice_get_into_mlpp_matrix);
- ClassDB::bind_method(D_METHOD("set_z_slice_pool_vector", "index_z", "row"), &MLPPTensor3::set_z_slice_pool_vector);
- ClassDB::bind_method(D_METHOD("set_z_slice_mlpp_vector", "index_z", "row"), &MLPPTensor3::set_z_slice_mlpp_vector);
- ClassDB::bind_method(D_METHOD("set_z_slice_mlpp_matrix", "index_z", "mat"), &MLPPTensor3::set_z_slice_mlpp_matrix);
+ ClassDB::bind_method(D_METHOD("z_slice_set_pool_vector", "index_z", "row"), &MLPPTensor3::z_slice_set_pool_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_set_mlpp_vector", "index_z", "row"), &MLPPTensor3::z_slice_set_mlpp_vector);
+ ClassDB::bind_method(D_METHOD("z_slice_set_mlpp_matrix", "index_z", "mat"), &MLPPTensor3::z_slice_set_mlpp_matrix);
- ClassDB::bind_method(D_METHOD("get_x_slice_into", "index_x", "target"), &MLPPTensor3::get_x_slice_into);
- ClassDB::bind_method(D_METHOD("get_x_slice", "index_x"), &MLPPTensor3::get_x_slice);
- ClassDB::bind_method(D_METHOD("set_x_slice", "index_x", "mat"), &MLPPTensor3::set_x_slice);
+ ClassDB::bind_method(D_METHOD("x_slice_get_into", "index_x", "target"), &MLPPTensor3::x_slice_get_into);
+ ClassDB::bind_method(D_METHOD("x_slice_get", "index_x"), &MLPPTensor3::x_slice_get);
+ ClassDB::bind_method(D_METHOD("x_slice_set", "index_x", "mat"), &MLPPTensor3::x_slice_set);
- ClassDB::bind_method(D_METHOD("get_y_slice_into", "index_y", "target"), &MLPPTensor3::get_y_slice_into);
- ClassDB::bind_method(D_METHOD("get_y_slice", "index_y"), &MLPPTensor3::get_y_slice);
- ClassDB::bind_method(D_METHOD("set_y_slice", "index_y", "mat"), &MLPPTensor3::set_y_slice);
+ ClassDB::bind_method(D_METHOD("y_slice_get_into", "index_y", "target"), &MLPPTensor3::y_slice_get_into);
+ ClassDB::bind_method(D_METHOD("y_slice_get", "index_y"), &MLPPTensor3::y_slice_get);
+ ClassDB::bind_method(D_METHOD("y_slice_set", "index_y", "mat"), &MLPPTensor3::y_slice_set);
- ClassDB::bind_method(D_METHOD("add_z_slices_image", "img", "channels"), &MLPPTensor3::add_z_slices_image, IMAGE_CHANNEL_FLAG_RGBA);
+ ClassDB::bind_method(D_METHOD("z_slices_add_image", "img", "channels"), &MLPPTensor3::z_slices_add_image, IMAGE_CHANNEL_FLAG_RGBA);
- ClassDB::bind_method(D_METHOD("get_z_slice_image", "index_z"), &MLPPTensor3::get_z_slice_image);
- ClassDB::bind_method(D_METHOD("get_z_slices_image", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::get_z_slices_image, -1, -1, -1, -1);
+ ClassDB::bind_method(D_METHOD("z_slice_get_image", "index_z"), &MLPPTensor3::z_slice_get_image);
+ ClassDB::bind_method(D_METHOD("z_slices_get_image", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_get_image, -1, -1, -1, -1);
- ClassDB::bind_method(D_METHOD("get_z_slice_into_image", "target", "index_z", "target_channels"), &MLPPTensor3::get_z_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
- ClassDB::bind_method(D_METHOD("get_z_slices_into_image", "target", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::get_z_slices_into_image, -1, -1, -1, -1);
+ ClassDB::bind_method(D_METHOD("z_slice_get_into_image", "target", "index_z", "target_channels"), &MLPPTensor3::z_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
+ ClassDB::bind_method(D_METHOD("z_slices_get_into_image", "target", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_get_into_image, -1, -1, -1, -1);
- ClassDB::bind_method(D_METHOD("set_z_slice_image", "img", "index_z", "image_channel_flag"), &MLPPTensor3::set_z_slice_image, IMAGE_CHANNEL_FLAG_R);
- ClassDB::bind_method(D_METHOD("set_z_slices_image", "img", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::set_z_slices_image);
+ ClassDB::bind_method(D_METHOD("z_slice_set_image", "img", "index_z", "image_channel_flag"), &MLPPTensor3::z_slice_set_image, IMAGE_CHANNEL_FLAG_R);
+ ClassDB::bind_method(D_METHOD("z_slices_set_image", "img", "index_r", "index_g", "index_b", "index_a"), &MLPPTensor3::z_slices_set_image);
ClassDB::bind_method(D_METHOD("set_from_image", "img", "channels"), &MLPPTensor3::set_from_image, IMAGE_CHANNEL_FLAG_RGBA);
- ClassDB::bind_method(D_METHOD("get_x_slice_image", "index_x"), &MLPPTensor3::get_x_slice_image);
- ClassDB::bind_method(D_METHOD("get_x_slice_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::get_x_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
- ClassDB::bind_method(D_METHOD("set_x_slice_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::set_x_slice_image, IMAGE_CHANNEL_FLAG_R);
+ ClassDB::bind_method(D_METHOD("x_slice_get_image", "index_x"), &MLPPTensor3::x_slice_get_image);
+ ClassDB::bind_method(D_METHOD("x_slice_get_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::x_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
+ ClassDB::bind_method(D_METHOD("x_slice_set_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::x_slice_set_image, IMAGE_CHANNEL_FLAG_R);
- ClassDB::bind_method(D_METHOD("get_y_slice_image", "index_x"), &MLPPTensor3::get_y_slice_image);
- ClassDB::bind_method(D_METHOD("get_y_slice_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::get_y_slice_into_image, IMAGE_CHANNEL_FLAG_RGB);
- ClassDB::bind_method(D_METHOD("set_y_slice_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::set_y_slice_image, IMAGE_CHANNEL_FLAG_R);
+ ClassDB::bind_method(D_METHOD("y_slice_get_image", "index_x"), &MLPPTensor3::y_slice_get_image);
+ ClassDB::bind_method(D_METHOD("y_slice_get_into_image", "target", "index_x", "target_channels"), &MLPPTensor3::y_slice_get_into_image, IMAGE_CHANNEL_FLAG_RGB);
+ ClassDB::bind_method(D_METHOD("y_slice_set_image", "img", "index_x", "image_channel_flag"), &MLPPTensor3::y_slice_set_image, IMAGE_CHANNEL_FLAG_R);
ClassDB::bind_method(D_METHOD("fill", "val"), &MLPPTensor3::fill);
diff --git a/mlpp/lin_alg/mlpp_tensor3.h b/mlpp/lin_alg/mlpp_tensor3.h
index 96b534e..0362f6e 100644
--- a/mlpp/lin_alg/mlpp_tensor3.h
+++ b/mlpp/lin_alg/mlpp_tensor3.h
@@ -23,7 +23,7 @@ class MLPPTensor3 : public Resource {
public:
Array get_data();
void set_data(const Array &p_from);
-
+
_FORCE_INLINE_ real_t *ptrw() {
return _data;
}
@@ -32,17 +32,17 @@ public:
return _data;
}
- void add_z_slice(const Vector<real_t> &p_row);
- void add_z_slice_pool_vector(const PoolRealArray &p_row);
- void add_z_slice_mlpp_vector(const Ref<MLPPVector> &p_row);
- void add_z_slice_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix);
- void remove_z_slice(int p_index);
+ void z_slice_add(const Vector<real_t> &p_row);
+ void z_slice_add_pool_vector(const PoolRealArray &p_row);
+ void z_slice_add_mlpp_vector(const Ref<MLPPVector> &p_row);
+ void z_slice_add_mlpp_matrix(const Ref<MLPPMatrix> &p_matrix);
+ void z_slice_remove(int p_index);
// Removes the item copying the last value into the position of the one to
// remove. It's generally faster than `remove`.
- void remove_z_slice_unordered(int p_index);
+ void z_slice_remove_unordered(int p_index);
- void swap_z_slice(int p_index_1, int p_index_2);
+ void z_slice_swap(int p_index_1, int p_index_2);
_FORCE_INLINE_ void clear() { resize(Size3i()); }
_FORCE_INLINE_ void reset() {
@@ -79,19 +79,19 @@ public:
return _data[p_index];
}
- _FORCE_INLINE_ real_t get_element_index(int p_index) const {
+ _FORCE_INLINE_ real_t element_get_index(int p_index) const {
ERR_FAIL_INDEX_V(p_index, data_size(), 0);
return _data[p_index];
}
- _FORCE_INLINE_ void set_element_index(int p_index, real_t p_val) {
+ _FORCE_INLINE_ void element_set_index(int p_index, real_t p_val) {
ERR_FAIL_INDEX(p_index, data_size());
_data[p_index] = p_val;
}
- _FORCE_INLINE_ real_t get_element(int p_index_y, int p_index_x, int p_index_z) const {
+ _FORCE_INLINE_ real_t element_get(int p_index_y, int p_index_x, int p_index_z) const {
ERR_FAIL_INDEX_V(p_index_x, _size.x, 0);
ERR_FAIL_INDEX_V(p_index_y, _size.y, 0);
ERR_FAIL_INDEX_V(p_index_z, _size.z, 0);
@@ -99,7 +99,7 @@ public:
return _data[p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z];
}
- _FORCE_INLINE_ void set_element(int p_index_y, int p_index_x, int p_index_z, real_t p_val) {
+ _FORCE_INLINE_ void element_set(int p_index_y, int p_index_x, int p_index_z, real_t p_val) {
ERR_FAIL_INDEX(p_index_x, _size.x);
ERR_FAIL_INDEX(p_index_y, _size.y);
ERR_FAIL_INDEX(p_index_z, _size.z);
@@ -107,39 +107,39 @@ public:
_data[p_index_y * _size.x + p_index_x + _size.x * _size.y * p_index_z] = p_val;
}
- Vector<real_t> get_row_vector(int p_index_y, int p_index_z) const;
- PoolRealArray get_row_pool_vector(int p_index_y, int p_index_z) const;
- Ref<MLPPVector> get_row_mlpp_vector(int p_index_y, int p_index_z) const;
- void get_row_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const;
+ Vector<real_t> row_get_vector(int p_index_y, int p_index_z) const;
+ PoolRealArray row_get_pool_vector(int p_index_y, int p_index_z) const;
+ Ref<MLPPVector> row_get_mlpp_vector(int p_index_y, int p_index_z) const;
+ void row_get_into_mlpp_vector(int p_index_y, int p_index_z, Ref<MLPPVector> target) const;
- void set_row_vector(int p_index_y, int p_index_z, const Vector<real_t> &p_row);
- void set_row_pool_vector(int p_index_y, int p_index_z, const PoolRealArray &p_row);
- void set_row_mlpp_vector(int p_index_y, int p_index_z, const Ref<MLPPVector> &p_row);
+ void row_set_vector(int p_index_y, int p_index_z, const Vector<real_t>