Fully ported MLPPMLP.
This commit is contained in:
parent 7581be0e7f
commit fbc20cc749
@@ -1410,6 +1410,26 @@ std::vector<std::vector<real_t>> MLPPLinAlg::outerProduct(std::vector<real_t> a,
 	return C;
 }
 
+Ref<MLPPMatrix> MLPPLinAlg::outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
+	Ref<MLPPMatrix> C;
+	C.instance();
+	Size2i size = Size2i(b->size(), a->size());
+	C->resize(size);
+
+	const real_t *a_ptr = a->ptr();
+	const real_t *b_ptr = b->ptr();
+
+	for (int i = 0; i < size.y; ++i) {
+		real_t curr_a = a_ptr[i];
+
+		for (int j = 0; j < size.x; ++j) {
+			C->set_element(i, j, curr_a * b_ptr[j]);
+		}
+	}
+
+	return C;
+}
+
 std::vector<real_t> MLPPLinAlg::hadamard_product(std::vector<real_t> a, std::vector<real_t> b) {
 	std::vector<real_t> c;
 	c.resize(a.size());
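For a of length m and b of length n, outer_product returns the m × n matrix with C(i, j) = a[i] * b[j], that is, a * bᵀ. The same computation on plain std::vector, as a reference sketch independent of the MLPPMatrix API:

#include <vector>

// C has a.size() rows and b.size() columns; C[i][j] = a[i] * b[j].
std::vector<std::vector<float>> outer_product_sketch(const std::vector<float> &a, const std::vector<float> &b) {
	std::vector<std::vector<float>> C(a.size(), std::vector<float>(b.size()));
	for (size_t i = 0; i < a.size(); ++i) {
		for (size_t j = 0; j < b.size(); ++j) {
			C[i][j] = a[i] * b[j];
		}
	}
	return C;
}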
@@ -1694,6 +1714,25 @@ std::vector<real_t> MLPPLinAlg::subtractMatrixRows(std::vector<real_t> a, std::v
 	return a;
 }
 
+Ref<MLPPVector> MLPPLinAlg::subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B) {
+	Ref<MLPPVector> c = a->duplicate();
+
+	Size2i b_size = B->size();
+
+	ERR_FAIL_COND_V(b_size.x != c->size(), c);
+
+	const real_t *b_ptr = B->ptr();
+	real_t *c_ptr = c->ptrw();
+
+	for (int i = 0; i < b_size.y; ++i) {
+		for (int j = 0; j < b_size.x; ++j) {
+			c_ptr[j] -= b_ptr[B->calculate_index(i, j)];
+		}
+	}
+
+	return c;
+}
+
 std::vector<real_t> MLPPLinAlg::log(std::vector<real_t> a) {
 	std::vector<real_t> b;
 	b.resize(a.size());
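subtract_matrix_rows subtracts every row of B from a in turn, i.e. c[j] = a[j] - sum over i of B(i, j); the MLP training code uses it to fold the per-sample bias gradients in D1_2 into bias1. The same operation on plain vectors, as a sketch:

#include <vector>

// c[j] = a[j] - sum_i B[i][j]: every row of B is subtracted from a.
std::vector<float> subtract_matrix_rows_sketch(std::vector<float> a, const std::vector<std::vector<float>> &B) {
	for (const std::vector<float> &row : B) {
		for (size_t j = 0; j < row.size() && j < a.size(); ++j) {
			a[j] -= row[j];
		}
	}
	return a;
}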
@@ -2182,6 +2221,18 @@ real_t MLPPLinAlg::sum_elements(std::vector<real_t> a) {
 	return sum;
 }
 
+real_t MLPPLinAlg::sum_elementsv(const Ref<MLPPVector> &a) {
+	int a_size = a->size();
+
+	const real_t *a_ptr = a->ptr();
+
+	real_t sum = 0;
+	for (int i = 0; i < a_size; ++i) {
+		sum += a_ptr[i];
+	}
+	return sum;
+}
+
 real_t MLPPLinAlg::cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b) {
 	return dot(a, b) / (norm_2(a) * norm_2(b));
 }
@@ -170,6 +170,7 @@ public:
 	// VECTOR FUNCTIONS
 
 	std::vector<std::vector<real_t>> outerProduct(std::vector<real_t> a, std::vector<real_t> b); // This multiplies a, bT
+	Ref<MLPPMatrix> outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); // This multiplies a, bT
 
 	std::vector<real_t> hadamard_product(std::vector<real_t> a, std::vector<real_t> b);
 	Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
@@ -195,6 +196,7 @@ public:
 	void subtractionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
 
 	std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
+	Ref<MLPPVector> subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
 
 	std::vector<real_t> log(std::vector<real_t> a);
 	std::vector<real_t> log10(std::vector<real_t> a);
@@ -256,6 +258,7 @@ public:
 	real_t norm_sqv(const Ref<MLPPVector> &a);
 
 	real_t sum_elements(std::vector<real_t> a);
+	real_t sum_elementsv(const Ref<MLPPVector> &a);
 
 	real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
@@ -56,4 +56,5 @@ void MLPPMatrix::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("set_from_mlpp_vectors_array", "from"), &MLPPMatrix::set_from_mlpp_vectors_array);
 	ClassDB::bind_method(D_METHOD("set_from_arrays", "from"), &MLPPMatrix::set_from_arrays);
+	ClassDB::bind_method(D_METHOD("set_from_mlpp_matrix", "from"), &MLPPMatrix::set_from_mlpp_matrix);
 }
@@ -373,9 +373,18 @@ public:
 		return ret;
 	}
 
+	_FORCE_INLINE_ void set_from_mlpp_matrix(const Ref<MLPPMatrix> &p_from) {
+		ERR_FAIL_COND(!p_from.is_valid());
+
+		resize(p_from->size());
+		for (int i = 0; i < p_from->data_size(); ++i) {
+			_data[i] = p_from->_data[i];
+		}
+	}
+
 	_FORCE_INLINE_ void set_from_mlpp_matrixr(const MLPPMatrix &p_from) {
 		resize(p_from.size());
-		for (int i = 0; i < p_from.data_size(); i++) {
+		for (int i = 0; i < p_from.data_size(); ++i) {
 			_data[i] = p_from._data[i];
 		}
 	}
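Both setters resize the destination and then copy element by element, so the result is a deep copy that shares no buffer with the source. A minimal usage sketch, assuming only the MLPPMatrix API visible in this diff:

Ref<MLPPMatrix> src;
src.instance();
src->resize(Size2i(2, 2));
src->set_element(0, 0, 1.0);

Ref<MLPPMatrix> dst;
dst.instance();
dst->set_from_mlpp_matrix(src); // deep copy: later writes to dst leave src untouched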
mlpp/mlp/mlp.cpp
@@ -15,15 +15,71 @@
 #include <iostream>
 #include <random>
 
-std::vector<real_t> MLPPMLP::model_set_test(std::vector<std::vector<real_t>> X) {
-	return evaluate(X);
+Ref<MLPPMatrix> MLPPMLP::get_input_set() {
+	return input_set;
 }
+void MLPPMLP::set_input_set(const Ref<MLPPMatrix> &val) {
+	input_set = val;
+
+	_initialized = false;
+}
 
-real_t MLPPMLP::model_test(std::vector<real_t> x) {
-	return evaluate(x);
+Ref<MLPPVector> MLPPMLP::get_output_set() {
+	return output_set;
 }
+void MLPPMLP::set_output_set(const Ref<MLPPVector> &val) {
+	output_set = val;
+
+	_initialized = false;
+}
+
+int MLPPMLP::get_n_hidden() {
+	return n_hidden;
+}
+void MLPPMLP::set_n_hidden(const int val) {
+	n_hidden = val;
+
+	_initialized = false;
+}
+
+real_t MLPPMLP::get_lambda() {
+	return lambda;
+}
+void MLPPMLP::set_lambda(const real_t val) {
+	lambda = val;
+
+	_initialized = false;
+}
+
+real_t MLPPMLP::get_alpha() {
+	return alpha;
+}
+void MLPPMLP::set_alpha(const real_t val) {
+	alpha = val;
+
+	_initialized = false;
+}
+
+MLPPReg::RegularizationType MLPPMLP::get_reg() {
+	return reg;
+}
+void MLPPMLP::set_reg(const MLPPReg::RegularizationType val) {
+	reg = val;
+
+	_initialized = false;
+}
+
+Ref<MLPPVector> MLPPMLP::model_set_test(const Ref<MLPPMatrix> &X) {
+	return evaluatem(X);
+}
+
+real_t MLPPMLP::model_test(const Ref<MLPPVector> &x) {
+	return evaluatev(x);
+}
 
 void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
+	ERR_FAIL_COND(!_initialized);
+
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
@@ -33,47 +89,46 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 	forward_pass();
 
 	while (true) {
-		cost_prev = cost(y_hat, outputSet);
+		cost_prev = cost(y_hat, output_set);
 
 		// Calculating the errors
-		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
+		Ref<MLPPVector> error = alg.subtractionnv(y_hat, output_set);
 
 		// Calculating the weight/bias gradients for layer 2
-		std::vector<real_t> D2_1 = alg.mat_vec_mult(alg.transpose(a2), error);
+		Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposem(a2), error);
 
 		// weights and bias update for layer 2
-		weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate / n, D2_1));
-		weights2 = regularization.regWeights(weights2, lambda, alpha, reg);
+		weights2 = alg.subtractionnv(weights2, alg.scalar_multiplynv(learning_rate / n, D2_1));
+		weights2 = regularization.reg_weightsv(weights2, lambda, alpha, reg);
 
-		bias2 -= learning_rate * alg.sum_elements(error) / n;
+		bias2 -= learning_rate * alg.sum_elementsv(error) / n;
 
 		// Calculating the weight/bias for layer 1
-		std::vector<std::vector<real_t>> D1_1;
-		D1_1.resize(n);
+		Ref<MLPPMatrix> D1_1;
 
-		D1_1 = alg.outerProduct(error, weights2);
+		D1_1 = alg.outer_product(error, weights2);
 
-		std::vector<std::vector<real_t>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
+		Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(D1_1, avn.sigmoid_derivm(z2));
 
-		std::vector<std::vector<real_t>> D1_3 = alg.matmult(alg.transpose(inputSet), D1_2);
+		Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(input_set), D1_2);
 
 		// weight and bias update for layer 1
-		weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate / n, D1_3));
-		weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
+		weights1 = alg.subtractionm(weights1, alg.scalar_multiplym(learning_rate / n, D1_3));
+		weights1 = regularization.reg_weightsm(weights1, lambda, alpha, reg);
 
-		bias1 = alg.subtractMatrixRows(bias1, alg.scalarMultiply(learning_rate / n, D1_2));
+		bias1 = alg.subtract_matrix_rows(bias1, alg.scalar_multiplym(learning_rate / n, D1_2));
 
 		forward_pass();
 
 		// UI PORTION
 		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
+			MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, output_set));
 			std::cout << "Layer 1:" << std::endl;
-			MLPPUtilities::UI(weights1, bias1);
+			MLPPUtilities::print_ui_mb(weights1, bias1);
 			std::cout << "Layer 2:" << std::endl;
-			MLPPUtilities::UI(weights2, bias2);
+			MLPPUtilities::print_ui_vb(weights2, bias2);
 		}
 		epoch++;
 
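For reference, the update this loop implements for the two-layer network y_hat = sigmoid(a2 * weights2 + bias2), with a2 = sigmoid(z2) and z2 = X * weights1 + bias1, trained under log loss; this is a sketch derived from the code above, not stated in the commit itself:

// Gradient reference for the loop above, using the names from this diff:
//   error        = y_hat - output_set
//   dC/dweights2 = transposem(a2) * error / n                                      -> D2_1
//   dC/dbias2    = sum_elementsv(error) / n
//   dC/dweights1 = transposem(input_set) * hadamard(outer_product(error, weights2), sigmoid'(z2)) / n   -> D1_3
//   dC/dbias1    = column sums of the same hadamard term / n                       -> subtract_matrix_rows(bias1, ...)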
@@ -84,50 +139,77 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 }
 
 void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
+	ERR_FAIL_COND(!_initialized);
+
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
 
-	while (true) {
-		std::random_device rd;
-		std::default_random_engine generator(rd());
-		std::uniform_int_distribution<int> distribution(0, int(n - 1));
-		int outputIndex = distribution(generator);
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::uniform_int_distribution<int> distribution(0, int(n - 1));
 
-		real_t y_hat = evaluate(inputSet[outputIndex]);
-		auto [z2, a2] = propagate(inputSet[outputIndex]);
-		cost_prev = cost({ y_hat }, { outputSet[outputIndex] });
-		real_t error = y_hat - outputSet[outputIndex];
+	Ref<MLPPVector> input_set_row_tmp;
+	input_set_row_tmp.instance();
+	input_set_row_tmp->resize(input_set->size().x);
+
+	Ref<MLPPVector> output_set_row_tmp;
+	output_set_row_tmp.instance();
+	output_set_row_tmp->resize(1);
+
+	Ref<MLPPVector> y_hat_row_tmp;
+	y_hat_row_tmp.instance();
+	y_hat_row_tmp->resize(1);
+
+	Ref<MLPPVector> lz2;
+	lz2.instance();
+	Ref<MLPPVector> la2;
+	la2.instance();
+
+	while (true) {
+		int output_index = distribution(generator);
+
+		input_set->get_row_into_mlpp_vector(output_index, input_set_row_tmp);
+		real_t output_element = output_set->get_element(output_index);
+		output_set_row_tmp->set_element(0, output_element);
+
+		real_t y_hat = evaluatev(input_set_row_tmp);
+		y_hat_row_tmp->set_element(0, y_hat);
+		propagatev(input_set_row_tmp, lz2, la2);
+		cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);
+		real_t error = y_hat - output_element;
 
 		// Weight update for layer 2
-		std::vector<real_t> D2_1 = alg.scalarMultiply(error, a2);
-		weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, D2_1));
-		weights2 = regularization.regWeights(weights2, lambda, alpha, reg);
+		Ref<MLPPVector> D2_1 = alg.scalar_multiplynv(error, la2);
+		weights2 = alg.subtractionnv(weights2, alg.scalar_multiplynv(learning_rate, D2_1));
+		weights2 = regularization.reg_weightsv(weights2, lambda, alpha, reg);
 
 		// Bias update for layer 2
 		bias2 -= learning_rate * error;
 
 		// Weight update for layer 1
-		std::vector<real_t> D1_1 = alg.scalarMultiply(error, weights2);
-		std::vector<real_t> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-		std::vector<std::vector<real_t>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
+		Ref<MLPPVector> D1_1 = alg.scalar_multiplynv(error, weights2);
+		Ref<MLPPVector> D1_2 = alg.hadamard_productnv(D1_1, avn.sigmoid_derivv(lz2));
+		Ref<MLPPMatrix> D1_3 = alg.outer_product(input_set_row_tmp, D1_2);
 
-		weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
-		weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
+		weights1 = alg.subtractionm(weights1, alg.scalar_multiplym(learning_rate, D1_3));
+		weights1 = regularization.reg_weightsm(weights1, lambda, alpha, reg);
 
 		// Bias update for layer 1
-		bias1 = alg.subtraction(bias1, alg.scalarMultiply(learning_rate, D1_2));
+		bias1 = alg.subtractionnv(bias1, alg.scalar_multiplynv(learning_rate, D1_2));
 
-		y_hat = evaluate(inputSet[outputIndex]);
+		y_hat = evaluatev(input_set_row_tmp);
 
 		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { outputSet[outputIndex] }));
+			MLPPUtilities::cost_info(epoch, cost_prev, cost_prev);
 			std::cout << "Layer 1:" << std::endl;
-			MLPPUtilities::UI(weights1, bias1);
+			MLPPUtilities::print_ui_mb(weights1, bias1);
 			std::cout << "Layer 2:" << std::endl;
-			MLPPUtilities::UI(weights2, bias2);
+			MLPPUtilities::print_ui_vb(weights2, bias2);
 		}
 
 		epoch++;
 
 		if (epoch > max_epoch) {
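The port hoists the random engine and the scratch rows out of the training loop, so each iteration only draws an index and refills preallocated vectors. The sampling pattern, as a standalone sketch:

#include <random>

// One uniformly distributed row index per SGD step; the engine is constructed
// once, outside the loop, instead of once per iteration as in the old code.
int sample_row_index(std::default_random_engine &generator, int n) {
	std::uniform_int_distribution<int> distribution(0, n - 1);
	return distribution(generator);
}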
@@ -139,61 +221,74 @@ void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
 }
 
 void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+	ERR_FAIL_COND(!_initialized);
+
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
 
+	Ref<MLPPMatrix> lz2;
+	lz2.instance();
+	Ref<MLPPMatrix> la2;
+	la2.instance();
+
 	// Creating the mini-batches
 	int n_mini_batch = n / mini_batch_size;
-	auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
+	MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(input_set, output_set, n_mini_batch);
 
 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = evaluate(inputMiniBatches[i]);
-			auto [z2, a2] = propagate(inputMiniBatches[i]);
-			cost_prev = cost(y_hat, outputMiniBatches[i]);
+			Ref<MLPPMatrix> current_input = batches.input_sets[i];
+			Ref<MLPPVector> current_output = batches.output_sets[i];
+
+			Ref<MLPPVector> y_hat = evaluatem(current_input);
+			propagatem(current_input, lz2, la2);
+			cost_prev = cost(y_hat, current_output);
 
 			// Calculating the errors
-			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
+			Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output);
 
 			// Calculating the weight/bias gradients for layer 2
-			std::vector<real_t> D2_1 = alg.mat_vec_mult(alg.transpose(a2), error);
+			Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposem(la2), error);
+
+			real_t lr_d_cos = learning_rate / static_cast<real_t>(current_output->size());
 
 			// weights and bias update for layer 2
-			weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), D2_1));
-			weights2 = regularization.regWeights(weights2, lambda, alpha, reg);
+			weights2 = alg.subtractionnv(weights2, alg.scalar_multiplynv(lr_d_cos, D2_1));
+			weights2 = regularization.reg_weightsv(weights2, lambda, alpha, reg);
 
 			// Calculating the bias gradients for layer 2
-			real_t b_gradient = alg.sum_elements(error);
+			real_t b_gradient = alg.sum_elementsv(error);
 
 			// Bias update for layer 2
-			bias2 -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size();
+			bias2 -= learning_rate * b_gradient / current_output->size();
 
 			// Calculating the weight/bias for layer 1
-			std::vector<std::vector<real_t>> D1_1 = alg.outerProduct(error, weights2);
+			Ref<MLPPMatrix> D1_1 = alg.outer_product(error, weights2);
 
-			std::vector<std::vector<real_t>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
+			Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(D1_1, avn.sigmoid_derivm(lz2));
 
-			std::vector<std::vector<real_t>> D1_3 = alg.matmult(alg.transpose(inputMiniBatches[i]), D1_2);
+			Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(current_input), D1_2);
 
 			// weight and bias update for layer 1
-			weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), D1_3));
-			weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
+			weights1 = alg.subtractionm(weights1, alg.scalar_multiplym(lr_d_cos, D1_3));
+			weights1 = regularization.reg_weightsm(weights1, lambda, alpha, reg);
 
-			bias1 = alg.subtractMatrixRows(bias1, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), D1_2));
+			bias1 = alg.subtract_matrix_rows(bias1, alg.scalar_multiplym(lr_d_cos, D1_2));
 
-			y_hat = evaluate(inputMiniBatches[i]);
+			y_hat = evaluatem(current_input);
 
 			if (UI) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputMiniBatches[i]));
+				MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, current_output));
 				std::cout << "Layer 1:" << std::endl;
-				MLPPUtilities::UI(weights1, bias1);
+				MLPPUtilities::print_ui_mb(weights1, bias1);
 				std::cout << "Layer 2:" << std::endl;
-				MLPPUtilities::UI(weights2, bias2);
+				MLPPUtilities::print_ui_vb(weights2, bias2);
 			}
 		}
 
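n_mini_batch comes from integer division, so up to mini_batch_size - 1 samples are not covered by the loop index; whether create_mini_batchesmv folds the leftover rows into the last batch or drops them is up to that helper. A worked example:

// Worked example of the batch count (integer division):
// n = 100 samples, mini_batch_size = 32  ->  n_mini_batch = 100 / 32 = 3 batches,
// with 4 samples left over for the helper to distribute or drop.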
@@ -209,77 +304,208 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
 
 real_t MLPPMLP::score() {
 	MLPPUtilities util;
-	return util.performance(y_hat, outputSet);
+	return util.performance_mat(y_hat, output_set);
 }
 
-void MLPPMLP::save(std::string fileName) {
+void MLPPMLP::save(const String &fileName) {
+	ERR_FAIL_COND(!_initialized);
+
 	MLPPUtilities util;
-	util.saveParameters(fileName, weights1, bias1, 0, 1);
-	util.saveParameters(fileName, weights2, bias2, 1, 2);
+	//util.saveParameters(fileName, weights1, bias1, 0, 1);
+	//util.saveParameters(fileName, weights2, bias2, 1, 2);
 }
 
-real_t MLPPMLP::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+bool MLPPMLP::is_initialized() {
+	return _initialized;
+}
+
+void MLPPMLP::initialize() {
+	if (_initialized) {
+		return;
+	}
+
+	ERR_FAIL_COND(!input_set.is_valid() || !output_set.is_valid() || n_hidden == 0);
+
+	n = input_set->size().y;
+	k = input_set->size().x;
+
+	MLPPActivation avn;
+	y_hat->resize(n);
+
+	MLPPUtilities util;
+
+	weights1->resize(Size2i(k, n_hidden));
+	weights2->resize(n_hidden);
+	bias1->resize(n_hidden);
+
+	util.weight_initializationm(weights1);
+	util.weight_initializationv(weights2);
+	util.bias_initializationv(bias1);
+
+	bias2 = util.bias_initializationr();
+
+	z2.instance();
+	a2.instance();
+
+	_initialized = true;
+}
+
+real_t MLPPMLP::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
 	class MLPPCost cost;
-	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg);
+
+	return cost.log_lossv(y_hat, y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termm(weights1, lambda, alpha, reg);
 }
 
-std::vector<real_t> MLPPMLP::evaluate(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
-	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
-	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
-	return avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
+
+	Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
+	Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);
+
+	return avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(pa2, weights2)));
 }
 
-std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPMLP::propagate(std::vector<std::vector<real_t>> X) {
+void MLPPMLP::propagatem(const Ref<MLPPMatrix> &X, Ref<MLPPMatrix> z2_out, Ref<MLPPMatrix> a2_out) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
-	std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
-	std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
-	return { z2, a2 };
+
+	z2_out->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(X, weights1), bias1));
+	a2_out->set_from_mlpp_matrix(avn.sigmoid_normm(z2_out));
 }
 
-real_t MLPPMLP::evaluate(std::vector<real_t> x) {
+real_t MLPPMLP::evaluatev(const Ref<MLPPVector> &x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
-	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
-	std::vector<real_t> a2 = avn.sigmoid(z2);
-	return avn.sigmoid(alg.dot(weights2, a2) + bias2);
+
+	Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposem(weights1), x), bias1);
+	Ref<MLPPVector> pa2 = avn.sigmoid_normv(pz2);
+
+	return avn.sigmoid(alg.dotv(weights2, pa2) + bias2);
 }
 
-std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPMLP::propagate(std::vector<real_t> x) {
+void MLPPMLP::propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
-	std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
-	std::vector<real_t> a2 = avn.sigmoid(z2);
-	return { z2, a2 };
+
+	z2_out->set_from_mlpp_vector(alg.additionnv(alg.mat_vec_multv(alg.transposem(weights1), x), bias1));
+	a2_out->set_from_mlpp_vector(avn.sigmoid_normv(z2_out));
 }
 
 void MLPPMLP::forward_pass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
-	z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
-	a2 = avn.sigmoid(z2);
-	y_hat = avn.sigmoid(alg.scalarAdd(bias2, alg.mat_vec_mult(a2, weights2)));
+
+	z2 = alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1);
+	a2 = avn.sigmoid_normm(z2);
+
+	y_hat = avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(a2, weights2)));
 }
 
-MLPPMLP::MLPPMLP(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int n_hidden, std::string reg, real_t lambda, real_t alpha) :
-		inputSet(inputSet), outputSet(outputSet), n_hidden(n_hidden), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
-	MLPPActivation avn;
-	y_hat.resize(n);
-
-	weights1 = MLPPUtilities::weightInitialization(k, n_hidden);
-	weights2 = MLPPUtilities::weightInitialization(n_hidden);
-	bias1 = MLPPUtilities::biasInitialization(n_hidden);
-	bias2 = MLPPUtilities::biasInitialization();
+MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
+	input_set = p_input_set;
+	output_set = p_output_set;
+
+	y_hat.instance();
+
+	n_hidden = p_n_hidden;
+	n = input_set->size().y;
+	k = input_set->size().x;
+	reg = p_reg;
+	lambda = p_lambda;
+	alpha = p_alpha;
+
+	MLPPActivation avn;
+	y_hat->resize(n);
+
+	MLPPUtilities util;
+
+	weights1.instance();
+	weights1->resize(Size2i(k, n_hidden));
+
+	weights2.instance();
+	weights2->resize(n_hidden);
+
+	bias1.instance();
+	bias1->resize(n_hidden);
+
+	util.weight_initializationm(weights1);
+	util.weight_initializationv(weights2);
+	util.bias_initializationv(bias1);
+
+	bias2 = util.bias_initializationr();
+
+	z2.instance();
+	a2.instance();
+
+	_initialized = true;
 }
 
+MLPPMLP::MLPPMLP() {
+	y_hat.instance();
+
+	n_hidden = 0;
+	n = 0;
+	k = 0;
+	reg = MLPPReg::REGULARIZATION_TYPE_NONE;
+	lambda = 0.5;
+	alpha = 0.5;
+
+	weights1.instance();
+	weights2.instance();
+	bias1.instance();
+
+	bias2 = 0;
+
+	z2.instance();
+	a2.instance();
+
+	_initialized = false;
+}
+
+MLPPMLP::~MLPPMLP() {
+}
+
+void MLPPMLP::_bind_methods() {
+	ClassDB::bind_method(D_METHOD("get_input_set"), &MLPPMLP::get_input_set);
+	ClassDB::bind_method(D_METHOD("set_input_set", "val"), &MLPPMLP::set_input_set);
+	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "input_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPMatrix"), "set_input_set", "get_input_set");
+
+	ClassDB::bind_method(D_METHOD("get_output_set"), &MLPPMLP::get_output_set);
+	ClassDB::bind_method(D_METHOD("set_output_set", "val"), &MLPPMLP::set_output_set);
+	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "output_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPVector"), "set_output_set", "get_output_set");
+
+	ClassDB::bind_method(D_METHOD("get_n_hidden"), &MLPPMLP::get_n_hidden);
+	ClassDB::bind_method(D_METHOD("set_n_hidden", "val"), &MLPPMLP::set_n_hidden);
+	ADD_PROPERTY(PropertyInfo(Variant::INT, "n_hidden"), "set_n_hidden", "get_n_hidden");
+
+	ClassDB::bind_method(D_METHOD("get_lambda"), &MLPPMLP::get_lambda);
+	ClassDB::bind_method(D_METHOD("set_lambda", "val"), &MLPPMLP::set_lambda);
+	ADD_PROPERTY(PropertyInfo(Variant::REAL, "lambda"), "set_lambda", "get_lambda");
+
+	ClassDB::bind_method(D_METHOD("get_alpha"), &MLPPMLP::get_alpha);
+	ClassDB::bind_method(D_METHOD("set_alpha", "val"), &MLPPMLP::set_alpha);
+	ADD_PROPERTY(PropertyInfo(Variant::REAL, "alpha"), "set_alpha", "get_alpha");
+
+	ClassDB::bind_method(D_METHOD("get_reg"), &MLPPMLP::get_reg);
+	ClassDB::bind_method(D_METHOD("set_reg", "val"), &MLPPMLP::set_reg);
+	ADD_PROPERTY(PropertyInfo(Variant::INT, "reg"), "set_reg", "get_reg");
+
+	ClassDB::bind_method(D_METHOD("is_initialized"), &MLPPMLP::is_initialized);
+	ClassDB::bind_method(D_METHOD("initialize"), &MLPPMLP::initialize);
+
+	ClassDB::bind_method(D_METHOD("model_set_test", "X"), &MLPPMLP::model_set_test);
+	ClassDB::bind_method(D_METHOD("model_test", "x"), &MLPPMLP::model_test);
+
+	ClassDB::bind_method(D_METHOD("gradient_descent", "learning_rate", "max_epoch", "UI"), &MLPPMLP::gradient_descent, false);
+	ClassDB::bind_method(D_METHOD("sgd", "learning_rate", "max_epoch", "UI"), &MLPPMLP::sgd, false);
+	ClassDB::bind_method(D_METHOD("mbgd", "learning_rate", "max_epoch", "mini_batch_size", "UI"), &MLPPMLP::mbgd, false);
+
+	ClassDB::bind_method(D_METHOD("score"), &MLPPMLP::score);
+	ClassDB::bind_method(D_METHOD("save", "file_name"), &MLPPMLP::save);
+}
+
+// ======= OLD =======
+
 MLPPMLPOld::MLPPMLPOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int n_hidden, std::string reg, real_t lambda, real_t alpha) :
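A hypothetical usage sketch of the ported class, using only names that appear in this diff; the data sizes and values are illustrative, not from the commit:

Ref<MLPPMatrix> X;
X.instance();
X->resize(Size2i(2, 4)); // 2 features (columns), 4 samples (rows)
// ... fill X with X->set_element(row, col, value) ...

Ref<MLPPVector> y;
y.instance();
y->resize(4);
// ... fill y with y->set_element(i, value) ...

Ref<MLPPMLP> mlp;
mlp.instance();
mlp->set_input_set(X);
mlp->set_output_set(y);
mlp->set_n_hidden(8);
mlp->initialize(); // required: the setters reset _initialized
mlp->gradient_descent(0.1, 1000, false);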
@@ -15,6 +15,8 @@
 #include "core/object/reference.h"
 
+#include "../regularization/reg.h"
+
 #include "../lin_alg/mlpp_matrix.h"
 #include "../lin_alg/mlpp_vector.h"
 
@@ -26,52 +28,78 @@ class MLPPMLP : public Reference {
 	GDCLASS(MLPPMLP, Reference);
 
 public:
-	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
-	real_t model_test(std::vector<real_t> x);
+	Ref<MLPPMatrix> get_input_set();
+	void set_input_set(const Ref<MLPPMatrix> &val);
+
+	Ref<MLPPVector> get_output_set();
+	void set_output_set(const Ref<MLPPVector> &val);
+
+	int get_n_hidden();
+	void set_n_hidden(const int val);
+
+	real_t get_lambda();
+	void set_lambda(const real_t val);
+
+	real_t get_alpha();
+	void set_alpha(const real_t val);
+
+	MLPPReg::RegularizationType get_reg();
+	void set_reg(const MLPPReg::RegularizationType val);
+
+	Ref<MLPPVector> model_set_test(const Ref<MLPPMatrix> &X);
+	real_t model_test(const Ref<MLPPVector> &x);
+
+	bool is_initialized();
+	void initialize();
 
 	void gradient_descent(real_t learning_rate, int max_epoch, bool UI = false);
 	void sgd(real_t learning_rate, int max_epoch, bool UI = false);
 	void mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 
 	real_t score();
-	void save(std::string fileName);
+	void save(const String &file_name);
 
-	MLPPMLP(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
+	MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t p_lambda = 0.5, real_t p_alpha = 0.5);
+
+	MLPPMLP();
+	~MLPPMLP();
 
-private:
-	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+protected:
+	real_t cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
 
-	std::vector<real_t> evaluate(std::vector<std::vector<real_t>> X);
-	std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> propagate(std::vector<std::vector<real_t>> X);
-	real_t evaluate(std::vector<real_t> x);
-	std::tuple<std::vector<real_t>, std::vector<real_t>> propagate(std::vector<real_t> x);
+	Ref<MLPPVector> evaluatem(const Ref<MLPPMatrix> &X);
+	void propagatem(const Ref<MLPPMatrix> &X, Ref<MLPPMatrix> z2_out, Ref<MLPPMatrix> a2_out);
+
+	real_t evaluatev(const Ref<MLPPVector> &x);
+	void propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out);
 
 	void forward_pass();
 
+	static void _bind_methods();
+
-	std::vector<std::vector<real_t>> inputSet;
-	std::vector<real_t> outputSet;
-	std::vector<real_t> y_hat;
+	Ref<MLPPMatrix> input_set;
+	Ref<MLPPVector> output_set;
+	Ref<MLPPVector> y_hat;
 
-	std::vector<std::vector<real_t>> weights1;
-	std::vector<real_t> weights2;
+	Ref<MLPPMatrix> weights1;
+	Ref<MLPPVector> weights2;
 
-	std::vector<real_t> bias1;
+	Ref<MLPPVector> bias1;
 	real_t bias2;
 
-	std::vector<std::vector<real_t>> z2;
-	std::vector<std::vector<real_t>> a2;
+	Ref<MLPPMatrix> z2;
+	Ref<MLPPMatrix> a2;
 
 	int n;
 	int k;
 	int n_hidden;
 
 	// Regularization Params
-	std::string reg;
+	MLPPReg::RegularizationType reg;
 	real_t lambda; /* Regularization Parameter */
 	real_t alpha; /* This is the controlling param for Elastic Net */
 
+	bool _initialized;
 };
 
 class MLPPMLPOld {
@@ -154,6 +154,7 @@ void MLPPReg::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("reg_deriv_termv", "weights", "lambda", "alpha", "reg"), &MLPPReg::reg_deriv_termv);
 	ClassDB::bind_method(D_METHOD("reg_deriv_termm", "weights", "lambda", "alpha", "reg"), &MLPPReg::reg_deriv_termm);
 
+	BIND_ENUM_CONSTANT(REGULARIZATION_TYPE_NONE);
 	BIND_ENUM_CONSTANT(REGULARIZATION_TYPE_RIDGE);
 	BIND_ENUM_CONSTANT(REGULARIZATION_TYPE_LASSO);
 	BIND_ENUM_CONSTANT(REGULARIZATION_TYPE_ELASTIC_NET);
@@ -24,7 +24,8 @@ class MLPPReg : public Reference {
 
 public:
 	enum RegularizationType {
-		REGULARIZATION_TYPE_RIDGE = 0,
+		REGULARIZATION_TYPE_NONE = 0,
+		REGULARIZATION_TYPE_RIDGE,
 		REGULARIZATION_TYPE_LASSO,
 		REGULARIZATION_TYPE_ELASTIC_NET,
 		REGULARIZATION_TYPE_WEIGHT_CLIPPING,
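Putting REGULARIZATION_TYPE_NONE at value 0 shifts every constant after it up by one, so anything that relied on the old numeric values (RIDGE was 0) needs re-checking; the reg.cpp hunk above binds the new constant to match. The resulting values:

// Resulting enum values after this change:
//   REGULARIZATION_TYPE_NONE            = 0  (new; previously RIDGE held 0)
//   REGULARIZATION_TYPE_RIDGE           = 1
//   REGULARIZATION_TYPE_LASSO           = 2
//   REGULARIZATION_TYPE_ELASTIC_NET     = 3
//   REGULARIZATION_TYPE_WEIGHT_CLIPPING = 4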
@@ -478,6 +478,36 @@ void MLPPUtilities::UI(std::vector<real_t> weights, std::vector<real_t> initial,
 	std::cout << bias << std::endl;
 }
 
+void MLPPUtilities::print_ui_vb(Ref<MLPPVector> weights, real_t bias) {
+	String str = "Values of the weight(s):\n";
+	str += weights->to_string();
+	str += "\nValue of the bias:\n";
+	str += String::num(bias);
+
+	PLOG_MSG(str);
+}
+void MLPPUtilities::print_ui_vib(Ref<MLPPVector> weights, Ref<MLPPVector> initial, real_t bias) {
+	String str = "Values of the weight(s):\n";
+	str += weights->to_string();
+
+	str += "\nValues of the initial(s):\n";
+	str += initial->to_string();
+
+	str += "\nValue of the bias:\n";
+	str += String::num(bias);
+
+	PLOG_MSG(str);
+}
+void MLPPUtilities::print_ui_mb(Ref<MLPPMatrix> weights, Ref<MLPPVector> bias) {
+	String str = "Values of the weight(s):\n";
+	str += weights->to_string();
+
+	str += "\nValues of the biases:\n";
+	str += bias->to_string();
+
+	PLOG_MSG(str);
+}
+
 void MLPPUtilities::CostInfo(int epoch, real_t cost_prev, real_t Cost) {
 	std::cout << "-----------------------------------" << std::endl;
 	std::cout << "This is epoch: " << epoch << std::endl;
@@ -65,7 +65,12 @@ public:
 	// Gradient Descent related
 	static void UI(std::vector<real_t> weights, real_t bias);
 	static void UI(std::vector<real_t> weights, std::vector<real_t> initial, real_t bias);
-	static void UI(std::vector<std::vector<real_t>>, std::vector<real_t> bias);
+	static void UI(std::vector<std::vector<real_t>> weights, std::vector<real_t> bias);
+
+	static void print_ui_vb(Ref<MLPPVector> weights, real_t bias);
+	static void print_ui_vib(Ref<MLPPVector> weights, Ref<MLPPVector> initial, real_t bias);
+	static void print_ui_mb(Ref<MLPPMatrix> weights, Ref<MLPPVector> bias);
 
 	static void CostInfo(int epoch, real_t cost_prev, real_t Cost);
+	static void cost_info(int epoch, real_t cost_prev, real_t cost);
@@ -39,6 +39,8 @@ SOFTWARE.
 #include "mlpp/kmeans/kmeans.h"
 #include "mlpp/knn/knn.h"
 
+#include "mlpp/mlp/mlp.h"
+
 #include "test/mlpp_tests.h"
 
 void register_pmlpp_types(ModuleRegistrationLevel p_level) {
@@ -58,6 +60,8 @@ void register_pmlpp_types(ModuleRegistrationLevel p_level) {
 	ClassDB::register_class<MLPPKNN>();
 	ClassDB::register_class<MLPPKMeans>();
 
+	ClassDB::register_class<MLPPMLP>();
+
 	ClassDB::register_class<MLPPDataESimple>();
 	ClassDB::register_class<MLPPDataSimple>();
 	ClassDB::register_class<MLPPDataComplex>();