/*************************************************************************/
/* mlp.cpp */
/*************************************************************************/
/* This file is part of: */
/* PMLPP Machine Learning Library */
/* https://github.com/Relintai/pmlpp */
/*************************************************************************/
/* Copyright (c) 2023-present Péter Magyar. */
/* Copyright (c) 2022-2023 Marc Melikyan */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "mlp.h"

#include "core/log/logger.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <random>

Ref<MLPPMatrix> MLPPMLP::get_input_set() {
	return _input_set;
}
void MLPPMLP::set_input_set(const Ref<MLPPMatrix> &val) {
	_input_set = val;

	_initialized = false;
}

Ref<MLPPVector> MLPPMLP::get_output_set() {
	return _output_set;
}
void MLPPMLP::set_output_set(const Ref<MLPPVector> &val) {
	_output_set = val;

	_initialized = false;
}

int MLPPMLP::get_n_hidden() {
	return _n_hidden;
}
void MLPPMLP::set_n_hidden(const int val) {
	_n_hidden = val;

	_initialized = false;
}

real_t MLPPMLP::get_lambda() {
	return _lambda;
}
void MLPPMLP::set_lambda(const real_t val) {
	_lambda = val;

	_initialized = false;
}

real_t MLPPMLP::get_alpha() {
	return _alpha;
}
void MLPPMLP::set_alpha(const real_t val) {
	_alpha = val;

	_initialized = false;
}

MLPPReg::RegularizationType MLPPMLP::get_reg() {
	return _reg;
}
void MLPPMLP::set_reg(const MLPPReg::RegularizationType val) {
	_reg = val;

	_initialized = false;
}

Ref<MLPPVector> MLPPMLP::model_set_test(const Ref<MLPPMatrix> &X) {
	return evaluatem(X);
}
real_t MLPPMLP::model_test(const Ref<MLPPVector> &x) {
	return evaluatev(x);
}
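
// A sketch of the math behind gradient_descent() below, assuming the sigmoid
// activations and log loss this file actually uses:
//
//   z2 = X * W1 + b1,   a2 = sigmoid(z2),   y_hat = sigmoid(a2 * w2 + b2)
//
// With e = y_hat - y and n training rows, the batch gradients are:
//
//   dJ/dw2 = a2^T * e / n                                  (D2_1 below)
//   dJ/db2 = sum(e) / n
//   dJ/dW1 = X^T * ((e outer w2) had sigmoid'(z2)) / n     (D1_1..D1_3 below)
//   dJ/db1 = (e outer w2) had sigmoid'(z2), summed over its n rows, / n
//
// where "had" denotes the element-wise (Hadamard) product. Regularization is
// applied separately to the updated weights via MLPPReg::reg_weights*().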

void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
	ERR_FAIL_COND(!_initialized);

	MLPPActivation avn;
	MLPPReg regularization;

	real_t cost_prev = 0;
	int epoch = 1;

	_y_hat->fill(0);

	forward_pass();

	while (true) {
		cost_prev = cost(_y_hat, _output_set);

		// Calculating the errors
		Ref<MLPPVector> error = _y_hat->subn(_output_set);

		// Calculating the weight/bias gradients for layer 2
		Ref<MLPPVector> D2_1 = _a2->transposen()->mult_vec(error);

		// Weight and bias update for layer 2
		_weights2->sub(D2_1->scalar_multiplyn(learning_rate / static_cast<real_t>(_n)));
		_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));

		_bias2 -= learning_rate * error->sum_elements() / static_cast<real_t>(_n);

		// Calculating the weight/bias gradients for layer 1
		Ref<MLPPMatrix> D1_1 = error->outer_product(_weights2);
		Ref<MLPPMatrix> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivm(_z2));
		Ref<MLPPMatrix> D1_3 = _input_set->transposen()->multn(D1_2);

		// Weight and bias update for layer 1
		_weights1->sub(D1_3->scalar_multiplyn(learning_rate / static_cast<real_t>(_n)));
		_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));

		_bias1->subtract_matrix_rows(D1_2->scalar_multiplyn(learning_rate / static_cast<real_t>(_n)));

		forward_pass();

		// UI PORTION
		if (UI) {
			MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, _output_set));

			PLOG_MSG("Layer 1:");
			MLPPUtilities::print_ui_mb(_weights1, _bias1);
			PLOG_MSG("Layer 2:");
			MLPPUtilities::print_ui_vb(_weights2, _bias2);
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}
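
// sgd() below applies the same gradients to one randomly drawn sample at a
// time. For a single row x with target y, e = y_hat - y is a scalar, so the
// layer 2 update collapses to w2 -= lr * e * a2 and b2 -= lr * e, and layer 1
// uses the outer product x outer (e * w2 had sigmoid'(z2)). No 1/n scaling
// appears because each step sees exactly one sample.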

void MLPPMLP::sgd(real_t learning_rate, int max_epoch, bool UI) {
	ERR_FAIL_COND(!_initialized);

	MLPPActivation avn;
	MLPPReg regularization;

	real_t cost_prev = 0;
	int epoch = 1;

	std::random_device rd;
	std::default_random_engine generator(rd());
	std::uniform_int_distribution<int> distribution(0, int(_n - 1));

	Ref<MLPPVector> input_set_row_tmp;
	input_set_row_tmp.instance();
	input_set_row_tmp->resize(_input_set->size().x);

	Ref<MLPPVector> output_set_row_tmp;
	output_set_row_tmp.instance();
	output_set_row_tmp->resize(1);

	Ref<MLPPVector> y_hat_row_tmp;
	y_hat_row_tmp.instance();
	y_hat_row_tmp->resize(1);

	Ref<MLPPVector> lz2;
	lz2.instance();
	Ref<MLPPVector> la2;
	la2.instance();

	while (true) {
		int output_index = distribution(generator);

		_input_set->row_get_into_mlpp_vector(output_index, input_set_row_tmp);
		real_t output_element = _output_set->element_get(output_index);
		output_set_row_tmp->element_set(0, output_element);

		real_t ly_hat = evaluatev(input_set_row_tmp);
		y_hat_row_tmp->element_set(0, ly_hat);

		propagatev(input_set_row_tmp, lz2, la2);
		cost_prev = cost(y_hat_row_tmp, output_set_row_tmp);

		real_t error = ly_hat - output_element;

		// Weight update for layer 2
		Ref<MLPPVector> D2_1 = la2->scalar_multiplyn(error);
		_weights2->sub(D2_1->scalar_multiplyn(learning_rate));
		_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));

		// Bias update for layer 2
		_bias2 -= learning_rate * error;

		// Weight update for layer 1
		Ref<MLPPVector> D1_1 = _weights2->scalar_multiplyn(error);
		Ref<MLPPVector> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivv(lz2));
		Ref<MLPPMatrix> D1_3 = input_set_row_tmp->outer_product(D1_2);

		_weights1->sub(D1_3->scalar_multiplyn(learning_rate));
		_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));

		// Bias update for layer 1
		_bias1->sub(D1_2->scalar_multiplyn(learning_rate));

		ly_hat = evaluatev(input_set_row_tmp);
		y_hat_row_tmp->element_set(0, ly_hat);

		if (UI) {
			MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat_row_tmp, output_set_row_tmp));

			PLOG_MSG("Layer 1:");
			MLPPUtilities::print_ui_mb(_weights1, _bias1);
			PLOG_MSG("Layer 2:");
			MLPPUtilities::print_ui_vb(_weights2, _bias2);
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}

	forward_pass();
}
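
// mbgd() is the mini-batch middle ground: the batch formulas from
// gradient_descent() are applied per mini-batch, with the 1/n factor replaced
// by 1/|batch| (lr_d_cos below). Note that n_mini_batch = _n / mini_batch_size
// truncates, so how leftover rows are distributed is up to
// create_mini_batchesmv().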

void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	ERR_FAIL_COND(!_initialized);

	MLPPActivation avn;
	MLPPReg regularization;

	real_t cost_prev = 0;
	int epoch = 1;

	Ref<MLPPMatrix> lz2;
	lz2.instance();
	Ref<MLPPMatrix> la2;
	la2.instance();

	// Creating the mini-batches
	int n_mini_batch = _n / mini_batch_size;

	MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			Ref<MLPPMatrix> current_input = batches.input_sets[i];
			Ref<MLPPVector> current_output = batches.output_sets[i];

			Ref<MLPPVector> ly_hat = evaluatem(current_input);
			propagatem(current_input, lz2, la2);
			cost_prev = cost(ly_hat, current_output);

			// Calculating the errors
			Ref<MLPPVector> error = ly_hat->subn(current_output);

			// Calculating the weight/bias gradients for layer 2
			Ref<MLPPVector> D2_1 = la2->transposen()->mult_vec(error);

			real_t lr_d_cos = learning_rate / static_cast<real_t>(current_output->size());

			// Weight and bias update for layer 2
			_weights2->sub(D2_1->scalar_multiplyn(lr_d_cos));
			_weights2->set_from_mlpp_vector(regularization.reg_weightsv(_weights2, _lambda, _alpha, _reg));

			// Calculating the bias gradients for layer 2
			real_t b_gradient = error->sum_elements();

			// Bias update for layer 2
			_bias2 -= learning_rate * b_gradient / static_cast<real_t>(current_output->size());

			// Calculating the weight/bias gradients for layer 1
			Ref<MLPPMatrix> D1_1 = error->outer_product(_weights2);
			Ref<MLPPMatrix> D1_2 = D1_1->hadamard_productn(avn.sigmoid_derivm(lz2));
			Ref<MLPPMatrix> D1_3 = current_input->transposen()->multn(D1_2);

			// Weight and bias update for layer 1
			_weights1->sub(D1_3->scalar_multiplyn(lr_d_cos));
			_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));

			_bias1->subtract_matrix_rows(D1_2->scalar_multiplyn(lr_d_cos));

			_y_hat = evaluatem(current_input);

			if (UI) {
				MLPPUtilities::cost_info(epoch, cost_prev, cost(_y_hat, current_output));

				PLOG_MSG("Layer 1:");
				MLPPUtilities::print_ui_mb(_weights1, _bias1);
				PLOG_MSG("Layer 2:");
				MLPPUtilities::print_ui_vb(_weights2, _bias2);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}

	forward_pass();
}
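
// score() compares the cached _y_hat against _output_set. In the original
// MLPP, performance() rounds each prediction to 0 or 1 and returns the
// fraction of exact matches; performance_vec() is assumed to be the ported
// equivalent.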
real_t MLPPMLP::score() {
	MLPPUtilities util;

	return util.performance_vec(_y_hat, _output_set);
}

void MLPPMLP::save(const String &file_name) {
	ERR_FAIL_COND(!_initialized);

	// TODO: Parameter saving hasn't been ported to the new file API yet.
	//MLPPUtilities util;
	//util.saveParameters(file_name, weights1, bias1, 0, 1);
	//util.saveParameters(file_name, weights2, bias2, 1, 2);
}
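
// Shape conventions used by initialize() below (assuming Size2i(x, y) holds
// (columns, rows), which matches how X->multn(_weights1) is consumed in this
// file): for an n x k input (size().y rows, size().x features), W1 is
// k x n_hidden, b1 and w2 hold n_hidden entries, and b2 is a scalar.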

bool MLPPMLP::is_initialized() {
	return _initialized;
}

void MLPPMLP::initialize() {
	if (_initialized) {
		return;
	}

	ERR_FAIL_COND(!_input_set.is_valid() || !_output_set.is_valid() || _n_hidden == 0);

	_n = _input_set->size().y;
	_k = _input_set->size().x;

	_y_hat->resize(_n);

	MLPPUtilities util;

	_weights1->resize(Size2i(_n_hidden, _k));
	_weights2->resize(_n_hidden);
	_bias1->resize(_n_hidden);

	util.weight_initializationm(_weights1);
	util.weight_initializationv(_weights2);
	util.bias_initializationv(_bias1);

	_bias2 = util.bias_initializationr();

	_z2.instance();
	_a2.instance();

	_initialized = true;
}
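
// cost() below evaluates the regularized log loss that all three trainers
// minimize (assuming log_lossv() is the mean binary cross-entropy, as in the
// original MLPP):
//
//   J = -(1/n) * sum_i [ y_i * log(y_hat_i) + (1 - y_i) * log(1 - y_hat_i) ]
//       + reg_term(w2) + reg_term(W1)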
real_t MLPPMLP::cost(const Ref<MLPPVector> &p_y_hat, const Ref<MLPPVector> &p_y) {
	MLPPReg regularization;
	MLPPCost mlpp_cost;

	return mlpp_cost.log_lossv(p_y_hat, p_y) + regularization.reg_termv(_weights2, _lambda, _alpha, _reg) + regularization.reg_termm(_weights1, _lambda, _alpha, _reg);
}

Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
	MLPPActivation avn;

	Ref<MLPPMatrix> pz2 = X->multn(_weights1)->add_vecn(_bias1);
	Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);

	return avn.sigmoid_normv(pa2->mult_vec(_weights2)->scalar_addn(_bias2));
}
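
// The evaluate* functions return only the final prediction, while the
// propagate* variants also write the hidden pre-activation (z2) and
// activation (a2) into their out-parameters; the trainers need those
// intermediates for the backward pass.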

void MLPPMLP::propagatem(const Ref<MLPPMatrix> &X, Ref<MLPPMatrix> z2_out, Ref<MLPPMatrix> a2_out) {
	MLPPActivation avn;

	z2_out->set_from_mlpp_matrix(X->multn(_weights1)->add_vecn(_bias1));
	a2_out->set_from_mlpp_matrix(avn.sigmoid_normm(z2_out));
}

real_t MLPPMLP::evaluatev(const Ref<MLPPVector> &x) {
	MLPPActivation avn;

	Ref<MLPPVector> pz2 = _weights1->transposen()->mult_vec(x)->addn(_bias1);
	Ref<MLPPVector> pa2 = avn.sigmoid_normv(pz2);

	return avn.sigmoid_normr(_weights2->dot(pa2) + _bias2);
}

void MLPPMLP::propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out) {
	MLPPActivation avn;

	z2_out->set_from_mlpp_vector(_weights1->transposen()->mult_vec(x)->addn(_bias1));
	a2_out->set_from_mlpp_vector(avn.sigmoid_normv(z2_out));
}

void MLPPMLP::forward_pass() {
	MLPPActivation avn;

	_z2->set_from_mlpp_matrix(_input_set->multn(_weights1)->add_vecn(_bias1));
	_a2->set_from_mlpp_matrix(avn.sigmoid_normm(_z2));

	_y_hat->set_from_mlpp_vector(avn.sigmoid_normv(_a2->mult_vec(_weights2)->scalar_addn(_bias2)));
}
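
// A minimal usage sketch, kept as a comment. It assumes the surrounding
// engine's Ref<T>::instance() / memnew() API and that the data is filled in
// elsewhere; the names X, y, mlp and acc are illustrative only:
//
//   Ref<MLPPMatrix> X; // n x k feature matrix
//   X.instance();
//   Ref<MLPPVector> y; // n binary targets
//   y.instance();
//   // ... fill X and y ...
//
//   // Last three arguments mirror the defaults of the parameterless
//   // constructor below.
//   Ref<MLPPMLP> mlp = memnew(MLPPMLP(X, y, 8, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5));
//   mlp->gradient_descent(0.1, 1000, false);
//   real_t acc = mlp->score();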

MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
	_input_set = p_input_set;
	_output_set = p_output_set;

	_y_hat.instance();

	_weights1.instance();
	_weights2.instance();
	_z2.instance();
	_a2.instance();
	_bias1.instance();

	_n_hidden = p_n_hidden;
	_reg = p_reg;
	_lambda = p_lambda;
	_alpha = p_alpha;

	// Ensure plain members have sane values even if initialize() fails its checks.
	_n = 0;
	_k = 0;
	_bias2 = 0;

	_initialized = false;

	initialize();
}
MLPPMLP::MLPPMLP() {
	_y_hat.instance();

	_n_hidden = 0;
	_n = 0;
	_k = 0;
	_reg = MLPPReg::REGULARIZATION_TYPE_NONE;
	_lambda = 0.5;
	_alpha = 0.5;

	_weights1.instance();
	_weights2.instance();
	_bias1.instance();

	_bias2 = 0;

	_z2.instance();
	_a2.instance();

	_initialized = false;
}

MLPPMLP::~MLPPMLP() {
}

void MLPPMLP::_bind_methods() {
	ClassDB::bind_method(D_METHOD("get_input_set"), &MLPPMLP::get_input_set);
	ClassDB::bind_method(D_METHOD("set_input_set", "val"), &MLPPMLP::set_input_set);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "input_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPMatrix"), "set_input_set", "get_input_set");

	ClassDB::bind_method(D_METHOD("get_output_set"), &MLPPMLP::get_output_set);
	ClassDB::bind_method(D_METHOD("set_output_set", "val"), &MLPPMLP::set_output_set);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "output_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPVector"), "set_output_set", "get_output_set");

	ClassDB::bind_method(D_METHOD("get_n_hidden"), &MLPPMLP::get_n_hidden);
	ClassDB::bind_method(D_METHOD("set_n_hidden", "val"), &MLPPMLP::set_n_hidden);
	ADD_PROPERTY(PropertyInfo(Variant::INT, "n_hidden"), "set_n_hidden", "get_n_hidden");

	ClassDB::bind_method(D_METHOD("get_lambda"), &MLPPMLP::get_lambda);
	ClassDB::bind_method(D_METHOD("set_lambda", "val"), &MLPPMLP::set_lambda);
	ADD_PROPERTY(PropertyInfo(Variant::REAL, "lambda"), "set_lambda", "get_lambda");

	ClassDB::bind_method(D_METHOD("get_alpha"), &MLPPMLP::get_alpha);
	ClassDB::bind_method(D_METHOD("set_alpha", "val"), &MLPPMLP::set_alpha);
	ADD_PROPERTY(PropertyInfo(Variant::REAL, "alpha"), "set_alpha", "get_alpha");

	ClassDB::bind_method(D_METHOD("get_reg"), &MLPPMLP::get_reg);
	ClassDB::bind_method(D_METHOD("set_reg", "val"), &MLPPMLP::set_reg);
	ADD_PROPERTY(PropertyInfo(Variant::INT, "reg"), "set_reg", "get_reg");

	ClassDB::bind_method(D_METHOD("is_initialized"), &MLPPMLP::is_initialized);
	ClassDB::bind_method(D_METHOD("initialize"), &MLPPMLP::initialize);

	ClassDB::bind_method(D_METHOD("model_set_test", "X"), &MLPPMLP::model_set_test);
	ClassDB::bind_method(D_METHOD("model_test", "x"), &MLPPMLP::model_test);

	ClassDB::bind_method(D_METHOD("gradient_descent", "learning_rate", "max_epoch", "UI"), &MLPPMLP::gradient_descent, false);
	ClassDB::bind_method(D_METHOD("sgd", "learning_rate", "max_epoch", "UI"), &MLPPMLP::sgd, false);
	ClassDB::bind_method(D_METHOD("mbgd", "learning_rate", "max_epoch", "mini_batch_size", "UI"), &MLPPMLP::mbgd, false);

	ClassDB::bind_method(D_METHOD("score"), &MLPPMLP::score);

	ClassDB::bind_method(D_METHOD("save", "file_name"), &MLPPMLP::save);
}