pmlpp/mlpp/gan/gan.cpp

//
// GAN.cpp
//
// Created by Marc Melikyan on 11/4/20.
//

#include "gan.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include "core/log/logger.h"

#include <cmath>
#include <iostream>

/*
Ref<MLPPMatrix> MLPPGAN::get_input_set() {
	return _input_set;
}
void MLPPGAN::set_input_set(const Ref<MLPPMatrix> &val) {
	_input_set = val;
}

Ref<MLPPVector> MLPPGAN::get_output_set() {
	return _output_set;
}
void MLPPGAN::set_output_set(const Ref<MLPPVector> &val) {
	_output_set = val;
}

int MLPPGAN::get_k() {
	return _k;
}
void MLPPGAN::set_k(const int val) {
	_k = val;
}
*/
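
// The network is stored as a single stack of hidden layers: the first half of
// `_network` (indices 0 through _network.size() / 2) is the generator, mapping
// _k-dimensional Gaussian noise to fake examples, and the second half plus
// `_output_layer` (a sigmoid head trained with logistic loss) is the
// discriminator. The index arithmetic below follows from that split; see
// model_set_test_generator() and model_set_test_discriminator().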
Ref<MLPPMatrix> MLPPGAN::generate_example(int n) {
	MLPPLinAlg alg;

	return model_set_test_generator(alg.gaussian_noise(n, _k));
}
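
// One iteration alternates the two sides of the standard GAN minimax game,
// min_G max_D E[log D(x)] + E[log(1 - D(G(z)))]: the discriminator is trained
// on a batch of generated examples labeled 0 concatenated with the real
// examples labeled 1, then the generator is trained on fresh noise against
// all-ones labels, i.e. toward making the discriminator answer "real".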
void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
	MLPPCost mlpp_cost;
	MLPPLinAlg alg;

	real_t cost_prev = 0;
	int epoch = 1;

	forward_pass();

	while (true) {
		cost_prev = cost(_y_hat, alg.onevecv(_n));

		// Training of the discriminator.

		Ref<MLPPMatrix> generator_input_set = alg.gaussian_noise(_n, _k);
		Ref<MLPPMatrix> discriminator_input_set = model_set_test_generator(generator_input_set);
		discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.

		Ref<MLPPVector> y_hat = model_set_test_discriminator(discriminator_input_set);
		Ref<MLPPVector> output_set = alg.zerovecv(_n);
		Ref<MLPPVector> output_set_real = alg.onevecv(_n);
		output_set->add_mlpp_vector(output_set_real); // Fake + real output scores.

		// Pass the fake + real labels that were just assembled, not the member
		// `_output_set`, which holds the real input matrix.
		ComputeDiscriminatorGradientsResult dgrads = compute_discriminator_gradients(y_hat, output_set);

		dgrads.cumulative_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, dgrads.cumulative_hidden_layer_w_grad);
		dgrads.output_w_grad = alg.scalar_multiplynv(learning_rate / _n, dgrads.output_w_grad);

		update_discriminator_parameters(dgrads.cumulative_hidden_layer_w_grad, dgrads.output_w_grad, learning_rate);

		// Training of the generator.

		generator_input_set = alg.gaussian_noise(_n, _k);
		discriminator_input_set = model_set_test_generator(generator_input_set);
		y_hat = model_set_test_discriminator(discriminator_input_set);
		// All-ones labels: the generator is rewarded when fakes score as real.
		// Reuse the local label vector; overwriting `_output_set` here would
		// destroy the real training data.
		output_set = alg.onevecv(_n);

		Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(y_hat, output_set);
		cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);

		update_generator_parameters(cumulative_generator_hidden_layer_w_grad, learning_rate);

		forward_pass();

		if (ui) {
			print_ui(epoch, cost_prev, _y_hat, alg.onevecv(_n));
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}
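
// Forward-passes fresh noise and reports how often the discriminator scores
// the generated batch as real (performance against an all-ones target), so
// higher values mean the generator is currently fooling the discriminator.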
real_t MLPPGAN::score() {
	MLPPLinAlg alg;
	MLPPUtilities util;

	forward_pass();

	return util.performance_vec(_y_hat, alg.onevecv(_n));
}
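
// Saving is currently a no-op: the parameter-serialization calls below are
// commented out and kept only for reference.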
void MLPPGAN::save(const String &file_name) {
	MLPPUtilities util;

	/*
	if (!_network.empty()) {
		util.saveParameters(file_name, _network[0].weights, _network[0].bias, false, 1);

		for (uint32_t i = 1; i < _network.size(); i++) {
			util.saveParameters(file_name, _network[i].weights, _network[i].bias, true, i + 1);
		}

		util.saveParameters(file_name, _output_layer->weights, _output_layer->bias, true, _network.size() + 1);
	} else {
		util.saveParameters(file_name, _output_layer->weights, _output_layer->bias, false, _network.size() + 1);
	}
	*/
}
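
// Appends one hidden layer to the stack. The first layer added is fed
// _k-dimensional Gaussian noise (the generator's latent input); every later
// layer is wired to the previous layer's activations.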
void MLPPGAN::add_layer(int n_hidden, MLPPActivation::ActivationFunction activation, MLPPUtilities::WeightDistributionType weight_init, MLPPReg::RegularizationType reg, real_t lambda, real_t alpha) {
	MLPPLinAlg alg;

	if (_network.empty()) {
		Ref<MLPPHiddenLayer> layer = Ref<MLPPHiddenLayer>(memnew(MLPPHiddenLayer(n_hidden, activation, alg.gaussian_noise(_n, _k), weight_init, reg, lambda, alpha)));

		_network.push_back(layer);
		_network.write[0]->forward_pass();
	} else {
		Ref<MLPPHiddenLayer> layer = Ref<MLPPHiddenLayer>(memnew(MLPPHiddenLayer(n_hidden, activation, _network.write[_network.size() - 1]->get_a(), weight_init, reg, lambda, alpha)));

		_network.push_back(layer);
		_network.write[_network.size() - 1]->forward_pass();
	}
}
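
// Appends the discriminator's output head: a single sigmoid unit trained with
// logistic loss, wired to the last hidden layer (or, degenerately, straight to
// the noise input if no hidden layers were added).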
void MLPPGAN::add_output_layer(MLPPUtilities::WeightDistributionType weight_init, MLPPReg::RegularizationType reg, real_t lambda, real_t alpha) {
	MLPPLinAlg alg;

	if (!_network.empty()) {
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_network.write[_network.size() - 1]->get_n_hidden(), MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS, _network.write[_network.size() - 1]->get_a(), weight_init, reg, lambda, alpha)));
	} else {
		_output_layer = Ref<MLPPOutputLayer>(memnew(MLPPOutputLayer(_k, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS, alg.gaussian_noise(_n, _k), weight_init, reg, lambda, alpha)));
	}
}
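
// A minimal usage sketch. Hedged: the enum values
// MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT and
// MLPPReg::REGULARIZATION_TYPE_NONE are illustrative assumptions about the
// headers, not something this file defines.
//
//	Ref<MLPPMatrix> real_data = ...; // _n rows of real examples.
//	Ref<MLPPGAN> gan = Ref<MLPPGAN>(memnew(MLPPGAN(2, real_data))); // k = 2 latent dims.
//	// Layers through index _network.size() / 2 act as the generator; the
//	// later ones, plus the output head, act as the discriminator.
//	gan->add_layer(8, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);
//	gan->add_layer(8, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);
//	gan->add_output_layer(MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);
//	gan->gradient_descent(0.1, 1000, false);
//	Ref<MLPPMatrix> fakes = gan->generate_example(10);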
MLPPGAN::MLPPGAN(real_t k, const Ref<MLPPMatrix> &output_set) {
	_output_set = output_set;
	_n = _output_set->size().y;
	_k = k;
}

MLPPGAN::MLPPGAN() {
}

MLPPGAN::~MLPPGAN() {
}
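
// Runs a batch X through the generator half only (layers 0 through
// _network.size() / 2) and returns that half's final activations, i.e. the
// generated examples.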
Ref<MLPPMatrix> MLPPGAN::model_set_test_generator(const Ref<MLPPMatrix> &X) {
	if (!_network.empty()) {
		_network.write[0]->set_input(X);
		_network.write[0]->forward_pass();

		for (int i = 1; i <= _network.size() / 2; i++) {
			_network.write[i]->set_input(_network.write[i - 1]->get_a());
			_network.write[i]->forward_pass();
		}
	}

	return _network.write[_network.size() / 2]->get_a();
}
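
// Runs a batch X through the discriminator half only (layers from
// _network.size() / 2 + 1 onward, then the sigmoid output head) and returns
// the per-example "real" scores.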
Ref<MLPPVector> MLPPGAN::model_set_test_discriminator(const Ref<MLPPMatrix> &X) {
	if (!_network.empty()) {
		for (int i = _network.size() / 2 + 1; i < _network.size(); i++) {
			if (i == _network.size() / 2 + 1) {
				_network.write[i]->set_input(X);
			} else {
				_network.write[i]->set_input(_network.write[i - 1]->get_a());
			}

			_network.write[i]->forward_pass();
		}

		_output_layer->set_input(_network.write[_network.size() - 1]->get_a());
	}

	_output_layer->forward_pass();

	return _output_layer->get_a();
}
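
// Logistic loss of the output head between y_hat and y, plus regularization
// terms. Note the hidden-layer loop stops one short, so the last hidden
// layer's regularization term is not counted here.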
real_t MLPPGAN::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
	MLPPReg regularization;
	MLPPCost mlpp_cost;

	real_t total_reg_term = 0;

	if (!_network.empty()) {
		for (int i = 0; i < _network.size() - 1; i++) {
			total_reg_term += regularization.reg_termm(_network.write[i]->get_weights(), _network.write[i]->get_lambda(), _network.write[i]->get_alpha(), _network.write[i]->get_reg());
		}
	}

	return mlpp_cost.run_cost_norm_vector(_output_layer->get_cost(), y_hat, y) + total_reg_term + regularization.reg_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg());
}
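
// Full training-time pass: fresh Gaussian noise is pushed through the whole
// stack (generator, then discriminator, then the output head), leaving the
// discriminator's scores for the generated batch in _y_hat.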
void MLPPGAN::forward_pass() {
	MLPPLinAlg alg;

	if (!_network.empty()) {
		_network.write[0]->set_input(alg.gaussian_noise(_n, _k));
		_network.write[0]->forward_pass();

		for (int i = 1; i < _network.size(); i++) {
			_network.write[i]->set_input(_network.write[i - 1]->get_a());
			_network.write[i]->forward_pass();
		}

		_output_layer->set_input(_network.write[_network.size() - 1]->get_a());
	} else { // Should never happen, though.
		_output_layer->set_input(alg.gaussian_noise(_n, _k));
	}

	_output_layer->forward_pass();

	_y_hat = _output_layer->get_a();
}
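
// Applies precomputed weight updates to the discriminator half only: the
// output head, then hidden layers from the top down to (but excluding) index
// _network.size() / 2. Biases are recomputed from each layer's stored delta.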
void MLPPGAN::update_discriminator_parameters(const Vector<Ref<MLPPMatrix>> &hidden_layer_updations, const Ref<MLPPVector> &output_layer_updation, real_t learning_rate) {
	MLPPLinAlg alg;

	_output_layer->set_weights(alg.subtractionnv(_output_layer->get_weights(), output_layer_updation));

	real_t output_layer_bias = _output_layer->get_bias();
	output_layer_bias -= learning_rate * alg.sum_elementsv(_output_layer->get_delta()) / _n;
	_output_layer->set_bias(output_layer_bias);

	if (!_network.empty()) {
		Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];

		layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[0]));
		layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));

		for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
			layer = _network[i];

			layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
			layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
		}
	}
}
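
// Applies precomputed weight updates to the generator half only (layers
// _network.size() / 2 down to 0); the discriminator's parameters are left
// untouched during the generator step.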
void MLPPGAN::update_generator_parameters(const Vector<Ref<MLPPMatrix>> &hidden_layer_updations, real_t learning_rate) {
	MLPPLinAlg alg;

	if (!_network.empty()) {
		for (int i = _network.size() / 2; i >= 0; i--) {
			Ref<MLPPHiddenLayer> layer = _network[i];

			// compute_generator_gradients() pushes gradients last-layer-first,
			// so layer i's gradient sits at index (_network.size() - 2) - i + 1,
			// i.e. (_network.size() - 1) - i.
			layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
			layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
		}
	}
}
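
// Backpropagates the loss from the output head through the discriminator half
// only, returning the head's weight gradient plus the hidden-layer weight
// gradients collected last-layer-first (index 0 holds the topmost hidden
// layer's gradient). Regularization derivatives are folded into each entry.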
MLPPGAN::ComputeDiscriminatorGradientsResult MLPPGAN::compute_discriminator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
	MLPPCost mlpp_cost;
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

	ComputeDiscriminatorGradientsResult res;

	// Differentiate against the labels supplied by the caller (fake + real),
	// not against the member `_output_set`, which holds the real input matrix.
	Ref<MLPPVector> cost_deriv = mlpp_cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, output_set);
	Ref<MLPPVector> activ_deriv = avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z());

	_output_layer->set_delta(alg.hadamard_productnv(cost_deriv, activ_deriv));

	res.output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
	res.output_w_grad = alg.additionnv(res.output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));

	if (!_network.empty()) {
		Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];

		// Hidden-layer z values are matrices, so the matrix variant of the
		// activation derivative is assumed here; the vector variant could not
		// be hadamard-multiplied with the matrix products below.
		Ref<MLPPMatrix> hidden_layer_activ_deriv = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());

		layer->set_delta(alg.hadamard_productnm(alg.outer_product(_output_layer->get_delta(), _output_layer->get_weights()), hidden_layer_activ_deriv));

		Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());

		res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.

		for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
			layer = _network[i];
			Ref<MLPPHiddenLayer> next_layer = _network[i + 1];

			hidden_layer_activ_deriv = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());

			layer->set_delta(alg.hadamard_productnm(alg.matmultnm(next_layer->get_delta(), alg.transposenm(next_layer->get_weights())), hidden_layer_activ_deriv));
			hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());

			res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
		}
	}

	return res;
}
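
// Backpropagates the loss from the output head through every hidden layer down
// to index 0 and returns all hidden-layer weight gradients, last layer first;
// update_generator_parameters() then consumes the entries belonging to the
// generator half.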
Vector<Ref<MLPPMatrix>> MLPPGAN::compute_generator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
	MLPPCost mlpp_cost;
	MLPPActivation avn;
	MLPPLinAlg alg;
	MLPPReg regularization;

	Vector<Ref<MLPPMatrix>> cumulative_hidden_layer_w_grad; // Tensor containing ALL hidden grads.

	// Differentiate against the caller's labels (all ones during the generator
	// step), not against the member `_output_set`.
	Ref<MLPPVector> cost_deriv = mlpp_cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, output_set);
	Ref<MLPPVector> activ_deriv = avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z());

	_output_layer->set_delta(alg.hadamard_productnv(cost_deriv, activ_deriv));

	// Computed for parity with the discriminator pass but intentionally unused:
	// the generator step never touches the discriminator head's weights.
	Ref<MLPPVector> output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
	output_w_grad = alg.additionnv(output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));

	if (!_network.empty()) {
		Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];

		// As above, the matrix variant of the activation derivative is assumed
		// for hidden layers, whose z values are matrices.
		Ref<MLPPMatrix> hidden_layer_activ_deriv = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());

		layer->set_delta(alg.hadamard_productnm(alg.outer_product(_output_layer->get_delta(), _output_layer->get_weights()), hidden_layer_activ_deriv));

		Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());
		cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.

		for (int i = _network.size() - 2; i >= 0; i--) {
			layer = _network[i];
			Ref<MLPPHiddenLayer> next_layer = _network[i + 1];

			hidden_layer_activ_deriv = avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z());

			layer->set_delta(alg.hadamard_productnm(alg.matmultnm(next_layer->get_delta(), alg.transposenm(next_layer->get_weights())), hidden_layer_activ_deriv));

			hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());
			cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
		}
	}

	return cumulative_hidden_layer_w_grad;
}
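
// Logs the current cost alongside each layer's weights and biases: the output
// head first, then hidden layers from the top down.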
void MLPPGAN::print_ui(int epoch, real_t cost_prev, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
	// Report the cost against the labels the caller passed in, not against the
	// member `_output_set` (a matrix of real examples).
	MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, output_set));

	PLOG_MSG("Layer " + itos(_network.size() + 1) + ": ");
	MLPPUtilities::print_ui_vb(_output_layer->get_weights(), _output_layer->get_bias());

	if (!_network.empty()) {
		for (int i = _network.size() - 1; i >= 0; i--) {
			Ref<MLPPHiddenLayer> layer = _network[i];

			PLOG_MSG("Layer " + itos(i + 1) + ": ");
			MLPPUtilities::print_ui_mb(layer->get_weights(), layer->get_bias());
		}
	}
}

void MLPPGAN::_bind_methods() {
	/*
	ClassDB::bind_method(D_METHOD("get_input_set"), &MLPPGAN::get_input_set);
	ClassDB::bind_method(D_METHOD("set_input_set", "value"), &MLPPGAN::set_input_set);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "input_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPMatrix"), "set_input_set", "get_input_set");

	ClassDB::bind_method(D_METHOD("get_output_set"), &MLPPGAN::get_output_set);
	ClassDB::bind_method(D_METHOD("set_output_set", "value"), &MLPPGAN::set_output_set);
	ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "output_set", PROPERTY_HINT_RESOURCE_TYPE, "MLPPVector"), "set_output_set", "get_output_set");

	ClassDB::bind_method(D_METHOD("get_k"), &MLPPGAN::get_k);
	ClassDB::bind_method(D_METHOD("set_k", "value"), &MLPPGAN::set_k);
	ADD_PROPERTY(PropertyInfo(Variant::INT, "k"), "set_k", "get_k");

	ClassDB::bind_method(D_METHOD("model_set_test", "X"), &MLPPGAN::model_set_test);
	ClassDB::bind_method(D_METHOD("model_test", "x"), &MLPPGAN::model_test);
	ClassDB::bind_method(D_METHOD("score"), &MLPPGAN::score);
	*/
}