Work on porting WGAN.

Relintai 2023-02-06 02:36:22 +01:00
parent 35f4a01bac
commit e5810cda01
6 changed files with 224 additions and 128 deletions

hidden_layer.cpp

@@ -11,14 +11,14 @@
 #include <iostream>
 #include <random>
 
-int MLPPHiddenLayer::get_n_hidden() {
+int MLPPHiddenLayer::get_n_hidden() const {
 	return n_hidden;
 }
 void MLPPHiddenLayer::set_n_hidden(const int val) {
 	n_hidden = val;
 }
 
-MLPPActivation::ActivationFunction MLPPHiddenLayer::get_activation() {
+MLPPActivation::ActivationFunction MLPPHiddenLayer::get_activation() const {
 	return activation;
 }
 void MLPPHiddenLayer::set_activation(const MLPPActivation::ActivationFunction val) {
@@ -81,28 +81,28 @@ void MLPPHiddenLayer::set_delta(const Ref<MLPPMatrix> &val) {
 	delta = val;
 }
 
-MLPPReg::RegularizationType MLPPHiddenLayer::get_reg() {
+MLPPReg::RegularizationType MLPPHiddenLayer::get_reg() const {
 	return reg;
 }
 void MLPPHiddenLayer::set_reg(const MLPPReg::RegularizationType val) {
 	reg = val;
 }
 
-real_t MLPPHiddenLayer::get_lambda() {
+real_t MLPPHiddenLayer::get_lambda() const {
 	return lambda;
 }
 void MLPPHiddenLayer::set_lambda(const real_t val) {
 	lambda = val;
 }
 
-real_t MLPPHiddenLayer::get_alpha() {
+real_t MLPPHiddenLayer::get_alpha() const {
 	return alpha;
 }
 void MLPPHiddenLayer::set_alpha(const real_t val) {
 	alpha = val;
 }
 
-MLPPUtilities::WeightDistributionType MLPPHiddenLayer::get_weight_init() {
+MLPPUtilities::WeightDistributionType MLPPHiddenLayer::get_weight_init() const {
 	return weight_init;
 }
 void MLPPHiddenLayer::set_weight_init(const MLPPUtilities::WeightDistributionType val) {
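
Note: the const qualifiers added above are what let read-only code hold a layer through a const handle. A minimal sketch, not part of the commit:

	// Legal only now that the getters are declared const.
	void print_layer_info(const MLPPHiddenLayer &layer) {
		std::cout << layer.get_n_hidden() << std::endl;
		std::cout << layer.get_lambda() << " " << layer.get_alpha() << std::endl;
	}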

hidden_layer.h

@@ -28,7 +28,7 @@ class MLPPHiddenLayer : public Reference {
 	GDCLASS(MLPPHiddenLayer, Reference);
 
 public:
-	int get_n_hidden();
+	int get_n_hidden() const;
 	void set_n_hidden(const int val);
 
 	MLPPActivation::ActivationFunction get_activation();
@@ -58,16 +58,16 @@ public:
 	Ref<MLPPMatrix> get_delta();
 	void set_delta(const Ref<MLPPMatrix> &val);
 
-	MLPPReg::RegularizationType get_reg();
+	MLPPReg::RegularizationType get_reg() const;
 	void set_reg(const MLPPReg::RegularizationType val);
 
-	real_t get_lambda();
+	real_t get_lambda() const;
 	void set_lambda(const real_t val);
 
-	real_t get_alpha();
+	real_t get_alpha() const;
 	void set_alpha(const real_t val);
 
-	MLPPUtilities::WeightDistributionType get_weight_init();
+	MLPPUtilities::WeightDistributionType get_weight_init() const;
 	void set_weight_init(const MLPPUtilities::WeightDistributionType val);
 
 	void forward_pass();

lin_alg.cpp

@@ -41,6 +41,25 @@ std::vector<std::vector<real_t>> MLPPLinAlg::gaussianNoise(int n, int m) {
 	return A;
 }
 
+Ref<MLPPMatrix> MLPPLinAlg::gaussian_noise(int n, int m) {
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::normal_distribution<real_t> distribution(0, 1); // Standard normal distribution. Mean of 0, std of 1.
+
+	Ref<MLPPMatrix> A;
+	A.instance();
+	A->resize(Size2i(m, n));
+
+	int a_data_size = A->data_size();
+	real_t *a_ptr = A->ptrw();
+
+	for (int i = 0; i < a_data_size; ++i) {
+		a_ptr[i] = distribution(generator);
+	}
+
+	return A;
+}
+
 std::vector<std::vector<real_t>> MLPPLinAlg::addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B) {
 	std::vector<std::vector<real_t>> C;
 	C.resize(A.size());
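
For reference, a hypothetical call site (not in this commit) showing the new Ref-based API next to the std::vector one it mirrors. Note the resize above uses Size2i(m, n), i.e. (columns, rows) as used elsewhere in this commit, so gaussian_noise(n, m) yields an n x m matrix just like gaussianNoise(n, m):

	MLPPLinAlg alg;
	std::vector<std::vector<real_t>> old_noise = alg.gaussianNoise(4, 3); // 4x3 matrix of N(0, 1) samples.
	Ref<MLPPMatrix> new_noise = alg.gaussian_noise(4, 3); // Same shape, engine-managed reference.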

lin_alg.h

@@ -27,6 +27,7 @@ public:
 	bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
 
 	std::vector<std::vector<real_t>> gaussianNoise(int n, int m);
+	Ref<MLPPMatrix> gaussian_noise(int n, int m);
 
 	std::vector<std::vector<real_t>> addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
 	std::vector<std::vector<real_t>> subtraction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);

wgan.cpp

@@ -11,15 +11,19 @@
 #include "../regularization/reg.h"
 #include "../utilities/utilities.h"
 
+#include "core/object/method_bind_ext.gen.inc"
+
 #include <cmath>
 #include <iostream>
 
-std::vector<std::vector<real_t>> MLPPWGAN::generate_example(int n) {
+Ref<MLPPMatrix> MLPPWGAN::generate_example(int n) {
 	MLPPLinAlg alg;
-	return model_set_test_generator(alg.gaussianNoise(n, k));
+
+	return model_set_test_generator(alg.gaussian_noise(n, k));
 }
 
-void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
+/*
+void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 	//MLPPCost mlpp_cost;
 	MLPPLinAlg alg;
 	real_t cost_prev = 0;
@@ -30,7 +34,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 	const int CRITIC_INTERATIONS = 5; // Wasserstein GAN specific parameter.
 
 	while (true) {
-		cost_prev = cost(y_hat, alg.onevec(n));
+		cost_prev = cost(y_hat, alg.onevecv(n));
 
 		std::vector<std::vector<real_t>> generatorInputSet;
 		std::vector<std::vector<real_t>> discriminatorInputSet;
@@ -70,8 +74,8 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 		forward_pass();
 
-		if (UI) {
-			handle_ui(epoch, cost_prev, MLPPWGAN::y_hat, alg.onevec(n));
+		if (ui) {
+			handle_ui(epoch, cost_prev, MLPPWGAN::y_hat, alg.onevecv(n));
 		}
 
 		epoch++;
@@ -80,186 +84,239 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 		}
 	}
 }
+*/
 
 real_t MLPPWGAN::score() {
 	MLPPLinAlg alg;
 	MLPPUtilities util;
 
 	forward_pass();
 
-	return util.performance(y_hat, alg.onevec(n));
+	return util.performance_vec(y_hat, alg.onevecv(n));
 }
 
-void MLPPWGAN::save(std::string fileName) {
+void MLPPWGAN::save(const String &file_name) {
 	MLPPUtilities util;
+	/*
 	if (!network.empty()) {
-		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
+		util.saveParameters(file_name, network[0].weights, network[0].bias, 0, 1);
 		for (uint32_t i = 1; i < network.size(); i++) {
 			util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
 		}
-		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
+		util.saveParameters(file_name, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
 	} else {
-		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
+		util.saveParameters(file_name, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
 	}
+	*/
 }
 
-void MLPPWGAN::add_layer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
+void MLPPWGAN::add_layer(int n_hidden, MLPPActivation::ActivationFunction activation, MLPPUtilities::WeightDistributionType weight_init, MLPPReg::RegularizationType reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 
+	Ref<MLPPHiddenLayer> layer;
+	layer.instance();
+
+	layer->set_n_hidden(n_hidden);
+	layer->set_activation(activation);
+	layer->set_weight_init(weight_init);
+	layer->set_reg(reg);
+	layer->set_lambda(lambda);
+	layer->set_alpha(alpha);
+
 	if (network.empty()) {
-		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
-		network[0].forwardPass();
+		layer->set_input(alg.gaussian_noise(n, k));
 	} else {
-		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
-		network[network.size() - 1].forwardPass();
+		layer->set_input(network.write[network.size() - 1]->get_a());
 	}
+
+	network.push_back(layer);
+	layer->forward_pass();
 }
 
-void MLPPWGAN::add_output_layer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
+void MLPPWGAN::add_output_layer(MLPPUtilities::WeightDistributionType weight_init, MLPPReg::RegularizationType reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 
-	if (!network.empty()) {
-		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
-	} else { // Should never happen.
-		outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+	ERR_FAIL_COND(network.empty());
+
+	if (!output_layer.is_valid()) {
+		output_layer.instance();
 	}
+
+	output_layer->set_n_hidden(network[network.size() - 1]->get_n_hidden());
+	output_layer->set_activation(MLPPActivation::ACTIVATION_FUNCTION_LINEAR);
+	output_layer->set_cost(MLPPCost::COST_TYPE_WASSERSTEIN_LOSS);
+	output_layer->set_input(network.write[network.size() - 1]->get_a());
+	output_layer->set_weight_init(weight_init);
+	output_layer->set_lambda(lambda);
+	output_layer->set_alpha(alpha);
 }
 
-MLPPWGAN::MLPPWGAN(real_t k, std::vector<std::vector<real_t>> outputSet) :
-		outputSet(outputSet), n(outputSet.size()), k(k) {
+MLPPWGAN::MLPPWGAN(real_t p_k, const Ref<MLPPMatrix> &p_output_set) {
+	output_set = p_output_set;
+	n = p_output_set->size().y;
+	k = p_k;
 }
 
 MLPPWGAN::MLPPWGAN() {
+	n = 0;
+	k = 0;
 }
 
 MLPPWGAN::~MLPPWGAN() {
-	delete outputLayer;
 }
 
-std::vector<std::vector<real_t>> MLPPWGAN::model_set_test_generator(std::vector<std::vector<real_t>> X) {
+Ref<MLPPMatrix> MLPPWGAN::model_set_test_generator(const Ref<MLPPMatrix> &X) {
 	if (!network.empty()) {
-		network[0].input = X;
-		network[0].forwardPass();
+		network.write[0]->set_input(X);
+		network.write[0]->forward_pass();
 
-		for (uint32_t i = 1; i <= network.size() / 2; i++) {
-			network[i].input = network[i - 1].a;
-			network[i].forwardPass();
+		for (int i = 1; i <= network.size() / 2; ++i) {
+			network.write[i]->set_input(network.write[i - 1]->get_a());
+			network.write[i]->forward_pass();
 		}
 	}
-	return network[network.size() / 2].a;
+
+	return network.write[network.size() / 2]->get_a();
}
 
-std::vector<real_t> MLPPWGAN::model_set_test_discriminator(std::vector<std::vector<real_t>> X) {
+Ref<MLPPVector> MLPPWGAN::model_set_test_discriminator(const Ref<MLPPMatrix> &X) {
 	if (!network.empty()) {
 		for (uint32_t i = network.size() / 2 + 1; i < network.size(); i++) {
 			if (i == network.size() / 2 + 1) {
-				network[i].input = X;
+				network.write[i]->set_input(X);
 			} else {
-				network[i].input = network[i - 1].a;
+				network.write[i]->set_input(network.write[i - 1]->get_a());
 			}
-			network[i].forwardPass();
+			network.write[i]->forward_pass();
 		}
-		outputLayer->input = network[network.size() - 1].a;
+
+		output_layer->set_input(network.write[network.size() - 1]->get_a());
 	}
-	outputLayer->forwardPass();
-	return outputLayer->a;
+
+	output_layer->forward_pass();
+
+	return output_layer->get_a();
 }
 
-real_t MLPPWGAN::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+real_t MLPPWGAN::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
-	class MLPPCost cost;
-	real_t totalRegTerm = 0;
-	auto cost_function = outputLayer->cost_map[outputLayer->cost];
+	MLPPCost mlpp_cost;
+	real_t total_reg_term = 0;
+
 	if (!network.empty()) {
-		for (uint32_t i = 0; i < network.size() - 1; i++) {
-			totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
+		for (int i = 0; i < network.size() - 1; ++i) {
+			Ref<MLPPHiddenLayer> layer = network[i];
+
+			total_reg_term += regularization.reg_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg());
 		}
 	}
-	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
+
+	total_reg_term += regularization.reg_termm(output_layer->get_weights(), output_layer->get_lambda(), output_layer->get_alpha(), output_layer->get_reg());
+
+	return mlpp_cost.run_cost_norm_vector(output_layer->get_cost(), y_hat, y) + total_reg_term;
 }
 
 void MLPPWGAN::forward_pass() {
 	MLPPLinAlg alg;
 
 	if (!network.empty()) {
-		network[0].input = alg.gaussianNoise(n, k);
-		network[0].forwardPass();
+		Ref<MLPPHiddenLayer> layer = network[0];
+
+		layer->set_input(alg.gaussian_noise(n, k));
+		layer->forward_pass();
 
-		for (uint32_t i = 1; i < network.size(); i++) {
-			network[i].input = network[i - 1].a;
-			network[i].forwardPass();
+		for (int i = 1; i < network.size(); i++) {
+			layer = network[i];
+			layer->set_input(network.write[i - 1]->get_a());
+			layer->forward_pass();
 		}
 
-		outputLayer->input = network[network.size() - 1].a;
+		output_layer->set_input(network.write[network.size() - 1]->get_a());
 	} else { // Should never happen, though.
-		outputLayer->input = alg.gaussianNoise(n, k);
+		output_layer->set_input(alg.gaussian_noise(n, k));
 	}
 
-	outputLayer->forwardPass();
-	y_hat = outputLayer->a;
+	output_layer->forward_pass();
+	y_hat->set_from_mlpp_vector(output_layer->get_a());
 }
 
-void MLPPWGAN::update_discriminator_parameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
+void MLPPWGAN::update_discriminator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, const Ref<MLPPVector> &output_layer_updation, real_t learning_rate) {
 	MLPPLinAlg alg;
 
-	outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
-	outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
+	output_layer->set_weights(alg.subtractionnv(output_layer->get_weights(), output_layer_updation));
+	output_layer->set_bias(output_layer->get_bias() - learning_rate * alg.sum_elementsv(output_layer->get_delta()) / n);
 
 	if (!network.empty()) {
-		network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
-		network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));
+		Ref<MLPPHiddenLayer> layer = network[network.size() - 1];
+
+		layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[0]));
+		layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
 
-		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
-			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
-			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
+		for (int i = network.size() - 2; i > network.size() / 2; i--) {
+			layer = network[i];
+
+			layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(network.size() - 2) - i + 1]));
+			layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
 		}
 	}
 }
 
-void MLPPWGAN::update_generator_parameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
+void MLPPWGAN::update_generator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, real_t learning_rate) {
 	MLPPLinAlg alg;
 
 	if (!network.empty()) {
-		for (uint32_t i = network.size() / 2; i >= 0; i--) {
+		for (int i = network.size() / 2; i >= 0; i--) {
+			Ref<MLPPHiddenLayer> layer = network[i];
+
 			//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
 			//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
-			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
-			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
+			layer->set_weights(alg.subtractionm(layer->get_weights(), hidden_layer_updations[(network.size() - 2) - i + 1]));
+			layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplym(learning_rate / n, layer->get_delta())));
 		}
 	}
 }
 
-std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPWGAN::compute_discriminator_gradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	class MLPPCost cost;
+MLPPWGAN::DiscriminatorGradientResult MLPPWGAN::compute_discriminator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
+	MLPPCost mlpp_cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
 
-	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
+	DiscriminatorGradientResult data;
 
-	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
-	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
-	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
-	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
-	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+	output_layer->set_delta(alg.hadamard_productnv(mlpp_cost.run_cost_deriv_vector(output_layer->get_cost(), y_hat, output_set), avn.run_activation_deriv_vector(output_layer->get_activation(), output_layer->get_z())));
+
+	data.output_w_grad = alg.mat_vec_multv(alg.transposem(output_layer->get_input()), output_layer->get_delta());
+	data.output_w_grad = alg.additionnv(data.output_w_grad, regularization.reg_deriv_termv(output_layer->get_weights(), output_layer->get_lambda(), output_layer->get_alpha(), output_layer->get_reg()));
 
 	if (!network.empty()) {
-		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
-		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
-		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+		Ref<MLPPHiddenLayer> layer = network[network.size() - 1];
+
+		layer->set_delta(alg.hadamard_productm(alg.outer_product(output_layer->get_delta(), output_layer->get_weights()), avn.run_activation_deriv_vector(layer->get_activation(), layer->get_z())));
+
+		Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultm(alg.transposem(layer->get_input()), layer->get_delta());
+
+		data.cumulative_hidden_layer_w_grad.push_back(alg.additionm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 
 		//std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
-		//std::cout << "WEIGHTS SECOND:" << network[network.size() - 1].weights.size() << "x" << network[network.size() - 1].weights[0].size() << std::endl;
+		//std::cout << "WEIGHTS SECOND:" << layer.weights.size() << "x" << layer.weights[0].size() << std::endl;
 
 		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
-			auto hiddenLayerAvnl = network[i].activation_map[network[i].activation];
-			network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvnl)(network[i].z, 1));
-			std::vector<std::vector<real_t>> hiddenLayerWGradl = alg.matmult(alg.transpose(network[i].input), network[i].delta);
-			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+			layer = network[i];
+			Ref<MLPPHiddenLayer> next_layer = network[i + 1];
+
+			layer->set_delta(alg.hadamard_productm(alg.matmultm(next_layer->get_delta(), alg.transposem(next_layer->get_weights())), avn.run_activation_deriv_matrix(layer->get_activation(), layer->get_z())));
+
+			hidden_layer_w_grad = alg.matmultm(alg.transposem(layer->get_input()), layer->get_delta());
+
+			data.cumulative_hidden_layer_w_grad.push_back(alg.additionm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 		}
 	}
 
-	return { cumulativeHiddenLayerWGrad, outputWGrad };
+	return data;
 }
 
-std::vector<std::vector<std::vector<real_t>>> MLPPWGAN::compute_generator_gradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
+/*
+Vector<Ref<MLPPMatrix>> MLPPWGAN::compute_generator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
 	class MLPPCost cost;
 	MLPPActivation avn;
 	MLPPLinAlg alg;
@@ -267,14 +324,15 @@ std::vector<std::vector<std::vector<real_t>>> MLPPWGAN::compute_generator_gradie
 	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
 
-	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
-	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
-	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
-	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
-	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+	auto costDeriv = output_layer->costDeriv_map[output_layer->cost];
+	auto outputAvn = output_layer->activation_map[output_layer->activation];
+	output_layer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(output_layer->z, 1));
+	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(output_layer->input), output_layer->delta);
+	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(output_layer->weights, output_layer->lambda, output_layer->alpha, output_layer->reg));
 
 	if (!network.empty()) {
 		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(output_layer->delta, output_layer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
 		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
 		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
@@ -285,17 +343,24 @@ std::vector<std::vector<std::vector<real_t>>> MLPPWGAN::compute_generator_gradie
 			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 		}
 	}
 
 	return cumulativeHiddenLayerWGrad;
 }
+*/
 
-void MLPPWGAN::handle_ui(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
+void MLPPWGAN::handle_ui(int epoch, real_t cost_prev, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set) {
+	MLPPUtilities::cost_info(epoch, cost_prev, cost(y_hat, output_set));
+
 	std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
-	MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
+
+	MLPPUtilities::print_ui_vb(output_layer->get_weights(), output_layer->get_bias());
 
 	if (!network.empty()) {
-		for (uint32_t i = network.size() - 1; i >= 0; i--) {
+		for (int i = network.size() - 1; i >= 0; i--) {
+			Ref<MLPPHiddenLayer> layer = network[i];
+
 			std::cout << "Layer " << i + 1 << ": " << std::endl;
-			MLPPUtilities::UI(network[i].weights, network[i].bias);
+
+			MLPPUtilities::print_ui_vib(layer->get_weights(), layer->get_bias(), 0);
 		}
 	}
 }
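
The WGAN training loop itself is still fenced off with /* */ while the port is in progress. For orientation, a condensed sketch of what it does once re-enabled, using this commit's names where they exist; the real/generated batch assembly and the learning-rate scaling of the gradients are omitted here (see the disabled code for the full flow), and compute_generator_gradients is likewise still disabled:

	void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
		MLPPLinAlg alg;
		const int critic_iterations = 5; // WGAN: several critic steps per generator step.

		for (int epoch = 1; epoch <= max_epoch; epoch++) {
			for (int i = 0; i < critic_iterations; i++) {
				forward_pass();
				// The critic scores real vs. generated rows under the Wasserstein
				// loss; weight clipping to [-0.01, 0.01] keeps it roughly 1-Lipschitz.
				DiscriminatorGradientResult grads = compute_discriminator_gradients(y_hat, alg.onevecv(n));
				update_discriminator_parameters(grads.cumulative_hidden_layer_w_grad, grads.output_w_grad, learning_rate);
			}

			forward_pass();
			// One generator step per epoch, pushing the critic's score upward.
			update_generator_parameters(compute_generator_gradients(y_hat, alg.onevecv(n)), learning_rate);
		}
	}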

wgan.h

@@ -20,6 +20,11 @@
 #include "../hidden_layer/hidden_layer.h"
 #include "../output_layer/output_layer.h"
 
+#include "../activation/activation.h"
+#include "../cost/cost.h"
+#include "../regularization/reg.h"
+#include "../utilities/utilities.h"
+
 #include <string>
 #include <tuple>
 #include <vector>
@@ -28,40 +33,46 @@ class MLPPWGAN : public Reference {
 	GDCLASS(MLPPWGAN, Reference);
 
 public:
-	std::vector<std::vector<real_t>> generate_example(int n);
-	void gradient_descent(real_t learning_rate, int max_epoch, bool UI = false);
+	Ref<MLPPMatrix> generate_example(int n);
+	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
 	real_t score();
-	void save(std::string fileName);
+	void save(const String &file_name);
 
-	void add_layer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-	void add_output_layer(std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
+	void add_layer(int n_hidden, MLPPActivation::ActivationFunction activation, MLPPUtilities::WeightDistributionType weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::RegularizationType reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t lambda = 0.5, real_t alpha = 0.5);
+	void add_output_layer(MLPPUtilities::WeightDistributionType weight_init = MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::RegularizationType reg = MLPPReg::REGULARIZATION_TYPE_NONE, real_t lambda = 0.5, real_t alpha = 0.5);
 
-	MLPPWGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
+	MLPPWGAN(real_t k, const Ref<MLPPMatrix> &output_set);
 	MLPPWGAN();
 	~MLPPWGAN();
 
 protected:
-	std::vector<std::vector<real_t>> model_set_test_generator(std::vector<std::vector<real_t>> X); // Evaluator for the generator of the WGAN.
-	std::vector<real_t> model_set_test_discriminator(std::vector<std::vector<real_t>> X); // Evaluator for the discriminator of the WGAN.
+	Ref<MLPPMatrix> model_set_test_generator(const Ref<MLPPMatrix> &X); // Evaluator for the generator of the WGAN.
+	Ref<MLPPVector> model_set_test_discriminator(const Ref<MLPPMatrix> &X); // Evaluator for the discriminator of the WGAN.
 
-	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+	real_t cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
 
 	void forward_pass();
-	void update_discriminator_parameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate);
-	void update_generator_parameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate);
-	std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> compute_discriminator_gradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
-	std::vector<std::vector<std::vector<real_t>>> compute_generator_gradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
-	void handle_ui(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet);
+	void update_discriminator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, const Ref<MLPPVector> &output_layer_updation, real_t learning_rate);
+	void update_generator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_updations, real_t learning_rate);
+
+	struct DiscriminatorGradientResult {
+		Vector<Ref<MLPPMatrix>> cumulative_hidden_layer_w_grad; // Tensor containing ALL hidden grads.
+		Ref<MLPPVector> output_w_grad;
+	};
+
+	DiscriminatorGradientResult compute_discriminator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set);
+	Vector<Ref<MLPPMatrix>> compute_generator_gradients(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set);
+
+	void handle_ui(int epoch, real_t cost_prev, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &output_set);
 
 	static void _bind_methods();
 
-	std::vector<std::vector<real_t>> outputSet;
-	std::vector<real_t> y_hat;
+	Ref<MLPPMatrix> output_set;
+	Ref<MLPPVector> y_hat;
 
-	std::vector<MLPPOldHiddenLayer> network;
-	MLPPOldOutputLayer *outputLayer;
+	Vector<Ref<MLPPHiddenLayer>> network;
+	Ref<MLPPOutputLayer> output_layer;
 
 	int n;
 	int k;
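
For completeness, a minimal usage sketch of the ported interface; hypothetical, and only meaningful once gradient_descent is re-enabled. The hidden-layer list is split down the middle, first half generator, second half critic, which is why model_set_test_generator walks network only up to network.size() / 2:

	Ref<MLPPMatrix> real_data;
	real_data.instance();
	// ... fill real_data, one sample per row ...

	MLPPWGAN wgan(2, real_data); // k = 2: width of the generator's gaussian noise input.
	wgan.add_layer(5, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID); // Generator half.
	wgan.add_layer(2, MLPPActivation::ACTIVATION_FUNCTION_RELU);
	wgan.add_layer(5, MLPPActivation::ACTIVATION_FUNCTION_SIGMOID); // Critic half.
	wgan.add_output_layer(); // Linear activation + Wasserstein loss are set internally.
	wgan.gradient_descent(0.1, 1000);
	Ref<MLPPMatrix> fakes = wgan.generate_example(10); // 10 generated samples.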