diff --git a/SCsub b/SCsub
index 8d7c266..1042466 100644
--- a/SCsub
+++ b/SCsub
@@ -50,6 +50,8 @@ sources = [
     "mlpp/utilities/utilities.cpp",
 
     "mlpp/wgan/wgan.cpp",
+    "mlpp/wgan/wgan_old.cpp",
+    "test/mlpp_tests.cpp",
 ]
diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp
index 72803a2..986443d 100644
--- a/mlpp/wgan/wgan.cpp
+++ b/mlpp/wgan/wgan.cpp
@@ -16,9 +16,6 @@
 
 #include "core/object/method_bind_ext.gen.inc"
 
-#include <cmath>
-#include <iostream>
-
 Ref<MLPPMatrix> MLPPWGAN::get_output_set() {
 	return output_set;
 }
@@ -415,291 +412,3 @@ void MLPPWGAN::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("add_layer", "activation", "weight_init", "reg", "lambda", "alpha"), &MLPPWGAN::add_layer, MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);
 	ClassDB::bind_method(D_METHOD("add_output_layer", "weight_init", "reg", "lambda", "alpha"), &MLPPWGAN::add_output_layer, MLPPUtilities::WEIGHT_DISTRIBUTION_TYPE_DEFAULT, MLPPReg::REGULARIZATION_TYPE_NONE, 0.5, 0.5);
 }
-
-// ======== OLD ==========
-
-MLPPWGANOld::MLPPWGANOld(real_t k, std::vector<std::vector<real_t>> outputSet) :
-		outputSet(outputSet), n(outputSet.size()), k(k) {
-}
-
-MLPPWGANOld::~MLPPWGANOld() {
-	delete outputLayer;
-}
-
-std::vector<std::vector<real_t>> MLPPWGANOld::generateExample(int n) {
-	MLPPLinAlg alg;
-	return modelSetTestGenerator(alg.gaussianNoise(n, k));
-}
-
-void MLPPWGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-	class MLPPCost cost;
-	MLPPLinAlg alg;
-	real_t cost_prev = 0;
-	int epoch = 1;
-	forwardPass();
-
-	const int CRITIC_INTERATIONS = 5; // Wasserstein GAN specific parameter.
-
-	while (true) {
-		cost_prev = Cost(y_hat, alg.onevec(n));
-
-		std::vector<std::vector<real_t>> generatorInputSet;
-		std::vector<std::vector<real_t>> discriminatorInputSet;
-
-		std::vector<real_t> y_hat;
-		std::vector<real_t> outputSet;
-
-		// Training of the discriminator.
-		for (int i = 0; i < CRITIC_INTERATIONS; i++) {
-			generatorInputSet = alg.gaussianNoise(n, k);
-			discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
-			discriminatorInputSet.insert(discriminatorInputSet.end(), MLPPWGANOld::outputSet.begin(), MLPPWGANOld::outputSet.end()); // Fake + real inputs.
-
-			y_hat = modelSetTestDiscriminator(discriminatorInputSet);
-			outputSet = alg.scalarMultiply(-1, alg.onevec(n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
-			std::vector<real_t> outputSetReal = alg.onevec(n);
-			outputSet.insert(outputSet.end(), outputSetReal.begin(), outputSetReal.end()); // Fake + real output scores.
-
-			auto discriminator_gradient_results = computeDiscriminatorGradients(y_hat, outputSet);
-			auto cumulativeDiscriminatorHiddenLayerWGrad = std::get<0>(discriminator_gradient_results);
-			auto outputDiscriminatorWGrad = std::get<1>(discriminator_gradient_results);
-
-			cumulativeDiscriminatorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeDiscriminatorHiddenLayerWGrad);
-			outputDiscriminatorWGrad = alg.scalarMultiply(learning_rate / n, outputDiscriminatorWGrad);
-			updateDiscriminatorParameters(cumulativeDiscriminatorHiddenLayerWGrad, outputDiscriminatorWGrad, learning_rate);
-		}
-
-		// Training of the generator.
-		generatorInputSet = alg.gaussianNoise(n, k);
-		discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
-		y_hat = modelSetTestDiscriminator(discriminatorInputSet);
-		outputSet = alg.onevec(n);
-
-		std::vector<std::vector<std::vector<real_t>>> cumulativeGeneratorHiddenLayerWGrad = computeGeneratorGradients(y_hat, outputSet);
-		cumulativeGeneratorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeGeneratorHiddenLayerWGrad);
-		updateGeneratorParameters(cumulativeGeneratorHiddenLayerWGrad, learning_rate);
-
-		forwardPass();
-
-		if (UI) {
-			MLPPWGANOld::UI(epoch, cost_prev, MLPPWGANOld::y_hat, alg.onevec(n));
-		}
-
-		epoch++;
-		if (epoch > max_epoch) {
-			break;
-		}
-	}
-}
-
-real_t MLPPWGANOld::score() {
-	MLPPLinAlg alg;
-	MLPPUtilities util;
-	forwardPass();
-	return util.performance(y_hat, alg.onevec(n));
-}
-
-void MLPPWGANOld::save(std::string fileName) {
-	MLPPUtilities util;
-	if (!network.empty()) {
-		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
-		for (uint32_t i = 1; i < network.size(); i++) {
-			util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
-		}
-		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
-	} else {
-		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
-	}
-}
-
-void MLPPWGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
-	MLPPLinAlg alg;
-	if (network.empty()) {
-		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
-		network[0].forwardPass();
-	} else {
-		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
-		network[network.size() - 1].forwardPass();
-	}
-}
-
-void MLPPWGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
-	MLPPLinAlg alg;
-	if (!network.empty()) {
-		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
-	} else { // Should never happen.
-		outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
-	}
-}
-
-std::vector<std::vector<real_t>> MLPPWGANOld::modelSetTestGenerator(std::vector<std::vector<real_t>> X) {
-	if (!network.empty()) {
-		network[0].input = X;
-		network[0].forwardPass();
-
-		for (uint32_t i = 1; i <= network.size() / 2; i++) {
-			network[i].input = network[i - 1].a;
-			network[i].forwardPass();
-		}
-	}
-	return network[network.size() / 2].a;
-}
-
-std::vector<real_t> MLPPWGANOld::modelSetTestDiscriminator(std::vector<std::vector<real_t>> X) {
-	if (!network.empty()) {
-		for (uint32_t i = network.size() / 2 + 1; i < network.size(); i++) {
-			if (i == network.size() / 2 + 1) {
-				network[i].input = X;
-			} else {
-				network[i].input = network[i - 1].a;
-			}
-			network[i].forwardPass();
-		}
-		outputLayer->input = network[network.size() - 1].a;
-	}
-	outputLayer->forwardPass();
-	return outputLayer->a;
-}
-
-real_t MLPPWGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-	MLPPReg regularization;
-	class MLPPCost cost;
-	real_t totalRegTerm = 0;
-
-	auto cost_function = outputLayer->cost_map[outputLayer->cost];
-	if (!network.empty()) {
-		for (uint32_t i = 0; i < network.size() - 1; i++) {
-			totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
-		}
-	}
-	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
-}
-
-void MLPPWGANOld::forwardPass() {
-	MLPPLinAlg alg;
-	if (!network.empty()) {
-		network[0].input = alg.gaussianNoise(n, k);
-		network[0].forwardPass();
-
-		for (uint32_t i = 1; i < network.size(); i++) {
-			network[i].input = network[i - 1].a;
-			network[i].forwardPass();
-		}
-		outputLayer->input = network[network.size() - 1].a;
-	} else { // Should never happen, though.
-		outputLayer->input = alg.gaussianNoise(n, k);
-	}
-	outputLayer->forwardPass();
-	y_hat = outputLayer->a;
-}
-
-void MLPPWGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
-	MLPPLinAlg alg;
-
-	outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
-	outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
-
-	if (!network.empty()) {
-		network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
-		network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));
-
-		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
-			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
-			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
-		}
-	}
-}
-
-void MLPPWGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
-	MLPPLinAlg alg;
-
-	if (!network.empty()) {
-		for (int ii = network.size() / 2; ii >= 0; ii--) {
-			uint32_t i = static_cast<uint32_t>(ii);
-
-			//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
-			//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
-			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
-			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
-		}
-	}
-}
-
-std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPWGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	class MLPPCost cost;
-	MLPPActivation avn;
-	MLPPLinAlg alg;
-	MLPPReg regularization;
-
-	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
-
-	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
-	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
-	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
-	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
-	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
-
-	if (!network.empty()) {
-		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
-		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
-
-		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
-
-		//std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
-		//std::cout << "WEIGHTS SECOND:" << network[network.size() - 1].weights.size() << "x" << network[network.size() - 1].weights[0].size() << std::endl;
-
-		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
-			auto hiddenLayerAvnl = network[i].activation_map[network[i].activation];
-			network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvnl)(network[i].z, 1));
-			std::vector<std::vector<real_t>> hiddenLayerWGradl = alg.matmult(alg.transpose(network[i].input), network[i].delta);
-
-			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
-		}
-	}
-	return { cumulativeHiddenLayerWGrad, outputWGrad };
-}
-
-std::vector<std::vector<std::vector<real_t>>> MLPPWGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	class MLPPCost cost;
-	MLPPActivation avn;
-	MLPPLinAlg alg;
-	MLPPReg regularization;
-
-	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
-
-	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
-	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
-	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
-	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
-	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
-	if (!network.empty()) {
-		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
-		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
-		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
-
-		for (int ii = network.size() - 2; ii >= 0; ii--) {
-			uint32_t i = static_cast<uint32_t>(ii);
-			auto hiddenLayerAvnl = network[i].activation_map[network[i].activation];
-			network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvnl)(network[i].z, 1));
-			std::vector<std::vector<real_t>> hiddenLayerWGradl = alg.matmult(alg.transpose(network[i].input), network[i].delta);
-			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
-		}
-	}
-	return cumulativeHiddenLayerWGrad;
-}
-
-void MLPPWGANOld::UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
-	std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
-	MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
-	if (!network.empty()) {
-		for (int ii = network.size() - 1; ii >= 0; ii--) {
-			uint32_t i = static_cast<uint32_t>(ii);
-
-			std::cout << "Layer " << i + 1 << ": " << std::endl;
-			MLPPUtilities::UI(network[i].weights, network[i].bias);
-		}
-	}
-}
diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h
index 586215a..9790d20 100644
--- a/mlpp/wgan/wgan.h
+++ b/mlpp/wgan/wgan.h
@@ -25,10 +25,6 @@
 #include "../regularization/reg.h"
 #include "../utilities/utilities.h"
 
-#include <string>
-#include <tuple>
-#include <vector>
-
 class MLPPWGAN : public Reference {
 	GDCLASS(MLPPWGAN, Reference);
 
@@ -84,40 +80,4 @@ protected:
 	int k;
 };
 
-class MLPPWGANOld {
-public:
-	MLPPWGANOld(real_t k, std::vector<std::vector<real_t>> outputSet);
-	~MLPPWGANOld();
-	std::vector<std::vector<real_t>> generateExample(int n);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
-	real_t score();
-	void save(std::string fileName);
-
-	void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-	void addOutputLayer(std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-
-private:
-	std::vector<std::vector<real_t>> modelSetTestGenerator(std::vector<std::vector<real_t>> X); // Evaluator for the generator of the WGAN.
-	std::vector<real_t> modelSetTestDiscriminator(std::vector<std::vector<real_t>> X); // Evaluator for the discriminator of the WGAN.
-
-	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
-
-	void forwardPass();
-	void updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate);
-	void updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate);
-	std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
-	std::vector<std::vector<std::vector<real_t>>> computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
-
-	void UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet);
-
-	std::vector<std::vector<real_t>> outputSet;
-	std::vector<real_t> y_hat;
-
-	std::vector<MLPPOldHiddenLayer> network;
-	MLPPOldOutputLayer *outputLayer;
-
-	int n;
-	int k;
-};
-
 #endif /* WGAN_hpp */
\ No newline at end of file
diff --git a/mlpp/wgan/wgan_old.cpp b/mlpp/wgan/wgan_old.cpp
new file mode 100644
index 0000000..491a634
--- /dev/null
+++ b/mlpp/wgan/wgan_old.cpp
@@ -0,0 +1,306 @@
+//
+//  WGAN.cpp
+//
+//  Created by Marc Melikyan on 11/4/20.
+//
+
+#include "wgan_old.h"
+
+#include "core/log/logger.h"
+
+#include "../activation/activation.h"
+#include "../cost/cost.h"
+#include "../lin_alg/lin_alg.h"
+#include "../regularization/reg.h"
+#include "../utilities/utilities.h"
+
+#include "core/object/method_bind_ext.gen.inc"
+
+#include <cmath>
+#include <iostream>
+
+MLPPWGANOld::MLPPWGANOld(real_t k, std::vector<std::vector<real_t>> outputSet) :
+		outputSet(outputSet), n(outputSet.size()), k(k) {
+}
+
+MLPPWGANOld::~MLPPWGANOld() {
+	delete outputLayer;
+}
+
+std::vector<std::vector<real_t>> MLPPWGANOld::generateExample(int n) {
+	MLPPLinAlg alg;
+	return modelSetTestGenerator(alg.gaussianNoise(n, k));
+}
+
+void MLPPWGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
+	class MLPPCost cost;
+	MLPPLinAlg alg;
+	real_t cost_prev = 0;
+	int epoch = 1;
+	forwardPass();
+
+	const int CRITIC_INTERATIONS = 5; // Wasserstein GAN specific parameter.
+
+	while (true) {
+		cost_prev = Cost(y_hat, alg.onevec(n));
+
+		std::vector<std::vector<real_t>> generatorInputSet;
+		std::vector<std::vector<real_t>> discriminatorInputSet;
+
+		std::vector<real_t> y_hat;
+		std::vector<real_t> outputSet;
+
+		// Training of the discriminator.
+		for (int i = 0; i < CRITIC_INTERATIONS; i++) {
+			generatorInputSet = alg.gaussianNoise(n, k);
+			discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
+			discriminatorInputSet.insert(discriminatorInputSet.end(), MLPPWGANOld::outputSet.begin(), MLPPWGANOld::outputSet.end()); // Fake + real inputs.
+
+			y_hat = modelSetTestDiscriminator(discriminatorInputSet);
+			outputSet = alg.scalarMultiply(-1, alg.onevec(n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
+			std::vector<real_t> outputSetReal = alg.onevec(n);
+			outputSet.insert(outputSet.end(), outputSetReal.begin(), outputSetReal.end()); // Fake + real output scores.
+
+			auto discriminator_gradient_results = computeDiscriminatorGradients(y_hat, outputSet);
+			auto cumulativeDiscriminatorHiddenLayerWGrad = std::get<0>(discriminator_gradient_results);
+			auto outputDiscriminatorWGrad = std::get<1>(discriminator_gradient_results);
+
+			cumulativeDiscriminatorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeDiscriminatorHiddenLayerWGrad);
+			outputDiscriminatorWGrad = alg.scalarMultiply(learning_rate / n, outputDiscriminatorWGrad);
+			updateDiscriminatorParameters(cumulativeDiscriminatorHiddenLayerWGrad, outputDiscriminatorWGrad, learning_rate);
+		}
+
+		// Training of the generator.
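+		// One generator update follows the CRITIC_INTERATIONS critic updates: fresh
+		// noise is mapped through the generator, scored by the discriminator, and fit
+		// against an all-ones target so the generator learns to raise the critic's
+		// score on fake samples.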
+		generatorInputSet = alg.gaussianNoise(n, k);
+		discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
+		y_hat = modelSetTestDiscriminator(discriminatorInputSet);
+		outputSet = alg.onevec(n);
+
+		std::vector<std::vector<std::vector<real_t>>> cumulativeGeneratorHiddenLayerWGrad = computeGeneratorGradients(y_hat, outputSet);
+		cumulativeGeneratorHiddenLayerWGrad = alg.scalarMultiply(learning_rate / n, cumulativeGeneratorHiddenLayerWGrad);
+		updateGeneratorParameters(cumulativeGeneratorHiddenLayerWGrad, learning_rate);
+
+		forwardPass();
+
+		if (UI) {
+			MLPPWGANOld::UI(epoch, cost_prev, MLPPWGANOld::y_hat, alg.onevec(n));
+		}
+
+		epoch++;
+		if (epoch > max_epoch) {
+			break;
+		}
+	}
+}
+
+real_t MLPPWGANOld::score() {
+	MLPPLinAlg alg;
+	MLPPUtilities util;
+	forwardPass();
+	return util.performance(y_hat, alg.onevec(n));
+}
+
+void MLPPWGANOld::save(std::string fileName) {
+	MLPPUtilities util;
+	if (!network.empty()) {
+		util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
+		for (uint32_t i = 1; i < network.size(); i++) {
+			util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
+		}
+		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
+	} else {
+		util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
+	}
+}
+
+void MLPPWGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
+	MLPPLinAlg alg;
+	if (network.empty()) {
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network[0].forwardPass();
+	} else {
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network[network.size() - 1].forwardPass();
+	}
+}
+
+void MLPPWGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
+	MLPPLinAlg alg;
+	if (!network.empty()) {
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
+	} else { // Should never happen.
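+		// No hidden layers: the output (critic) layer consumes raw gaussian noise of
+		// width k directly. Same Wasserstein setup as the branch above: linear
+		// activation, Wasserstein loss, and weights clipped to [-0.01, 0.01].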
+		outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+	}
+}
+
+std::vector<std::vector<real_t>> MLPPWGANOld::modelSetTestGenerator(std::vector<std::vector<real_t>> X) {
+	if (!network.empty()) {
+		network[0].input = X;
+		network[0].forwardPass();
+
+		for (uint32_t i = 1; i <= network.size() / 2; i++) {
+			network[i].input = network[i - 1].a;
+			network[i].forwardPass();
+		}
+	}
+	return network[network.size() / 2].a;
+}
+
+std::vector<real_t> MLPPWGANOld::modelSetTestDiscriminator(std::vector<std::vector<real_t>> X) {
+	if (!network.empty()) {
+		for (uint32_t i = network.size() / 2 + 1; i < network.size(); i++) {
+			if (i == network.size() / 2 + 1) {
+				network[i].input = X;
+			} else {
+				network[i].input = network[i - 1].a;
+			}
+			network[i].forwardPass();
+		}
+		outputLayer->input = network[network.size() - 1].a;
+	}
+	outputLayer->forwardPass();
+	return outputLayer->a;
+}
+
+real_t MLPPWGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+	MLPPReg regularization;
+	class MLPPCost cost;
+	real_t totalRegTerm = 0;
+
+	auto cost_function = outputLayer->cost_map[outputLayer->cost];
+	if (!network.empty()) {
+		for (uint32_t i = 0; i < network.size() - 1; i++) {
+			totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
+		}
+	}
+	return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
+}
+
+void MLPPWGANOld::forwardPass() {
+	MLPPLinAlg alg;
+	if (!network.empty()) {
+		network[0].input = alg.gaussianNoise(n, k);
+		network[0].forwardPass();
+
+		for (uint32_t i = 1; i < network.size(); i++) {
+			network[i].input = network[i - 1].a;
+			network[i].forwardPass();
+		}
+		outputLayer->input = network[network.size() - 1].a;
+	} else { // Should never happen, though.
+		outputLayer->input = alg.gaussianNoise(n, k);
+	}
+	outputLayer->forwardPass();
+	y_hat = outputLayer->a;
+}
+
+void MLPPWGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
+	MLPPLinAlg alg;
+
+	outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
+	outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
+
+	if (!network.empty()) {
+		network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
+		network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate / n, network[network.size() - 1].delta));
+
+		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
+			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
+			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
+		}
+	}
+}
+
+void MLPPWGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
+	MLPPLinAlg alg;
+
+	if (!network.empty()) {
+		for (int ii = network.size() / 2; ii >= 0; ii--) {
+			uint32_t i = static_cast<uint32_t>(ii);
+
+			//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
+			//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
+			network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
+			network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate / n, network[i].delta));
+		}
+	}
+}
+
+std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPWGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
+	class MLPPCost cost;
+	MLPPActivation avn;
+	MLPPLinAlg alg;
+	MLPPReg regularization;
+
+	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
+
+	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
+	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
+	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
+	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
+	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+
+	if (!network.empty()) {
+		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
+		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
+
+		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
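+
+		// Backprop continues only through the discriminator half of the stack
+		// (indices above network.size() / 2); generator layers receive no critic
+		// gradients here.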
+		//std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
+		//std::cout << "WEIGHTS SECOND:" << network[network.size() - 1].weights.size() << "x" << network[network.size() - 1].weights[0].size() << std::endl;
+
+		for (uint32_t i = network.size() - 2; i > network.size() / 2; i--) {
+			auto hiddenLayerAvnl = network[i].activation_map[network[i].activation];
+			network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvnl)(network[i].z, 1));
+			std::vector<std::vector<real_t>> hiddenLayerWGradl = alg.matmult(alg.transpose(network[i].input), network[i].delta);
+
+			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+		}
+	}
+	return { cumulativeHiddenLayerWGrad, outputWGrad };
+}
+
+std::vector<std::vector<std::vector<real_t>>> MLPPWGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
+	class MLPPCost cost;
+	MLPPActivation avn;
+	MLPPLinAlg alg;
+	MLPPReg regularization;
+
+	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
+
+	auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
+	auto outputAvn = outputLayer->activation_map[outputLayer->activation];
+	outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
+	std::vector<real_t> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
+	outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+	if (!network.empty()) {
+		auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
+		network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+		std::vector<std::vector<real_t>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
+		cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+
+		for (int ii = network.size() - 2; ii >= 0; ii--) {
+			uint32_t i = static_cast<uint32_t>(ii);
+			auto hiddenLayerAvnl = network[i].activation_map[network[i].activation];
+			network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvnl)(network[i].z, 1));
+			std::vector<std::vector<real_t>> hiddenLayerWGradl = alg.matmult(alg.transpose(network[i].input), network[i].delta);
+			cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGradl, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
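+			// Unlike the discriminator pass, this loop walks all the way down to
+			// layer 0, so gradients reach the generator layers at the bottom of the
+			// stack.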
+		}
+	}
+	return cumulativeHiddenLayerWGrad;
+}
+
+void MLPPWGANOld::UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
+	MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
+	std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
+	MLPPUtilities::UI(outputLayer->weights, outputLayer->bias);
+	if (!network.empty()) {
+		for (int ii = network.size() - 1; ii >= 0; ii--) {
+			uint32_t i = static_cast<uint32_t>(ii);
+
+			std::cout << "Layer " << i + 1 << ": " << std::endl;
+			MLPPUtilities::UI(network[i].weights, network[i].bias);
+		}
+	}
+}
diff --git a/mlpp/wgan/wgan_old.h b/mlpp/wgan/wgan_old.h
new file mode 100644
index 0000000..aa54eb9
--- /dev/null
+++ b/mlpp/wgan/wgan_old.h
@@ -0,0 +1,68 @@
+
+#ifndef MLPP_WGAN_OLD_H
+#define MLPP_WGAN_OLD_H
+
+//
+//  WGAN.hpp
+//
+//  Created by Marc Melikyan on 11/4/20.
+//
+
+#include "core/containers/vector.h"
+#include "core/math/math_defs.h"
+#include "core/string/ustring.h"
+
+#include "core/object/reference.h"
+
+#include "../lin_alg/mlpp_matrix.h"
+#include "../lin_alg/mlpp_vector.h"
+
+#include "../hidden_layer/hidden_layer.h"
+#include "../output_layer/output_layer.h"
+
+#include "../activation/activation.h"
+#include "../cost/cost.h"
+#include "../regularization/reg.h"
+#include "../utilities/utilities.h"
+
+#include <string>
+#include <tuple>
+#include <vector>
+
+class MLPPWGANOld {
+public:
+	MLPPWGANOld(real_t k, std::vector<std::vector<real_t>> outputSet);
+	~MLPPWGANOld();
+	std::vector<std::vector<real_t>> generateExample(int n);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	real_t score();
+	void save(std::string fileName);
+
+	void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
+	void addOutputLayer(std::string weightInit = "Default", std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
+
+private:
+	std::vector<std::vector<real_t>> modelSetTestGenerator(std::vector<std::vector<real_t>> X); // Evaluator for the generator of the WGAN.
+	std::vector<real_t> modelSetTestDiscriminator(std::vector<std::vector<real_t>> X); // Evaluator for the discriminator of the WGAN.
+
+	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+
+	void forwardPass();
+	void updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate);
+	void updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate);
+	std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
+	std::vector<std::vector<std::vector<real_t>>> computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet);
+
+	void UI(int epoch, real_t cost_prev, std::vector<real_t> y_hat, std::vector<real_t> outputSet);
+
+	std::vector<std::vector<real_t>> outputSet;
+	std::vector<real_t> y_hat;
+
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
+
+	int n;
+	int k;
+};
+
+#endif /* WGAN_hpp */
\ No newline at end of file
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index 3c3edad..2245013 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -47,6 +47,8 @@
 #include "../mlpp/uni_lin_reg/uni_lin_reg.h"
 #include "../mlpp/wgan/wgan.h"
 
+#include "../mlpp/wgan/wgan_old.h"
+
 Vector<real_t> dstd_vec_to_vec(const std::vector<real_t> &in) {
 	Vector<real_t> r;