Mirror of https://github.com/Relintai/MLPP.git (synced 2024-11-12 10:15:01 +01:00)

Commit 47b29071fd ("pow")
Parent: 6a3b1ebefb
BIN  MLPP/.DS_Store (vendored) — binary file not shown
MLPP/ANN/ANN.cpp
@@ -554,7 +554,7 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double
 void ANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha){
     LinAlg alg;
     if(!network.empty()){
-        outputLayer = new OutputLayer(network[0].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+        outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
     }
     else{
         outputLayer = new OutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
@@ -612,6 +612,8 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double
 }
 
 std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> ANN::computeGradients(std::vector<double> y_hat, std::vector<double> outputSet){
+    std::cout << "BEGIN" << std::endl;
+    std::cout << k << std::endl;
     class Cost cost;
     Activation avn;
     LinAlg alg;
@@ -630,13 +632,12 @@ void ANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double
         network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
         std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
 
         cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 
         for(int i = network.size() - 2; i >= 0; i--){
             auto hiddenLayerAvn = network[i].activation_map[network[i].activation];
-            network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, network[i + 1].weights), (avn.*hiddenLayerAvn)(network[i].z, 1));
+            network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
             std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
 
             cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
-
         }
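The substantive fix in the hunk above is dimensional: `network[i + 1].delta` holds one row per example while `network[i + 1].weights` maps layer i to layer i + 1, so propagating the error back to layer i requires the weight transpose. In conventional notation (my paraphrase of what the changed line computes, not text from the commit):

```
\delta_i = \left( \delta_{i+1} \, W_{i+1}^{\top} \right) \odot f_i'(z_i)
```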
BIN  SharedLib/.DS_Store → MLPP/GAN/.DS_Store (vendored) — binary file not shown
MLPP/GAN/GAN.cpp (new file, 290 lines)
@@ -0,0 +1,290 @@
+//
+//  GAN.cpp
+//
+//  Created by Marc Melikyan on 11/4/20.
+//
+
+#include "GAN.hpp"
+#include "Activation/Activation.hpp"
+#include "LinAlg/LinAlg.hpp"
+#include "Regularization/Reg.hpp"
+#include "Utilities/Utilities.hpp"
+#include "Cost/Cost.hpp"
+
+#include <iostream>
+#include <cmath>
+
+namespace MLPP {
+    GAN::GAN(double k, std::vector<std::vector<double>> outputSet)
+    : outputSet(outputSet), n(outputSet.size()), k(k)
+    {
+
+    }
+
+    GAN::~GAN(){
+        delete outputLayer;
+    }
+
+    std::vector<std::vector<double>> GAN::generateExample(int n){
+        LinAlg alg;
+        return modelSetTestGenerator(alg.gaussianNoise(n, k));
+    }
+
+    void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI){
+        class Cost cost;
+        LinAlg alg;
+        double cost_prev = 0;
+        int epoch = 1;
+        forwardPass();
+
+        while(true){
+            cost_prev = Cost(y_hat, alg.onevec(n));
+
+            // Training of the discriminator.
+
+            std::vector<std::vector<double>> generatorInputSet = alg.gaussianNoise(n, k);
+            std::vector<std::vector<double>> discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
+            discriminatorInputSet.insert(discriminatorInputSet.end(), outputSet.begin(), outputSet.end()); // Fake + real inputs.
+
+            std::vector<double> y_hat = modelSetTestDiscriminator(discriminatorInputSet);
+            std::vector<double> outputSet = alg.zerovec(n);
+            std::vector<double> outputSetReal = alg.onevec(n);
+            outputSet.insert(outputSet.end(), outputSetReal.begin(), outputSetReal.end()); // Fake + real output scores.
+
+            auto [cumulativeDiscriminatorHiddenLayerWGrad, outputDiscriminatorWGrad] = computeDiscriminatorGradients(y_hat, outputSet);
+            cumulativeDiscriminatorHiddenLayerWGrad = alg.scalarMultiply(learning_rate/n, cumulativeDiscriminatorHiddenLayerWGrad);
+            outputDiscriminatorWGrad = alg.scalarMultiply(learning_rate/n, outputDiscriminatorWGrad);
+            updateDiscriminatorParameters(cumulativeDiscriminatorHiddenLayerWGrad, outputDiscriminatorWGrad, learning_rate);
+
+            // Training of the generator.
+            generatorInputSet = alg.gaussianNoise(n, k);
+            discriminatorInputSet = modelSetTestGenerator(generatorInputSet);
+            y_hat = modelSetTestDiscriminator(discriminatorInputSet);
+            outputSet = alg.onevec(n);
+
+            std::vector<std::vector<std::vector<double>>> cumulativeGeneratorHiddenLayerWGrad = computeGeneratorGradients(y_hat, outputSet);
+            cumulativeGeneratorHiddenLayerWGrad = alg.scalarMultiply(learning_rate/n, cumulativeGeneratorHiddenLayerWGrad);
+            updateGeneratorParameters(cumulativeGeneratorHiddenLayerWGrad, learning_rate);
+
+            forwardPass();
+            if(UI) { GAN::UI(epoch, cost_prev, GAN::y_hat, alg.onevec(n)); }
+
+            epoch++;
+            if(epoch > max_epoch) { break; }
+        }
+    }
+
+    double GAN::score(){
+        LinAlg alg;
+        Utilities util;
+        forwardPass();
+        return util.performance(y_hat, alg.onevec(n));
+    }
+
+    void GAN::save(std::string fileName){
+        Utilities util;
+        if(!network.empty()){
+            util.saveParameters(fileName, network[0].weights, network[0].bias, 0, 1);
+            for(int i = 1; i < network.size(); i++){
+                util.saveParameters(fileName, network[i].weights, network[i].bias, 1, i + 1);
+            }
+            util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 1, network.size() + 1);
+        }
+        else{
+            util.saveParameters(fileName, outputLayer->weights, outputLayer->bias, 0, network.size() + 1);
+        }
+    }
+
+    void GAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, double lambda, double alpha){
+        LinAlg alg;
+        if(network.empty()){
+            network.push_back(HiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+            network[0].forwardPass();
+        }
+        else{
+            network.push_back(HiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+            network[network.size() - 1].forwardPass();
+        }
+    }
+
+    void GAN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, double lambda, double alpha){
+        LinAlg alg;
+        if(!network.empty()){
+            outputLayer = new OutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+        }
+        else{
+            outputLayer = new OutputLayer(k, activation, loss, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
+        }
+    }
+
+    std::vector<std::vector<double>> GAN::modelSetTestGenerator(std::vector<std::vector<double>> X){
+        if(!network.empty()){
+            network[0].input = X;
+            network[0].forwardPass();
+
+            for(int i = 1; i <= network.size()/2; i++){
+                network[i].input = network[i - 1].a;
+                network[i].forwardPass();
+            }
+        }
+        return network[network.size()/2].a;
+    }
+
+    std::vector<double> GAN::modelSetTestDiscriminator(std::vector<std::vector<double>> X){
+        if(!network.empty()){
+            for(int i = network.size()/2 + 1; i < network.size(); i++){
+                if(i == network.size()/2 + 1){
+                    network[i].input = X;
+                }
+                else { network[i].input = network[i - 1].a; }
+                network[i].forwardPass();
+            }
+            outputLayer->input = network[network.size() - 1].a;
+        }
+        outputLayer->forwardPass();
+        return outputLayer->a;
+    }
+
+    double GAN::Cost(std::vector<double> y_hat, std::vector<double> y){
+        Reg regularization;
+        class Cost cost;
+        double totalRegTerm = 0;
+
+        auto cost_function = outputLayer->cost_map[outputLayer->cost];
+        if(!network.empty()){
+            for(int i = 0; i < network.size() - 1; i++){
+                totalRegTerm += regularization.regTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg);
+            }
+        }
+        return (cost.*cost_function)(y_hat, y) + totalRegTerm + regularization.regTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg);
+    }
+
+    void GAN::forwardPass(){
+        LinAlg alg;
+        if(!network.empty()){
+            network[0].input = alg.gaussianNoise(n, k);
+            network[0].forwardPass();
+
+            for(int i = 1; i < network.size(); i++){
+                network[i].input = network[i - 1].a;
+                network[i].forwardPass();
+            }
+            outputLayer->input = network[network.size() - 1].a;
+        }
+        else{ // Should never happen, though.
+            outputLayer->input = alg.gaussianNoise(n, k);
+        }
+        outputLayer->forwardPass();
+        y_hat = outputLayer->a;
+    }
+
+    void GAN::updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate){
+        LinAlg alg;
+
+        outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
+        outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
+
+        if(!network.empty()){
+            network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, hiddenLayerUpdations[0]);
+            network[network.size() - 1].bias = alg.subtractMatrixRows(network[network.size() - 1].bias, alg.scalarMultiply(learning_rate/n, network[network.size() - 1].delta));
+
+            for(int i = network.size() - 2; i > network.size()/2; i--){
+                network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
+                network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate/n, network[i].delta));
+            }
+        }
+    }
+
+    void GAN::updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate){
+        LinAlg alg;
+
+        if(!network.empty()){
+
+            for(int i = network.size()/2; i >= 0; i--){
+                //std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
+                //std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
+                network[i].weights = alg.subtraction(network[i].weights, hiddenLayerUpdations[(network.size() - 2) - i + 1]);
+                network[i].bias = alg.subtractMatrixRows(network[i].bias, alg.scalarMultiply(learning_rate/n, network[i].delta));
+            }
+        }
+    }
+
+    std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> GAN::computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet){
+        class Cost cost;
+        Activation avn;
+        LinAlg alg;
+        Reg regularization;
+
+        std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
+
+        auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
+        auto outputAvn = outputLayer->activation_map[outputLayer->activation];
+        outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
+        std::vector<double> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
+        outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+
+
+        if(!network.empty()){
+            auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
+
+            network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+            std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
+
+            cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+
+            //std::cout << "HIDDENLAYER FIRST:" << hiddenLayerWGrad.size() << "x" << hiddenLayerWGrad[0].size() << std::endl;
+            //std::cout << "WEIGHTS SECOND:" << network[network.size() - 1].weights.size() << "x" << network[network.size() - 1].weights[0].size() << std::endl;
+
+            for(int i = network.size() - 2; i > network.size()/2; i--){
+                auto hiddenLayerAvn = network[i].activation_map[network[i].activation];
+                network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
+                std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
+
+                cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+
+            }
+        }
+        return {cumulativeHiddenLayerWGrad, outputWGrad};
+    }
+
+    std::vector<std::vector<std::vector<double>>> GAN::computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet){
+        class Cost cost;
+        Activation avn;
+        LinAlg alg;
+        Reg regularization;
+
+        std::vector<std::vector<std::vector<double>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
+
+        auto costDeriv = outputLayer->costDeriv_map[outputLayer->cost];
+        auto outputAvn = outputLayer->activation_map[outputLayer->activation];
+        outputLayer->delta = alg.hadamard_product((cost.*costDeriv)(y_hat, outputSet), (avn.*outputAvn)(outputLayer->z, 1));
+        std::vector<double> outputWGrad = alg.mat_vec_mult(alg.transpose(outputLayer->input), outputLayer->delta);
+        outputWGrad = alg.addition(outputWGrad, regularization.regDerivTerm(outputLayer->weights, outputLayer->lambda, outputLayer->alpha, outputLayer->reg));
+        if(!network.empty()){
+            auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
+            network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+            std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
+            cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[network.size() - 1].weights, network[network.size() - 1].lambda, network[network.size() - 1].alpha, network[network.size() - 1].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+
+            for(int i = network.size() - 2; i >= 0; i--){
+                auto hiddenLayerAvn = network[i].activation_map[network[i].activation];
+                network[i].delta = alg.hadamard_product(alg.matmult(network[i + 1].delta, alg.transpose(network[i + 1].weights)), (avn.*hiddenLayerAvn)(network[i].z, 1));
+                std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[i].input), network[i].delta);
+                cumulativeHiddenLayerWGrad.push_back(alg.addition(hiddenLayerWGrad, regularization.regDerivTerm(network[i].weights, network[i].lambda, network[i].alpha, network[i].reg))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+            }
+        }
+        return cumulativeHiddenLayerWGrad;
+    }
+
+    void GAN::UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet){
+        Utilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
+        std::cout << "Layer " << network.size() + 1 << ": " << std::endl;
+        Utilities::UI(outputLayer->weights, outputLayer->bias);
+        if(!network.empty()){
+            for(int i = network.size() - 1; i >= 0; i--){
+                std::cout << "Layer " << i + 1 << ": " << std::endl;
+                Utilities::UI(network[i].weights, network[i].bias);
+            }
+        }
+    }
+}
MLPP/GAN/GAN.hpp (new file, 57 lines)
@@ -0,0 +1,57 @@
+//
+//  GAN.hpp
+//
+//  Created by Marc Melikyan on 11/4/20.
+//
+
+#ifndef GAN_hpp
+#define GAN_hpp
+
+#include "HiddenLayer/HiddenLayer.hpp"
+#include "OutputLayer/OutputLayer.hpp"
+
+#include <vector>
+#include <tuple>
+#include <string>
+
+namespace MLPP{
+
+class GAN{
+    public:
+        GAN(double k, std::vector<std::vector<double>> outputSet);
+        ~GAN();
+        std::vector<std::vector<double>> generateExample(int n);
+        double modelTest(std::vector<double> x);
+        void gradientDescent(double learning_rate, int max_epoch, bool UI = 1);
+        double score();
+        void save(std::string fileName);
+
+        void addLayer(int n_hidden, std::string activation, std::string weightInit = "Default", std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+        void addOutputLayer(std::string activation, std::string loss, std::string weightInit = "Default", std::string reg = "None", double lambda = 0.5, double alpha = 0.5);
+
+    private:
+        std::vector<std::vector<double>> modelSetTestGenerator(std::vector<std::vector<double>> X); // Evaluator for the generator of the gan.
+        std::vector<double> modelSetTestDiscriminator(std::vector<std::vector<double>> X); // Evaluator for the discriminator of the gan.
+
+        double Cost(std::vector<double> y_hat, std::vector<double> y);
+
+        void forwardPass();
+        void updateDiscriminatorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, std::vector<double> outputLayerUpdation, double learning_rate);
+        void updateGeneratorParameters(std::vector<std::vector<std::vector<double>>> hiddenLayerUpdations, double learning_rate);
+        std::tuple<std::vector<std::vector<std::vector<double>>>, std::vector<double>> computeDiscriminatorGradients(std::vector<double> y_hat, std::vector<double> outputSet);
+        std::vector<std::vector<std::vector<double>>> computeGeneratorGradients(std::vector<double> y_hat, std::vector<double> outputSet);
+
+        void UI(int epoch, double cost_prev, std::vector<double> y_hat, std::vector<double> outputSet);
+
+        std::vector<std::vector<double>> outputSet;
+        std::vector<double> y_hat;
+
+        std::vector<HiddenLayer> network;
+        OutputLayer *outputLayer;
+
+        int n;
+        int k;
+};
+}
+
+#endif /* GAN_hpp */
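Pulling the two new files together: the class is meant to be driven roughly as below. This is a condensed sketch of the usage this commit itself adds to main.cpp further down; the toy data and layer widths are illustrative, and the generator/discriminator split comments reflect how modelSetTestGenerator and modelSetTestDiscriminator partition `network` at index network.size()/2.

```
#include "MLPP/LinAlg/LinAlg.hpp"
#include "MLPP/GAN/GAN.hpp"

using namespace MLPP;

int main() {
    LinAlg alg;
    // Toy real data (illustrative): columns become 2-dimensional examples after transposing.
    std::vector<std::vector<double>> outputSet = {{1,2,3,4,5}, {2,4,6,8,10}};

    GAN gan(2, alg.transpose(outputSet)); // k = 2: width of the Gaussian noise fed to the generator.
    gan.addLayer(5, "Sigmoid");           // First half of `network` acts as the generator...
    gan.addLayer(2, "RELU");              // ...ending at index network.size()/2, whose width matches the data.
    gan.addLayer(5, "Sigmoid");           // Remaining hidden layers act as the discriminator.
    gan.addOutputLayer("Sigmoid", "LogLoss");
    gan.gradientDescent(0.1, 25000, 0);   // learning rate, max epochs, UI off.
    alg.printMatrix(gan.generateExample(5)); // Decode 5 fresh noise samples.
    return 0;
}
```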
MLPP/LinAlg/LinAlg.cpp
@@ -7,11 +7,28 @@
 #include "LinAlg.hpp"
 #include "Stat/Stat.hpp"
 #include <iostream>
+#include <random>
 #include <map>
 #include <cmath>
 
 namespace MLPP{
 
+    std::vector<std::vector<double>> LinAlg::gaussianNoise(int n, int m){
+        std::random_device rd;
+        std::default_random_engine generator(rd());
+
+        std::vector<std::vector<double>> A;
+        A.resize(n);
+        for(int i = 0; i < n; i++){
+            A[i].resize(m);
+            for(int j = 0; j < m; j++){
+                std::normal_distribution<double> distribution(0, 1); // Standard normal distribution. Mean of 0, std of 1.
+                A[i][j] = distribution(generator);
+            }
+        }
+        return A;
+    }
+
     std::vector<std::vector<double>> LinAlg::addition(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B){
         std::vector<std::vector<double>> C;
         C.resize(A.size());
MLPP/LinAlg/LinAlg.hpp
@@ -16,6 +16,8 @@ namespace MLPP{
 
         // MATRIX FUNCTIONS
 
+        std::vector<std::vector<double>> gaussianNoise(int n, int m);
+
         std::vector<std::vector<double>> addition(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B);
 
         std::vector<std::vector<double>> subtraction(std::vector<std::vector<double>> A, std::vector<std::vector<double>> B);
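For completeness, the new helper can be exercised on its own. A minimal sketch, assuming the standard MLPP include layout; printMatrix is the existing LinAlg printer used elsewhere in this commit:

```
#include <vector>
#include "MLPP/LinAlg/LinAlg.hpp"

int main() {
    MLPP::LinAlg alg;
    // 3x2 matrix of independent draws from N(0, 1); the engine is
    // reseeded from std::random_device per call, so values differ each run.
    std::vector<std::vector<double>> noise = alg.gaussianNoise(3, 2);
    alg.printMatrix(noise);
    return 0;
}
```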
README.md
@@ -12,7 +12,7 @@ Begin by downloading the header files for the ML++ library. You can do this by c
 ```
 git clone https://github.com/novak-99/MLPP
 ```
-Next, execute the "./buildSO.sh" shell script:
+Next, execute the "buildSO.sh" shell script:
 ```
 sudo ./buildSO.sh
 ```
buildSO.sh
@@ -1,6 +1,6 @@
 g++ -I MLPP -c -fPIC main.cpp MLPP/Stat/Stat.cpp MLPP/LinAlg/LinAlg.cpp MLPP/Regularization/Reg.cpp MLPP/Activation/Activation.cpp MLPP/Utilities/Utilities.cpp MLPP/Data/Data.cpp MLPP/Cost/Cost.cpp MLPP/ANN/ANN.cpp MLPP/HiddenLayer/HiddenLayer.cpp MLPP/OutputLayer/OutputLayer.cpp MLPP/MLP/MLP.cpp MLPP/LinReg/LinReg.cpp MLPP/LogReg/LogReg.cpp MLPP/UniLinReg/UniLinReg.cpp MLPP/CLogLogReg/CLogLogReg.cpp MLPP/ExpReg/ExpReg.cpp MLPP/ProbitReg/ProbitReg.cpp MLPP/SoftmaxReg/SoftmaxReg.cpp MLPP/TanhReg/TanhReg.cpp MLPP/SoftmaxNet/SoftmaxNet.cpp MLPP/Convolutions/Convolutions.cpp MLPP/AutoEncoder/AutoEncoder.cpp MLPP/MultinomialNB/MultinomialNB.cpp MLPP/BernoulliNB/BernoulliNB.cpp MLPP/GaussianNB/GaussianNB.cpp MLPP/KMeans/KMeans.cpp MLPP/kNN/kNN.cpp MLPP/PCA/PCA.cpp MLPP/OutlierFinder/OutlierFinder.cpp MLPP/MANN/MANN.cpp MLPP/MultiOutputLayer/MultiOutputLayer.cpp MLPP/SVC/SVC.cpp MLPP/NumericalAnalysis/NumericalAnalysis.cpp MLPP/DualSVC/DualSVC.cpp --std=c++17
 
 g++ -shared -o MLPP.so Reg.o LinAlg.o Stat.o Activation.o LinReg.o Utilities.o Cost.o LogReg.o ProbitReg.o ExpReg.o CLogLogReg.o SoftmaxReg.o TanhReg.o kNN.o KMeans.o UniLinReg.o SoftmaxNet.o MLP.o AutoEncoder.o HiddenLayer.o OutputLayer.o ANN.o BernoulliNB.o GaussianNB.o MultinomialNB.o Convolutions.o OutlierFinder.o Data.o MultiOutputLayer.o MANN.o SVC.o NumericalAnalysis.o DualSVC.o
-mv MLPP.so SharedLib
+sudo mv MLPP.so /usr/local/lib
 
 rm *.o
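Since the script now installs MLPP.so into /usr/local/lib rather than copying it into SharedLib/, a consuming program would presumably be compiled against the new location along these lines (the exact command is an assumption, not part of this commit):

```
g++ main.cpp /usr/local/lib/MLPP.so --std=c++17 -o main
```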
main.cpp (26 lines changed)
@@ -47,6 +47,7 @@
 #include "MLPP/SVC/SVC.hpp"
 #include "MLPP/NumericalAnalysis/NumericalAnalysis.hpp"
 #include "MLPP/DualSVC/DualSVC.hpp"
+#include "MLPP/GAN/GAN.hpp"
 
 
 using namespace MLPP;
@@ -154,8 +155,8 @@ int main() {
     std::vector<double> w = {0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1};
 
     // std::cout << "Arithmetic Mean: " << stat.mean(x) << std::endl;
-    std::cout << "Median: " << stat.median(x) << std::endl;
-    alg.printVector(x);
+    // std::cout << "Median: " << stat.median(x) << std::endl;
+    // alg.printVector(x);
     // alg.printVector(stat.mode(x));
     // std::cout << "Range: " << stat.range(x) << std::endl;
     // std::cout << "Midrange: " << stat.midrange(x) << std::endl;
@@ -365,7 +366,7 @@ int main() {
     // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
     // std::vector<double> outputSet = {0,1,1,0};
     // ANN ann(alg.transpose(inputSet), outputSet);
-    // //ann.addLayer(10, "RELU");
+    // //ann.addLayer(10, "Sigmoid");
     // ann.addLayer(10, "Sigmoid");
     // ann.addOutputLayer("Sigmoid", "LogLoss");
     // //ann.AMSGrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, 1);
@@ -375,6 +376,19 @@ int main() {
     // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
     // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
+
+    std::vector<std::vector<double>> outputSet = {{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20},
+                                                  {2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40}};
+    //Vector outputSet = {0,1,1,0};
+    GAN gan(2, alg.transpose(outputSet));
+    gan.addLayer(5, "Sigmoid");
+    gan.addLayer(2, "RELU");
+    gan.addLayer(5, "Sigmoid");
+    gan.addOutputLayer("Sigmoid", "LogLoss");
+    gan.gradientDescent(0.1, 25000, 0);
+    std::cout << "GENERATED INPUT: (Gaussian-sampled noise):" << std::endl;
+    alg.printMatrix(gan.generateExample(5));
+
 
     // typedef std::vector<std::vector<double>> Matrix;
     // typedef std::vector<double> Vector;
 
@@ -382,10 +396,10 @@ int main() {
     // Vector outputSet = {0,1,1,0};
 
     // ANN ann(inputSet, outputSet);
-    // ann.addLayer(10, "Sigmoid");
-    // ann.addLayer(10, "Sigmoid"); // Add more layers as needed.
+    // ann.addLayer(5, "Sigmoid");
+    // ann.addLayer(8, "Sigmoid"); // Add more layers as needed.
     // ann.addOutputLayer("Sigmoid", "LogLoss");
-    // ann.gradientDescent(0.1, 20000, 0);
+    // ann.gradientDescent(1, 20000, 1);
 
     // Vector predictions = ann.modelSetTest(inputSet);
     // alg.printVector(predictions); // Testing out the model's preds for train set.