Renamed MLPPHiddenLayer and MLPPOutputLayer.

Relintai 2023-01-30 16:56:16 +01:00
parent 27d187c67a
commit ea4978f535
12 changed files with 31 additions and 31 deletions

View File

@@ -652,10 +652,10 @@ real_t MLPPANN::applyLearningRateScheduler(real_t learningRate, real_t decayCons
 void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -663,9 +663,9 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new MLPPOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
 	}
 }
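For reference, a minimal usage sketch of the two methods above after the rename. The ann instance, the layer width, and the activation, loss, weight-init, and regularization arguments are illustrative assumptions, not values taken from this commit:

// Hypothetical example, not part of the diff: ann is an existing MLPPANN object.
ann.addLayer(16, "Sigmoid", "Default", "None", 0.5, 0.5); // now pushes an MLPPOldHiddenLayer
ann.addOutputLayer("Sigmoid", "LogLoss", "Default", "None", 0.5, 0.5); // now allocates an MLPPOldOutputLayer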

View File

@@ -56,8 +56,8 @@ private:
 	std::vector<real_t> outputSet;
 	std::vector<real_t> y_hat;
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 	int n;
 	int k;

View File

@@ -99,10 +99,10 @@ void MLPPGAN::save(std::string fileName) {
 void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -110,9 +110,9 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new MLPPOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
 	}
 }

View File

@@ -48,8 +48,8 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<real_t> y_hat;
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 	int n;
 	int k;

View File

@@ -13,7 +13,7 @@
 #include <random>
-MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldHiddenLayer::MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
 		n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit);
 	bias = MLPPUtilities::biasInitialization(n_hidden);
@@ -97,14 +97,14 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vect
 	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 }
-void MLPPHiddenLayer::forwardPass() {
+void MLPPOldHiddenLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
 	a = (avn.*activation_map[activation])(z, 0);
 }
-void MLPPHiddenLayer::Test(std::vector<real_t> x) {
+void MLPPOldHiddenLayer::Test(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
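In plain terms, the unchanged forwardPass body above computes an affine map followed by an elementwise activation, and Test does the same for a single sample. A shape sketch under assumed dimensions (n samples, d input features), for illustration only:

// Assumed shapes, not stated in this commit:
// input   : n x d          (std::vector<std::vector<real_t>>)
// weights : d x n_hidden
// bias    : n_hidden
// z = mat_vec_add(matmult(input, weights), bias)  -> n x n_hidden, bias added to each row
// a = activation(z), applied elementwise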

View File

@@ -17,9 +17,9 @@
 #include <vector>
-class MLPPHiddenLayer {
+class MLPPOldHiddenLayer {
 public:
-	MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+	MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
 	int n_hidden;
 	std::string activation;

View File

@@ -141,10 +141,10 @@ void MLPPMANN::save(std::string fileName) {
 void MLPPMANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }

View File

@@ -39,7 +39,7 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<std::vector<real_t>> y_hat;
-	std::vector<MLPPHiddenLayer> network;
+	std::vector<MLPPOldHiddenLayer> network;
 	MLPPMultiOutputLayer *outputLayer;
 	int n;

View File

@@ -12,7 +12,7 @@
 #include <random>
-MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldOutputLayer::MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
 		n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = MLPPUtilities::weightInitialization(n_hidden, weightInit);
 	bias = MLPPUtilities::biasInitialization();
@@ -113,14 +113,14 @@ MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::stri
 	cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }
-void MLPPOutputLayer::forwardPass() {
+void MLPPOldOutputLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
 	a = (avn.*activation_map[activation])(z, 0);
 }
-void MLPPOutputLayer::Test(std::vector<real_t> x) {
+void MLPPOldOutputLayer::Test(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.dot(weights, x) + bias;

View File

@@ -18,9 +18,9 @@
 #include <vector>
-class MLPPOutputLayer {
+class MLPPOldOutputLayer {
 public:
-	MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+	MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
 	int n_hidden;
 	std::string activation;
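A minimal construction sketch with the renamed classes, matching the constructor signatures above. X stands for an assumed std::vector<std::vector<real_t>> feature matrix, and the string and scalar arguments are placeholders rather than values from this commit:

// Hypothetical example, not part of the diff.
MLPPOldHiddenLayer hidden(16, "Sigmoid", X, "Default", "None", 0.5, 0.5);
hidden.forwardPass(); // sets hidden.z and hidden.a

MLPPOldOutputLayer out(hidden.n_hidden, "Sigmoid", "LogLoss", hidden.a, "Default", "None", 0.5, 0.5);
out.forwardPass(); // sets out.z and out.a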

View File

@@ -108,10 +108,10 @@ void MLPPWGAN::save(std::string fileName) {
 void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -119,9 +119,9 @@ void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weight
 void MLPPWGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
 	} else { // Should never happen.
-		outputLayer = new MLPPOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+		outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
 	}
 }

View File

@@ -46,8 +46,8 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<real_t> y_hat;
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 	int n;
 	int k;