From ea4978f5352d6b438222b337d640a65158dc0bc8 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Mon, 30 Jan 2023 16:56:16 +0100
Subject: [PATCH] Renamed MLPPHiddenLayer and MLPPOutputLayer.

---
 mlpp/ann/ann.cpp                   | 8 ++++----
 mlpp/ann/ann.h                     | 4 ++--
 mlpp/gan/gan.cpp                   | 8 ++++----
 mlpp/gan/gan.h                     | 4 ++--
 mlpp/hidden_layer/hidden_layer.cpp | 6 +++---
 mlpp/hidden_layer/hidden_layer.h   | 4 ++--
 mlpp/mann/mann.cpp                 | 4 ++--
 mlpp/mann/mann.h                   | 2 +-
 mlpp/output_layer/output_layer.cpp | 6 +++---
 mlpp/output_layer/output_layer.h   | 4 ++--
 mlpp/wgan/wgan.cpp                 | 8 ++++----
 mlpp/wgan/wgan.h                   | 4 ++--
 12 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index cd30bd4..fe0f54c 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -652,10 +652,10 @@ real_t MLPPANN::applyLearningRateScheduler(real_t learningRate, real_t decayCons
 
 void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -663,9 +663,9 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
 
 void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new MLPPOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
 	}
 }
diff --git a/mlpp/ann/ann.h b/mlpp/ann/ann.h
index 1ca3c4e..4d86e65 100644
--- a/mlpp/ann/ann.h
+++ b/mlpp/ann/ann.h
@@ -56,8 +56,8 @@ private:
 	std::vector<real_t> outputSet;
 	std::vector<real_t> y_hat;
 
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 
 	int n;
 	int k;
diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp
index 6cf16c1..c3ec434 100644
--- a/mlpp/gan/gan.cpp
+++ b/mlpp/gan/gan.cpp
@@ -99,10 +99,10 @@ void MLPPGAN::save(std::string fileName) {
 void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -110,9 +110,9 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI
 
 void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
 	} else {
-		outputLayer = new MLPPOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
+		outputLayer = new MLPPOldOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
 	}
 }
diff --git a/mlpp/gan/gan.h b/mlpp/gan/gan.h
index 1c9faaf..c78c772 100644
--- a/mlpp/gan/gan.h
+++ b/mlpp/gan/gan.h
@@ -48,8 +48,8 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<real_t> y_hat;
 
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 
 	int n;
 	int k;
diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp
index c8fe626..bb8cbbd 100644
--- a/mlpp/hidden_layer/hidden_layer.cpp
+++ b/mlpp/hidden_layer/hidden_layer.cpp
@@ -13,7 +13,7 @@
 
 #include
 
-MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldHiddenLayer::MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
 		n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit);
 	bias = MLPPUtilities::biasInitialization(n_hidden);
@@ -97,14 +97,14 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vect
 	activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 }
 
-void MLPPHiddenLayer::forwardPass() {
+void MLPPOldHiddenLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z = alg.mat_vec_add(alg.matmult(input, weights), bias);
 	a = (avn.*activation_map[activation])(z, 0);
 }
 
-void MLPPHiddenLayer::Test(std::vector<real_t> x) {
+void MLPPOldHiddenLayer::Test(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
diff --git a/mlpp/hidden_layer/hidden_layer.h b/mlpp/hidden_layer/hidden_layer.h
index 138dca2..00e3484 100644
--- a/mlpp/hidden_layer/hidden_layer.h
+++ b/mlpp/hidden_layer/hidden_layer.h
@@ -17,9 +17,9 @@
 
 #include
 
-class MLPPHiddenLayer {
+class MLPPOldHiddenLayer {
 public:
-	MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+	MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
 
 	int n_hidden;
 	std::string activation;
diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp
index 77a1f15..d8620ae 100644
--- a/mlpp/mann/mann.cpp
+++ b/mlpp/mann/mann.cpp
@@ -141,10 +141,10 @@ void MLPPMANN::save(std::string fileName) {
 
 void MLPPMANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
diff --git a/mlpp/mann/mann.h b/mlpp/mann/mann.h
index d8c6e0b..82ed9a8 100644
--- a/mlpp/mann/mann.h
+++ b/mlpp/mann/mann.h
@@ -39,7 +39,7 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<std::vector<real_t>> y_hat;
 
-	std::vector<MLPPHiddenLayer> network;
+	std::vector<MLPPOldHiddenLayer> network;
 	MLPPMultiOutputLayer *outputLayer;
 
 	int n;
diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp
index 3a0d389..e544816 100644
--- a/mlpp/output_layer/output_layer.cpp
+++ b/mlpp/output_layer/output_layer.cpp
@@ -12,7 +12,7 @@
 
 #include
 
-MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldOutputLayer::MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
 		n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
 	weights = MLPPUtilities::weightInitialization(n_hidden, weightInit);
 	bias = MLPPUtilities::biasInitialization();
@@ -113,14 +113,14 @@ MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::stri
 	cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }
 
-void MLPPOutputLayer::forwardPass() {
+void MLPPOldOutputLayer::forwardPass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
 	a = (avn.*activation_map[activation])(z, 0);
 }
 
-void MLPPOutputLayer::Test(std::vector<real_t> x) {
+void MLPPOldOutputLayer::Test(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	z_test = alg.dot(weights, x) + bias;
diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h
index 362545e..5cc40dd 100644
--- a/mlpp/output_layer/output_layer.h
+++ b/mlpp/output_layer/output_layer.h
@@ -18,9 +18,9 @@
 
 #include
 
-class MLPPOutputLayer {
+class MLPPOldOutputLayer {
 public:
-	MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+	MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
 
 	int n_hidden;
 	std::string activation;
diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp
index c142c79..968e414 100644
--- a/mlpp/wgan/wgan.cpp
+++ b/mlpp/wgan/wgan.cpp
@@ -108,10 +108,10 @@ void MLPPWGAN::save(std::string fileName) {
 void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (network.empty()) {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
 		network[0].forwardPass();
 	} else {
-		network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+		network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
 		network[network.size() - 1].forwardPass();
 	}
 }
@@ -119,9 +119,9 @@ void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weight
 
 void MLPPWGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
 	MLPPLinAlg alg;
 	if (!network.empty()) {
-		outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
+		outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
 	} else { // Should never happen.
-		outputLayer = new MLPPOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+		outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
 	}
 }
diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h
index 15a50e8..57329d7 100644
--- a/mlpp/wgan/wgan.h
+++ b/mlpp/wgan/wgan.h
@@ -46,8 +46,8 @@ private:
 	std::vector<std::vector<real_t>> outputSet;
 	std::vector<real_t> y_hat;
 
-	std::vector<MLPPHiddenLayer> network;
-	MLPPOutputLayer *outputLayer;
+	std::vector<MLPPOldHiddenLayer> network;
+	MLPPOldOutputLayer *outputLayer;
 
 	int n;
 	int k;
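Reviewer note (not part of the patch): a minimal sketch of how the renamed classes are driven, mirroring the MLPPANN::addLayer()/addOutputLayer() call sites above. The constructor argument order, forwardPass(), and the public members n_hidden and a are taken from the hunks in this patch; the include paths, the toy inputSet, the "Default"/"None" weightInit and reg strings, the lambda/alpha values, and the surrounding main() are illustrative assumptions, not code from the repository.

#include <string>
#include <vector>

#include "mlpp/hidden_layer/hidden_layer.h" // MLPPOldHiddenLayer (assumed include path)
#include "mlpp/output_layer/output_layer.h" // MLPPOldOutputLayer (assumed include path)

int main() {
	// Toy 4x2 input matrix; real_t is the project's floating-point typedef.
	std::vector<std::vector<real_t>> inputSet = {
		{ 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 }
	};

	// MLPPOldHiddenLayer(n_hidden, activation, input, weightInit, reg, lambda, alpha)
	MLPPOldHiddenLayer hidden(3, "Sigmoid", inputSet, "Default", "None", 0.5, 0.5);
	hidden.forwardPass(); // computes hidden.z and hidden.a

	// MLPPOldOutputLayer(n_hidden, activation, cost, input, weightInit, reg, lambda, alpha),
	// fed with the previous layer's activations, as addOutputLayer() does above.
	MLPPOldOutputLayer output(hidden.n_hidden, "Sigmoid", "LogLoss", hidden.a, "Default", "None", 0.5, 0.5);
	output.forwardPass(); // computes output.z and output.a

	return 0;
}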