Mirror of https://github.com/Relintai/pmlpp.git (synced 2025-02-01 17:07:02 +01:00)
Renamed MLPPHiddenLayer and MLPPOutputLayer.
commit ea4978f535
parent 27d187c67a
@@ -652,10 +652,10 @@ real_t MLPPANN::applyLearningRateScheduler(real_t learningRate, real_t decayCons
 void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     if (network.empty()) {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
         network[0].forwardPass();
     } else {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
         network[network.size() - 1].forwardPass();
     }
 }
@@ -663,9 +663,9 @@ void MLPPANN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     MLPPLinAlg alg;
     if (!network.empty()) {
-        outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+        outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, activation, loss, network[network.size() - 1].a, weightInit, reg, lambda, alpha);
     } else {
-        outputLayer = new MLPPOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
+        outputLayer = new MLPPOldOutputLayer(k, activation, loss, inputSet, weightInit, reg, lambda, alpha);
     }
 }
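For orientation, a minimal usage sketch of the two MLPPANN entry points touched above. Only the addLayer/addOutputLayer signatures are taken from this diff; the MLPPANN constructor, the "Default"/"None" option strings, and the gradientDescent call are assumptions about the surrounding library:

    // Hypothetical driver code, not part of this commit.
    std::vector<std::vector<real_t>> X = { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } };
    std::vector<real_t> y = { 0, 1, 1, 0 };

    MLPPANN ann(X, y);                                          // assumed constructor (inputSet, outputSet)
    ann.addLayer(5, "Sigmoid", "Default", "None", 0.5, 0.5);    // first hidden layer is fed inputSet
    ann.addLayer(5, "Sigmoid", "Default", "None", 0.5, 0.5);    // later layers chain off the previous layer's `a`
    ann.addOutputLayer("Sigmoid", "LogLoss", "Default", "None", 0.5, 0.5);
    ann.gradientDescent(0.1, 1000, false);                      // assumed trainer; not shown in this diff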
@@ -56,8 +56,8 @@ private:
     std::vector<real_t> outputSet;
     std::vector<real_t> y_hat;

-    std::vector<MLPPHiddenLayer> network;
-    MLPPOutputLayer *outputLayer;
+    std::vector<MLPPOldHiddenLayer> network;
+    MLPPOldOutputLayer *outputLayer;

     int n;
     int k;
@@ -99,10 +99,10 @@ void MLPPGAN::save(std::string fileName) {
 void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     MLPPLinAlg alg;
     if (network.empty()) {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
         network[0].forwardPass();
     } else {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
         network[network.size() - 1].forwardPass();
     }
 }
@@ -110,9 +110,9 @@ void MLPPGAN::addLayer(int n_hidden, std::string activation, std::string weightI
 void MLPPGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     MLPPLinAlg alg;
     if (!network.empty()) {
-        outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
+        outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
     } else {
-        outputLayer = new MLPPOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
+        outputLayer = new MLPPOldOutputLayer(k, "Sigmoid", "LogLoss", alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha);
     }
 }
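The MLPPGAN discriminator head above is hard-wired to a sigmoid activation with log loss. As a standalone reference for what that per-sample loss computes (plain C++, not pmlpp code):

    #include <cmath>

    // Binary cross-entropy (log loss) for one prediction y_hat in (0, 1) and label y in {0, 1}.
    double log_loss(double y_hat, double y) {
        return -(y * std::log(y_hat) + (1.0 - y) * std::log(1.0 - y_hat));
    }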
@@ -48,8 +48,8 @@ private:
     std::vector<std::vector<real_t>> outputSet;
     std::vector<real_t> y_hat;

-    std::vector<MLPPHiddenLayer> network;
-    MLPPOutputLayer *outputLayer;
+    std::vector<MLPPOldHiddenLayer> network;
+    MLPPOldOutputLayer *outputLayer;

     int n;
     int k;
@@ -13,7 +13,7 @@
 #include <random>


-MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldHiddenLayer::MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
         n_hidden(n_hidden), activation(activation), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
     weights = MLPPUtilities::weightInitialization(input[0].size(), n_hidden, weightInit);
     bias = MLPPUtilities::biasInitialization(n_hidden);
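From the constructor above, the layer's weight matrix is input[0].size() x n_hidden and its bias vector has n_hidden entries. A small shape-only sketch, with hypothetical constant initializers standing in for MLPPUtilities:

    #include <vector>

    // Hypothetical stand-ins whose only purpose is to make the parameter shapes explicit.
    std::vector<std::vector<double>> init_weights(size_t n_input, size_t n_hidden) {
        return std::vector<std::vector<double>>(n_input, std::vector<double>(n_hidden, 0.01));
    }

    std::vector<double> init_bias(size_t n_hidden) {
        return std::vector<double>(n_hidden, 0.0);
    }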
@@ -97,14 +97,14 @@ MLPPHiddenLayer::MLPPHiddenLayer(int n_hidden, std::string activation, std::vect
     activationTest_map["Arcoth"] = &MLPPActivation::arcoth;
 }

-void MLPPHiddenLayer::forwardPass() {
+void MLPPOldHiddenLayer::forwardPass() {
     MLPPLinAlg alg;
     MLPPActivation avn;
     z = alg.mat_vec_add(alg.matmult(input, weights), bias);
     a = (avn.*activation_map[activation])(z, 0);
 }

-void MLPPHiddenLayer::Test(std::vector<real_t> x) {
+void MLPPOldHiddenLayer::Test(std::vector<real_t> x) {
     MLPPLinAlg alg;
     MLPPActivation avn;
     z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
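forwardPass() above computes z = input * weights with the bias added to every row, then applies the selected activation element-wise. A self-contained sketch of the same computation with plain vectors instead of MLPPLinAlg, assuming a sigmoid activation:

    #include <cmath>
    #include <vector>

    using Matrix = std::vector<std::vector<double>>;

    // a = sigmoid(X * W + b), with b broadcast across the rows of X * W.
    Matrix forward(const Matrix &X, const Matrix &W, const std::vector<double> &b) {
        Matrix a(X.size(), std::vector<double>(W[0].size(), 0.0));
        for (size_t i = 0; i < X.size(); ++i) {
            for (size_t j = 0; j < W[0].size(); ++j) {
                double z = b[j];
                for (size_t k = 0; k < W.size(); ++k) {
                    z += X[i][k] * W[k][j];
                }
                a[i][j] = 1.0 / (1.0 + std::exp(-z)); // sigmoid
            }
        }
        return a;
    }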
@@ -17,9 +17,9 @@
 #include <vector>


-class MLPPHiddenLayer {
+class MLPPOldHiddenLayer {
 public:
-    MLPPHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+    MLPPOldHiddenLayer(int n_hidden, std::string activation, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);

     int n_hidden;
     std::string activation;
@@ -141,10 +141,10 @@ void MLPPMANN::save(std::string fileName) {

 void MLPPMANN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     if (network.empty()) {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, inputSet, weightInit, reg, lambda, alpha));
         network[0].forwardPass();
     } else {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
         network[network.size() - 1].forwardPass();
     }
 }
@@ -39,7 +39,7 @@ private:
     std::vector<std::vector<real_t>> outputSet;
     std::vector<std::vector<real_t>> y_hat;

-    std::vector<MLPPHiddenLayer> network;
+    std::vector<MLPPOldHiddenLayer> network;
     MLPPMultiOutputLayer *outputLayer;

     int n;
@@ -12,7 +12,7 @@
 #include <random>


-MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
+MLPPOldOutputLayer::MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha) :
         n_hidden(n_hidden), activation(activation), cost(cost), input(input), weightInit(weightInit), reg(reg), lambda(lambda), alpha(alpha) {
     weights = MLPPUtilities::weightInitialization(n_hidden, weightInit);
     bias = MLPPUtilities::biasInitialization();
@@ -113,14 +113,14 @@ MLPPOutputLayer::MLPPOutputLayer(int n_hidden, std::string activation, std::stri
     cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
 }

-void MLPPOutputLayer::forwardPass() {
+void MLPPOldOutputLayer::forwardPass() {
     MLPPLinAlg alg;
     MLPPActivation avn;
     z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
     a = (avn.*activation_map[activation])(z, 0);
 }

-void MLPPOutputLayer::Test(std::vector<real_t> x) {
+void MLPPOldOutputLayer::Test(std::vector<real_t> x) {
     MLPPLinAlg alg;
     MLPPActivation avn;
     z_test = alg.dot(weights, x) + bias;
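For a single example, the output layer above reduces to a dot product plus a scalar bias, followed by the activation. A minimal standalone version of that path, with sigmoid assumed for the activation:

    #include <cmath>
    #include <vector>

    // y_hat = sigmoid(w . x + b) for one input vector x.
    double predict_one(const std::vector<double> &w, const std::vector<double> &x, double b) {
        double z = b;
        for (size_t i = 0; i < w.size(); ++i) {
            z += w[i] * x[i];
        }
        return 1.0 / (1.0 + std::exp(-z));
    }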
@@ -18,9 +18,9 @@
 #include <vector>


-class MLPPOutputLayer {
+class MLPPOldOutputLayer {
 public:
-    MLPPOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);
+    MLPPOldOutputLayer(int n_hidden, std::string activation, std::string cost, std::vector<std::vector<real_t>> input, std::string weightInit, std::string reg, real_t lambda, real_t alpha);

     int n_hidden;
     std::string activation;
@@ -108,10 +108,10 @@ void MLPPWGAN::save(std::string fileName) {
 void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     MLPPLinAlg alg;
     if (network.empty()) {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
         network[0].forwardPass();
     } else {
-        network.push_back(MLPPHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
+        network.push_back(MLPPOldHiddenLayer(n_hidden, activation, network[network.size() - 1].a, weightInit, reg, lambda, alpha));
         network[network.size() - 1].forwardPass();
     }
 }
@@ -119,9 +119,9 @@ void MLPPWGAN::addLayer(int n_hidden, std::string activation, std::string weight
 void MLPPWGAN::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
     MLPPLinAlg alg;
     if (!network.empty()) {
-        outputLayer = new MLPPOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
+        outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
     } else { // Should never happen.
-        outputLayer = new MLPPOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
+        outputLayer = new MLPPOldOutputLayer(k, "Linear", "WassersteinLoss", alg.gaussianNoise(n, k), weightInit, "WeightClipping", -0.01, 0.01);
     }
 }
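The WGAN critic head above passes "WeightClipping" with -0.01 and 0.01, which reads as the usual Wasserstein weight clamp. A minimal sketch of that clamp, assuming the two values are the lower and upper bounds (this diff does not show how the regularizer actually consumes them):

    #include <algorithm>
    #include <vector>

    // Clamp every weight into [lo, hi], e.g. lo = -0.01, hi = 0.01 as in the critic above.
    void clip_weights(std::vector<double> &weights, double lo, double hi) {
        for (double &w : weights) {
            w = std::clamp(w, lo, hi);
        }
    }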
|
@ -46,8 +46,8 @@ private:
|
|||||||
std::vector<std::vector<real_t>> outputSet;
|
std::vector<std::vector<real_t>> outputSet;
|
||||||
std::vector<real_t> y_hat;
|
std::vector<real_t> y_hat;
|
||||||
|
|
||||||
std::vector<MLPPHiddenLayer> network;
|
std::vector<MLPPOldHiddenLayer> network;
|
||||||
MLPPOutputLayer *outputLayer;
|
MLPPOldOutputLayer *outputLayer;
|
||||||
|
|
||||||
int n;
|
int n;
|
||||||
int k;
|
int k;
|
||||||
|