From 0e9d8bcb41c432b775cf2632adbb6baa7240eae2 Mon Sep 17 00:00:00 2001 From: Relintai Date: Tue, 24 Jan 2023 19:37:08 +0100 Subject: [PATCH] Prefix Cost with MLPP. --- main.cpp | 4 +- mlpp/ann/ann.cpp | 24 +++--- mlpp/auto_encoder/auto_encoder.cpp | 2 +- mlpp/c_log_log_reg/c_log_log_reg.cpp | 2 +- mlpp/convolutions/convolutions.cpp | 46 +++++----- mlpp/convolutions/convolutions.h | 4 +- mlpp/cost/cost.cpp | 84 +++++++++---------- mlpp/cost/cost.h | 2 +- mlpp/dual_svc/dual_svc.cpp | 8 +- mlpp/exp_reg/exp_reg.cpp | 2 +- mlpp/gan/gan.cpp | 8 +- mlpp/lin_reg/lin_reg.cpp | 2 +- mlpp/log_reg/log_reg.cpp | 2 +- mlpp/mann/mann.cpp | 4 +- mlpp/mlp/mlp.cpp | 2 +- .../multi_output_layer/multi_output_layer.cpp | 32 +++---- mlpp/multi_output_layer/multi_output_layer.h | 4 +- mlpp/output_layer/output_layer.cpp | 32 +++---- mlpp/output_layer/output_layer.h | 4 +- mlpp/probit_reg/probit_reg.cpp | 2 +- mlpp/softmax_net/softmax_net.cpp | 2 +- mlpp/softmax_reg/softmax_reg.cpp | 2 +- mlpp/svc/svc.cpp | 8 +- mlpp/tanh_reg/tanh_reg.cpp | 2 +- mlpp/wgan/wgan.cpp | 8 +- 25 files changed, 146 insertions(+), 146 deletions(-) diff --git a/main.cpp b/main.cpp index 040e031..b49aa64 100644 --- a/main.cpp +++ b/main.cpp @@ -123,9 +123,9 @@ int main() { Stat stat; LinAlg alg; MLPPActivation avn; - Cost cost; + MLPPCost cost; Data data; - Convolutions conv; + MLPPConvolutions conv; // DATA SETS // std::vector> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}}; diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp index 30d0770..7f8ccaf 100644 --- a/mlpp/ann/ann.cpp +++ b/mlpp/ann/ann.cpp @@ -54,7 +54,7 @@ double MLPPANN::modelTest(std::vector x) { } void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -88,7 +88,7 @@ void MLPPANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -126,7 +126,7 @@ void MLPPANN::SGD(double learning_rate, int max_epoch, bool UI) { } void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -164,7 +164,7 @@ void MLPPANN::MBGD(double learning_rate, int max_epoch, int mini_batch_size, boo } void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, double gamma, bool NAG, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -221,7 +221,7 @@ void MLPPANN::Momentum(double learning_rate, int max_epoch, int mini_batch_size, } void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -277,7 +277,7 @@ void MLPPANN::Adagrad(double learning_rate, int max_epoch, int mini_batch_size, } void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, double b1, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -333,7 +333,7 @@ void MLPPANN::Adadelta(double learning_rate, int max_epoch, int mini_batch_size, } void MLPPANN::Adam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -400,7 +400,7 @@ void MLPPANN::Adam(double learning_rate, int 
max_epoch, int mini_batch_size, dou } void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -465,7 +465,7 @@ void MLPPANN::Adamax(double learning_rate, int max_epoch, int mini_batch_size, d } void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -535,7 +535,7 @@ void MLPPANN::Nadam(double learning_rate, int max_epoch, int mini_batch_size, do } void MLPPANN::AMSGrad(double learning_rate, int max_epoch, int mini_batch_size, double b1, double b2, double e, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; @@ -671,7 +671,7 @@ void MLPPANN::addOutputLayer(std::string activation, std::string loss, std::stri double MLPPANN::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; double totalRegTerm = 0; auto cost_function = outputLayer->cost_map[outputLayer->cost]; @@ -719,7 +719,7 @@ void MLPPANN::updateParameters(std::vector>> hid std::tuple>>, std::vector> MLPPANN::computeGradients(std::vector y_hat, std::vector outputSet) { // std::cout << "BEGIN" << std::endl; - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; diff --git a/mlpp/auto_encoder/auto_encoder.cpp b/mlpp/auto_encoder/auto_encoder.cpp index 2cf3dc2..55f4475 100644 --- a/mlpp/auto_encoder/auto_encoder.cpp +++ b/mlpp/auto_encoder/auto_encoder.cpp @@ -208,7 +208,7 @@ void MLPPAutoEncoder::save(std::string fileName) { } double MLPPAutoEncoder::Cost(std::vector> y_hat, std::vector> y) { - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, inputSet); } diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp index 25a2a08..7e8ff27 100644 --- a/mlpp/c_log_log_reg/c_log_log_reg.cpp +++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp @@ -186,7 +186,7 @@ double MLPPCLogLogReg::score() { double MLPPCLogLogReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/convolutions/convolutions.cpp b/mlpp/convolutions/convolutions.cpp index e501af4..74d3b78 100644 --- a/mlpp/convolutions/convolutions.cpp +++ b/mlpp/convolutions/convolutions.cpp @@ -10,11 +10,11 @@ #include #include -Convolutions::Convolutions() : +MLPPConvolutions::MLPPConvolutions() : prewittHorizontal({ { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } }), prewittVertical({ { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } }), sobelHorizontal({ { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }), sobelVertical({ { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }), scharrHorizontal({ { 3, 10, 3 }, { 0, 0, 0 }, { -3, -10, -3 } }), scharrVertical({ { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }), robertsHorizontal({ { 0, 1 }, { -1, 0 } }), robertsVertical({ { 1, 0 }, { 0, -1 } }) { } -std::vector> Convolutions::convolve(std::vector> input, std::vector> filter, int S, int P) { +std::vector> MLPPConvolutions::convolve(std::vector> input, std::vector> filter, int S, int P) { LinAlg alg; std::vector> featureMap; int N = input.size(); @@ -70,7 +70,7 @@ std::vector> Convolutions::convolve(std::vector>> Convolutions::convolve(std::vector>> input, std::vector>> filter, int S, int P) { +std::vector>> 
MLPPConvolutions::convolve(std::vector>> input, std::vector>> filter, int S, int P) { LinAlg alg; std::vector>> featureMap; int N = input[0].size(); @@ -136,7 +136,7 @@ std::vector>> Convolutions::convolve(std::vector return featureMap; } -std::vector> Convolutions::pool(std::vector> input, int F, int S, std::string type) { +std::vector> MLPPConvolutions::pool(std::vector> input, int F, int S, std::string type) { LinAlg alg; std::vector> pooledMap; int N = input.size(); @@ -176,7 +176,7 @@ std::vector> Convolutions::pool(std::vector>> Convolutions::pool(std::vector>> input, int F, int S, std::string type) { +std::vector>> MLPPConvolutions::pool(std::vector>> input, int F, int S, std::string type) { std::vector>> pooledMap; for (int i = 0; i < input.size(); i++) { pooledMap.push_back(pool(input[i], F, S, type)); @@ -184,7 +184,7 @@ std::vector>> Convolutions::pool(std::vector> input, std::string type) { +double MLPPConvolutions::globalPool(std::vector> input, std::string type) { LinAlg alg; if (type == "Average") { Stat stat; @@ -196,7 +196,7 @@ double Convolutions::globalPool(std::vector> input, std::str } } -std::vector Convolutions::globalPool(std::vector>> input, std::string type) { +std::vector MLPPConvolutions::globalPool(std::vector>> input, std::string type) { std::vector pooledMap; for (int i = 0; i < input.size(); i++) { pooledMap.push_back(globalPool(input[i], type)); @@ -204,12 +204,12 @@ std::vector Convolutions::globalPool(std::vector> Convolutions::gaussianFilter2D(int size, double std) { +std::vector> MLPPConvolutions::gaussianFilter2D(int size, double std) { std::vector> filter; filter.resize(size); for (int i = 0; i < filter.size(); i++) { @@ -229,7 +229,7 @@ been easier to carry out the calculation explicitly, mainly because it is more i and also because my convolution algorithm is only built for filters with equally sized heights and widths. */ -std::vector> Convolutions::dx(std::vector> input) { +std::vector> MLPPConvolutions::dx(std::vector> input) { std::vector> deriv; // We assume a gray scale image. 
deriv.resize(input.size()); for (int i = 0; i < deriv.size(); i++) { @@ -250,7 +250,7 @@ std::vector> Convolutions::dx(std::vector> Convolutions::dy(std::vector> input) { +std::vector> MLPPConvolutions::dy(std::vector> input) { std::vector> deriv; deriv.resize(input.size()); for (int i = 0; i < deriv.size(); i++) { @@ -271,14 +271,14 @@ std::vector> Convolutions::dy(std::vector> Convolutions::gradMagnitude(std::vector> input) { +std::vector> MLPPConvolutions::gradMagnitude(std::vector> input) { LinAlg alg; std::vector> xDeriv_2 = alg.hadamard_product(dx(input), dx(input)); std::vector> yDeriv_2 = alg.hadamard_product(dy(input), dy(input)); return alg.sqrt(alg.addition(xDeriv_2, yDeriv_2)); } -std::vector> Convolutions::gradOrientation(std::vector> input) { +std::vector> MLPPConvolutions::gradOrientation(std::vector> input) { std::vector> deriv; deriv.resize(input.size()); for (int i = 0; i < deriv.size(); i++) { @@ -295,7 +295,7 @@ std::vector> Convolutions::gradOrientation(std::vector>> Convolutions::computeM(std::vector> input) { +std::vector>> MLPPConvolutions::computeM(std::vector> input) { double const SIGMA = 1; double const GAUSSIAN_SIZE = 3; @@ -313,7 +313,7 @@ std::vector>> Convolutions::computeM(std::vector std::vector>> M = { xxDeriv, yyDeriv, xyDeriv }; return M; } -std::vector> Convolutions::harrisCornerDetection(std::vector> input) { +std::vector> MLPPConvolutions::harrisCornerDetection(std::vector> input) { double const k = 0.05; // Empirically determined wherein k -> [0.04, 0.06], though conventionally 0.05 is typically used as well. LinAlg alg; std::vector>> M = computeM(input); @@ -340,34 +340,34 @@ std::vector> Convolutions::harrisCornerDetection(std::v return imageTypes; } -std::vector> Convolutions::getPrewittHorizontal() { +std::vector> MLPPConvolutions::getPrewittHorizontal() { return prewittHorizontal; } -std::vector> Convolutions::getPrewittVertical() { +std::vector> MLPPConvolutions::getPrewittVertical() { return prewittVertical; } -std::vector> Convolutions::getSobelHorizontal() { +std::vector> MLPPConvolutions::getSobelHorizontal() { return sobelHorizontal; } -std::vector> Convolutions::getSobelVertical() { +std::vector> MLPPConvolutions::getSobelVertical() { return sobelVertical; } -std::vector> Convolutions::getScharrHorizontal() { +std::vector> MLPPConvolutions::getScharrHorizontal() { return scharrHorizontal; } -std::vector> Convolutions::getScharrVertical() { +std::vector> MLPPConvolutions::getScharrVertical() { return scharrVertical; } -std::vector> Convolutions::getRobertsHorizontal() { +std::vector> MLPPConvolutions::getRobertsHorizontal() { return robertsHorizontal; } -std::vector> Convolutions::getRobertsVertical() { +std::vector> MLPPConvolutions::getRobertsVertical() { return robertsVertical; } diff --git a/mlpp/convolutions/convolutions.h b/mlpp/convolutions/convolutions.h index 4f97db5..7dc4c66 100644 --- a/mlpp/convolutions/convolutions.h +++ b/mlpp/convolutions/convolutions.h @@ -5,9 +5,9 @@ #include #include -class Convolutions { +class MLPPConvolutions { public: - Convolutions(); + MLPPConvolutions(); std::vector> convolve(std::vector> input, std::vector> filter, int S, int P = 0); std::vector>> convolve(std::vector>> input, std::vector>> filter, int S, int P = 0); std::vector> pool(std::vector> input, int F, int S, std::string type); diff --git a/mlpp/cost/cost.cpp b/mlpp/cost/cost.cpp index cdfe2a6..37641a2 100644 --- a/mlpp/cost/cost.cpp +++ b/mlpp/cost/cost.cpp @@ -11,7 +11,7 @@ #include -double Cost::MSE(std::vector y_hat, std::vector y) { 
+double MLPPCost::MSE(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]); @@ -19,7 +19,7 @@ double Cost::MSE(std::vector y_hat, std::vector y) { return sum / 2 * y_hat.size(); } -double Cost::MSE(std::vector> y_hat, std::vector> y) { +double MLPPCost::MSE(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -29,17 +29,17 @@ double Cost::MSE(std::vector> y_hat, std::vector Cost::MSEDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::MSEDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.subtraction(y_hat, y); } -std::vector> Cost::MSEDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::MSEDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.subtraction(y_hat, y); } -double Cost::RMSE(std::vector y_hat, std::vector y) { +double MLPPCost::RMSE(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]); @@ -47,7 +47,7 @@ double Cost::RMSE(std::vector y_hat, std::vector y) { return sqrt(sum / y_hat.size()); } -double Cost::RMSE(std::vector> y_hat, std::vector> y) { +double MLPPCost::RMSE(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -57,17 +57,17 @@ double Cost::RMSE(std::vector> y_hat, std::vector Cost::RMSEDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::RMSEDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y)); } -std::vector> Cost::RMSEDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::RMSEDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.scalarMultiply(1 / (2 / sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y)); } -double Cost::MAE(std::vector y_hat, std::vector y) { +double MLPPCost::MAE(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += abs((y_hat[i] - y[i])); @@ -75,7 +75,7 @@ double Cost::MAE(std::vector y_hat, std::vector y) { return sum / y_hat.size(); } -double Cost::MAE(std::vector> y_hat, std::vector> y) { +double MLPPCost::MAE(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -85,7 +85,7 @@ double Cost::MAE(std::vector> y_hat, std::vector Cost::MAEDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::MAEDeriv(std::vector y_hat, std::vector y) { std::vector deriv; deriv.resize(y_hat.size()); for (int i = 0; i < deriv.size(); i++) { @@ -100,7 +100,7 @@ std::vector Cost::MAEDeriv(std::vector y_hat, std::vector> Cost::MAEDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::MAEDeriv(std::vector> y_hat, std::vector> y) { std::vector> deriv; deriv.resize(y_hat.size()); for (int i = 0; i < deriv.size(); i++) { @@ -120,7 +120,7 @@ std::vector> Cost::MAEDeriv(std::vector> return deriv; } -double Cost::MBE(std::vector y_hat, std::vector y) { +double MLPPCost::MBE(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += (y_hat[i] - y[i]); @@ -128,7 +128,7 @@ double Cost::MBE(std::vector y_hat, std::vector y) { return sum / y_hat.size(); } -double Cost::MBE(std::vector> y_hat, std::vector> y) { +double MLPPCost::MBE(std::vector> y_hat, std::vector> y) { double sum = 0; for 
(int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -138,17 +138,17 @@ double Cost::MBE(std::vector> y_hat, std::vector Cost::MBEDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::MBEDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.onevec(y_hat.size()); } -std::vector> Cost::MBEDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::MBEDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.onemat(y_hat.size(), y_hat[0].size()); } -double Cost::LogLoss(std::vector y_hat, std::vector y) { +double MLPPCost::LogLoss(std::vector y_hat, std::vector y) { double sum = 0; double eps = 1e-8; for (int i = 0; i < y_hat.size(); i++) { @@ -158,7 +158,7 @@ double Cost::LogLoss(std::vector y_hat, std::vector y) { return sum / y_hat.size(); } -double Cost::LogLoss(std::vector> y_hat, std::vector> y) { +double MLPPCost::LogLoss(std::vector> y_hat, std::vector> y) { double sum = 0; double eps = 1e-8; for (int i = 0; i < y_hat.size(); i++) { @@ -170,17 +170,17 @@ double Cost::LogLoss(std::vector> y_hat, std::vector Cost::LogLossDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::LogLossDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat)))); } -std::vector> Cost::LogLossDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::LogLossDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat)))); } -double Cost::CrossEntropy(std::vector y_hat, std::vector y) { +double MLPPCost::CrossEntropy(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += y[i] * std::log(y_hat[i]); @@ -189,7 +189,7 @@ double Cost::CrossEntropy(std::vector y_hat, std::vector y) { return -1 * sum; } -double Cost::CrossEntropy(std::vector> y_hat, std::vector> y) { +double MLPPCost::CrossEntropy(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -200,17 +200,17 @@ double Cost::CrossEntropy(std::vector> y_hat, std::vector Cost::CrossEntropyDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::CrossEntropyDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)); } -std::vector> Cost::CrossEntropyDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::CrossEntropyDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)); } -double Cost::HuberLoss(std::vector y_hat, std::vector y, double delta) { +double MLPPCost::HuberLoss(std::vector y_hat, std::vector y, double delta) { LinAlg alg; double sum = 0; for (int i = 0; i < y_hat.size(); i++) { @@ -223,7 +223,7 @@ double Cost::HuberLoss(std::vector y_hat, std::vector y, double return sum; } -double Cost::HuberLoss(std::vector> y_hat, std::vector> y, double delta) { +double MLPPCost::HuberLoss(std::vector> y_hat, std::vector> y, double delta) { LinAlg alg; double sum = 0; for (int i = 0; i < y_hat.size(); i++) { @@ -238,7 +238,7 @@ double Cost::HuberLoss(std::vector> y_hat, std::vector Cost::HuberLossDeriv(std::vector y_hat, 
std::vector y, double delta) { +std::vector MLPPCost::HuberLossDeriv(std::vector y_hat, std::vector y, double delta) { LinAlg alg; double sum = 0; std::vector deriv; @@ -258,7 +258,7 @@ std::vector Cost::HuberLossDeriv(std::vector y_hat, std::vector< return deriv; } -std::vector> Cost::HuberLossDeriv(std::vector> y_hat, std::vector> y, double delta) { +std::vector> MLPPCost::HuberLossDeriv(std::vector> y_hat, std::vector> y, double delta) { LinAlg alg; double sum = 0; std::vector> deriv; @@ -283,7 +283,7 @@ std::vector> Cost::HuberLossDeriv(std::vector y_hat, std::vector y) { +double MLPPCost::HingeLoss(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += fmax(0, 1 - y[i] * y_hat[i]); @@ -292,7 +292,7 @@ double Cost::HingeLoss(std::vector y_hat, std::vector y) { return sum / y_hat.size(); } -double Cost::HingeLoss(std::vector> y_hat, std::vector> y) { +double MLPPCost::HingeLoss(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -303,7 +303,7 @@ double Cost::HingeLoss(std::vector> y_hat, std::vector Cost::HingeLossDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::HingeLossDeriv(std::vector y_hat, std::vector y) { std::vector deriv; deriv.resize(y_hat.size()); for (int i = 0; i < y_hat.size(); i++) { @@ -316,7 +316,7 @@ std::vector Cost::HingeLossDeriv(std::vector y_hat, std::vector< return deriv; } -std::vector> Cost::HingeLossDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::HingeLossDeriv(std::vector> y_hat, std::vector> y) { std::vector> deriv; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -330,7 +330,7 @@ std::vector> Cost::HingeLossDeriv(std::vector y_hat, std::vector y) { +double MLPPCost::WassersteinLoss(std::vector y_hat, std::vector y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { sum += y_hat[i] * y[i]; @@ -338,7 +338,7 @@ double Cost::WassersteinLoss(std::vector y_hat, std::vector y) { return -sum / y_hat.size(); } -double Cost::WassersteinLoss(std::vector> y_hat, std::vector> y) { +double MLPPCost::WassersteinLoss(std::vector> y_hat, std::vector> y) { double sum = 0; for (int i = 0; i < y_hat.size(); i++) { for (int j = 0; j < y_hat[i].size(); j++) { @@ -348,39 +348,39 @@ double Cost::WassersteinLoss(std::vector> y_hat, std::vector return -sum / y_hat.size(); } -std::vector Cost::WassersteinLossDeriv(std::vector y_hat, std::vector y) { +std::vector MLPPCost::WassersteinLossDeriv(std::vector y_hat, std::vector y) { LinAlg alg; return alg.scalarMultiply(-1, y); // Simple. } -std::vector> Cost::WassersteinLossDeriv(std::vector> y_hat, std::vector> y) { +std::vector> MLPPCost::WassersteinLossDeriv(std::vector> y_hat, std::vector> y) { LinAlg alg; return alg.scalarMultiply(-1, y); // Simple. 
} -double Cost::HingeLoss(std::vector y_hat, std::vector y, std::vector weights, double C) { +double MLPPCost::HingeLoss(std::vector y_hat, std::vector y, std::vector weights, double C) { LinAlg alg; Reg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } -double Cost::HingeLoss(std::vector> y_hat, std::vector> y, std::vector> weights, double C) { +double MLPPCost::HingeLoss(std::vector> y_hat, std::vector> y, std::vector> weights, double C) { LinAlg alg; Reg regularization; return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge"); } -std::vector Cost::HingeLossDeriv(std::vector y_hat, std::vector y, double C) { +std::vector MLPPCost::HingeLossDeriv(std::vector y_hat, std::vector y, double C) { LinAlg alg; Reg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } -std::vector> Cost::HingeLossDeriv(std::vector> y_hat, std::vector> y, double C) { +std::vector> MLPPCost::HingeLossDeriv(std::vector> y_hat, std::vector> y, double C) { LinAlg alg; Reg regularization; return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y)); } -double Cost::dualFormSVM(std::vector alpha, std::vector> X, std::vector y) { +double MLPPCost::dualFormSVM(std::vector alpha, std::vector> X, std::vector y) { LinAlg alg; std::vector> Y = alg.diag(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y. std::vector> K = alg.matmult(X, alg.transpose(X)); // TO DO: DON'T forget to add non-linear kernelizations. @@ -391,7 +391,7 @@ double Cost::dualFormSVM(std::vector alpha, std::vector Cost::dualFormSVMDeriv(std::vector alpha, std::vector> X, std::vector y) { +std::vector MLPPCost::dualFormSVMDeriv(std::vector alpha, std::vector> X, std::vector y) { LinAlg alg; std::vector> Y = alg.zeromat(y.size(), y.size()); for (int i = 0; i < y.size(); i++) { diff --git a/mlpp/cost/cost.h b/mlpp/cost/cost.h index 6857527..3e82930 100644 --- a/mlpp/cost/cost.h +++ b/mlpp/cost/cost.h @@ -11,7 +11,7 @@ #include -class Cost { +class MLPPCost { public: // Regression Costs double MSE(std::vector y_hat, std::vector y); diff --git a/mlpp/dual_svc/dual_svc.cpp b/mlpp/dual_svc/dual_svc.cpp index d33c194..e09f4a3 100644 --- a/mlpp/dual_svc/dual_svc.cpp +++ b/mlpp/dual_svc/dual_svc.cpp @@ -32,7 +32,7 @@ double DualSVC::modelTest(std::vector x) { } void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -80,7 +80,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { } // void DualSVC::SGD(double learning_rate, int max_epoch, bool UI){ -// class Cost cost; +// class MLPPCost cost; // MLPPActivation avn; // LinAlg alg; // Reg regularization; @@ -113,7 +113,7 @@ void DualSVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { // } // void DualSVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI){ -// class Cost cost; +// class MLPPCost cost; // MLPPActivation avn; // LinAlg alg; // Reg regularization; @@ -163,7 +163,7 @@ void DualSVC::save(std::string fileName) { } double DualSVC::Cost(std::vector alpha, std::vector> X, std::vector y) { - class Cost cost; + class MLPPCost cost; return cost.dualFormSVM(alpha, X, y); } diff --git a/mlpp/exp_reg/exp_reg.cpp b/mlpp/exp_reg/exp_reg.cpp index 22327be..4117981 100644 --- a/mlpp/exp_reg/exp_reg.cpp +++ b/mlpp/exp_reg/exp_reg.cpp @@ -205,7 +205,7 @@ void ExpReg::save(std::string fileName) { double 
ExpReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/gan/gan.cpp b/mlpp/gan/gan.cpp index 809a54d..0417c93 100644 --- a/mlpp/gan/gan.cpp +++ b/mlpp/gan/gan.cpp @@ -29,7 +29,7 @@ std::vector> GAN::generateExample(int n) { } void GAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -147,7 +147,7 @@ std::vector GAN::modelSetTestDiscriminator(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; double totalRegTerm = 0; auto cost_function = outputLayer->cost_map[outputLayer->cost]; @@ -208,7 +208,7 @@ void GAN::updateGeneratorParameters(std::vector> } std::tuple>>, std::vector> GAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -244,7 +244,7 @@ std::tuple>>, std::vector> G } std::vector>> GAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; diff --git a/mlpp/lin_reg/lin_reg.cpp b/mlpp/lin_reg/lin_reg.cpp index 44ce144..d4a0c74 100644 --- a/mlpp/lin_reg/lin_reg.cpp +++ b/mlpp/lin_reg/lin_reg.cpp @@ -219,7 +219,7 @@ void LinReg::save(std::string fileName) { double LinReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/log_reg/log_reg.cpp b/mlpp/log_reg/log_reg.cpp index 5aacf3a..21d8c00 100644 --- a/mlpp/log_reg/log_reg.cpp +++ b/mlpp/log_reg/log_reg.cpp @@ -182,7 +182,7 @@ void LogReg::save(std::string fileName) { double LogReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/mann/mann.cpp b/mlpp/mann/mann.cpp index d42f471..77cfb4c 100644 --- a/mlpp/mann/mann.cpp +++ b/mlpp/mann/mann.cpp @@ -53,7 +53,7 @@ std::vector MANN::modelTest(std::vector x) { } void MANN::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -159,7 +159,7 @@ void MANN::addOutputLayer(std::string activation, std::string loss, std::string double MANN::Cost(std::vector> y_hat, std::vector> y) { Reg regularization; - class Cost cost; + class MLPPCost cost; double totalRegTerm = 0; auto cost_function = outputLayer->cost_map[outputLayer->cost]; diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp index 833b280..c17f6f8 100644 --- a/mlpp/mlp/mlp.cpp +++ b/mlpp/mlp/mlp.cpp @@ -227,7 +227,7 @@ void MLP::save(std::string fileName) { double MLP::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg); } diff --git a/mlpp/multi_output_layer/multi_output_layer.cpp b/mlpp/multi_output_layer/multi_output_layer.cpp index 2a6af1e..959ba92 100644 --- a/mlpp/multi_output_layer/multi_output_layer.cpp +++ b/mlpp/multi_output_layer/multi_output_layer.cpp @@ -98,22 +98,22 @@ 
MultiOutputLayer::MultiOutputLayer(int n_output, int n_hidden, std::string activ activation_map["Arcoth"] = &MLPPActivation::arcoth; activationTest_map["Arcoth"] = &MLPPActivation::arcoth; - costDeriv_map["MSE"] = &Cost::MSEDeriv; - cost_map["MSE"] = &Cost::MSE; - costDeriv_map["RMSE"] = &Cost::RMSEDeriv; - cost_map["RMSE"] = &Cost::RMSE; - costDeriv_map["MAE"] = &Cost::MAEDeriv; - cost_map["MAE"] = &Cost::MAE; - costDeriv_map["MBE"] = &Cost::MBEDeriv; - cost_map["MBE"] = &Cost::MBE; - costDeriv_map["LogLoss"] = &Cost::LogLossDeriv; - cost_map["LogLoss"] = &Cost::LogLoss; - costDeriv_map["CrossEntropy"] = &Cost::CrossEntropyDeriv; - cost_map["CrossEntropy"] = &Cost::CrossEntropy; - costDeriv_map["HingeLoss"] = &Cost::HingeLossDeriv; - cost_map["HingeLoss"] = &Cost::HingeLoss; - costDeriv_map["WassersteinLoss"] = &Cost::HingeLossDeriv; - cost_map["WassersteinLoss"] = &Cost::HingeLoss; + costDeriv_map["MSE"] = &MLPPCost::MSEDeriv; + cost_map["MSE"] = &MLPPCost::MSE; + costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv; + cost_map["RMSE"] = &MLPPCost::RMSE; + costDeriv_map["MAE"] = &MLPPCost::MAEDeriv; + cost_map["MAE"] = &MLPPCost::MAE; + costDeriv_map["MBE"] = &MLPPCost::MBEDeriv; + cost_map["MBE"] = &MLPPCost::MBE; + costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv; + cost_map["LogLoss"] = &MLPPCost::LogLoss; + costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv; + cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy; + costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv; + cost_map["HingeLoss"] = &MLPPCost::HingeLoss; + costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv; + cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; } void MultiOutputLayer::forwardPass() { diff --git a/mlpp/multi_output_layer/multi_output_layer.h b/mlpp/multi_output_layer/multi_output_layer.h index 219efe5..26f2efb 100644 --- a/mlpp/multi_output_layer/multi_output_layer.h +++ b/mlpp/multi_output_layer/multi_output_layer.h @@ -35,8 +35,8 @@ public: std::map> (MLPPActivation::*)(std::vector>, bool)> activation_map; std::map (MLPPActivation::*)(std::vector, bool)> activationTest_map; - std::map>, std::vector>)> cost_map; - std::map> (Cost::*)(std::vector>, std::vector>)> costDeriv_map; + std::map>, std::vector>)> cost_map; + std::map> (MLPPCost::*)(std::vector>, std::vector>)> costDeriv_map; std::vector z_test; std::vector a_test; diff --git a/mlpp/output_layer/output_layer.cpp b/mlpp/output_layer/output_layer.cpp index 358d314..a86e37e 100644 --- a/mlpp/output_layer/output_layer.cpp +++ b/mlpp/output_layer/output_layer.cpp @@ -95,22 +95,22 @@ OutputLayer::OutputLayer(int n_hidden, std::string activation, std::string cost, activation_map["Arcoth"] = &MLPPActivation::arcoth; activationTest_map["Arcoth"] = &MLPPActivation::arcoth; - costDeriv_map["MSE"] = &Cost::MSEDeriv; - cost_map["MSE"] = &Cost::MSE; - costDeriv_map["RMSE"] = &Cost::RMSEDeriv; - cost_map["RMSE"] = &Cost::RMSE; - costDeriv_map["MAE"] = &Cost::MAEDeriv; - cost_map["MAE"] = &Cost::MAE; - costDeriv_map["MBE"] = &Cost::MBEDeriv; - cost_map["MBE"] = &Cost::MBE; - costDeriv_map["LogLoss"] = &Cost::LogLossDeriv; - cost_map["LogLoss"] = &Cost::LogLoss; - costDeriv_map["CrossEntropy"] = &Cost::CrossEntropyDeriv; - cost_map["CrossEntropy"] = &Cost::CrossEntropy; - costDeriv_map["HingeLoss"] = &Cost::HingeLossDeriv; - cost_map["HingeLoss"] = &Cost::HingeLoss; - costDeriv_map["WassersteinLoss"] = &Cost::HingeLossDeriv; - cost_map["WassersteinLoss"] = &Cost::HingeLoss; + costDeriv_map["MSE"] = &MLPPCost::MSEDeriv; + cost_map["MSE"] = 
&MLPPCost::MSE; + costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv; + cost_map["RMSE"] = &MLPPCost::RMSE; + costDeriv_map["MAE"] = &MLPPCost::MAEDeriv; + cost_map["MAE"] = &MLPPCost::MAE; + costDeriv_map["MBE"] = &MLPPCost::MBEDeriv; + cost_map["MBE"] = &MLPPCost::MBE; + costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv; + cost_map["LogLoss"] = &MLPPCost::LogLoss; + costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv; + cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy; + costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv; + cost_map["HingeLoss"] = &MLPPCost::HingeLoss; + costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv; + cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; } void OutputLayer::forwardPass() { diff --git a/mlpp/output_layer/output_layer.h b/mlpp/output_layer/output_layer.h index 3e60016..81bc306 100644 --- a/mlpp/output_layer/output_layer.h +++ b/mlpp/output_layer/output_layer.h @@ -34,8 +34,8 @@ public: std::map (MLPPActivation::*)(std::vector, bool)> activation_map; std::map activationTest_map; - std::map, std::vector)> cost_map; - std::map (Cost::*)(std::vector, std::vector)> costDeriv_map; + std::map, std::vector)> cost_map; + std::map (MLPPCost::*)(std::vector, std::vector)> costDeriv_map; double z_test; double a_test; diff --git a/mlpp/probit_reg/probit_reg.cpp b/mlpp/probit_reg/probit_reg.cpp index 84a5b50..8e689d1 100644 --- a/mlpp/probit_reg/probit_reg.cpp +++ b/mlpp/probit_reg/probit_reg.cpp @@ -209,7 +209,7 @@ void ProbitReg::save(std::string fileName) { double ProbitReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/softmax_net/softmax_net.cpp b/mlpp/softmax_net/softmax_net.cpp index f3cc3bc..62a658a 100644 --- a/mlpp/softmax_net/softmax_net.cpp +++ b/mlpp/softmax_net/softmax_net.cpp @@ -246,7 +246,7 @@ std::vector> SoftmaxNet::getEmbeddings() { double SoftmaxNet::Cost(std::vector> y_hat, std::vector> y) { Reg regularization; Data data; - class Cost cost; + class MLPPCost cost; return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg); } diff --git a/mlpp/softmax_reg/softmax_reg.cpp b/mlpp/softmax_reg/softmax_reg.cpp index 1418f1b..98b72e8 100644 --- a/mlpp/softmax_reg/softmax_reg.cpp +++ b/mlpp/softmax_reg/softmax_reg.cpp @@ -165,7 +165,7 @@ void SoftmaxReg::save(std::string fileName) { double SoftmaxReg::Cost(std::vector> y_hat, std::vector> y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/svc/svc.cpp b/mlpp/svc/svc.cpp index 2cb0c12..50b0e87 100644 --- a/mlpp/svc/svc.cpp +++ b/mlpp/svc/svc.cpp @@ -31,7 +31,7 @@ double SVC::modelTest(std::vector x) { } void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -64,7 +64,7 @@ void SVC::gradientDescent(double learning_rate, int max_epoch, bool UI) { } void SVC::SGD(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -107,7 +107,7 @@ void SVC::SGD(double learning_rate, int max_epoch, bool UI) { } void SVC::MBGD(double learning_rate, int max_epoch, int mini_batch_size, bool UI) { - class Cost cost; + class MLPPCost cost; 
MLPPActivation avn; LinAlg alg; Reg regularization; @@ -159,7 +159,7 @@ void SVC::save(std::string fileName) { } double SVC::Cost(std::vector z, std::vector y, std::vector weights, double C) { - class Cost cost; + class MLPPCost cost; return cost.HingeLoss(z, y, weights, C); } diff --git a/mlpp/tanh_reg/tanh_reg.cpp b/mlpp/tanh_reg/tanh_reg.cpp index 1c7e81c..194a205 100644 --- a/mlpp/tanh_reg/tanh_reg.cpp +++ b/mlpp/tanh_reg/tanh_reg.cpp @@ -158,7 +158,7 @@ void TanhReg::save(std::string fileName) { double TanhReg::Cost(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); } diff --git a/mlpp/wgan/wgan.cpp b/mlpp/wgan/wgan.cpp index 988bf04..7fdd612 100644 --- a/mlpp/wgan/wgan.cpp +++ b/mlpp/wgan/wgan.cpp @@ -29,7 +29,7 @@ std::vector> WGAN::generateExample(int n) { } void WGAN::gradientDescent(double learning_rate, int max_epoch, bool UI) { - class Cost cost; + class MLPPCost cost; LinAlg alg; double cost_prev = 0; int epoch = 1; @@ -156,7 +156,7 @@ std::vector WGAN::modelSetTestDiscriminator(std::vector y_hat, std::vector y) { Reg regularization; - class Cost cost; + class MLPPCost cost; double totalRegTerm = 0; auto cost_function = outputLayer->cost_map[outputLayer->cost]; @@ -217,7 +217,7 @@ void WGAN::updateGeneratorParameters(std::vector } std::tuple>>, std::vector> WGAN::computeDiscriminatorGradients(std::vector y_hat, std::vector outputSet) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization; @@ -253,7 +253,7 @@ std::tuple>>, std::vector> W } std::vector>> WGAN::computeGeneratorGradients(std::vector y_hat, std::vector outputSet) { - class Cost cost; + class MLPPCost cost; MLPPActivation avn; LinAlg alg; Reg regularization;
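
Usage after the rename (a minimal sketch, not part of the patch): call sites simply swap the class names, as in the main.cpp hunk at the top of this patch, and the member-function-pointer cost maps are re-typed from Cost::* to MLPPCost::*, mirroring the cost_map / costDeriv_map changes in output_layer.h. The include paths below follow the file layout shown in the diff, and the small main() driver is purely illustrative.

#include <map>
#include <string>
#include <vector>

#include "mlpp/convolutions/convolutions.h" // declares MLPPConvolutions (previously Convolutions)
#include "mlpp/cost/cost.h"                 // declares MLPPCost (previously Cost)

int main() {
	// Previously: Cost cost; Convolutions conv;
	MLPPCost cost;
	MLPPConvolutions conv;

	std::vector<double> y_hat = { 0.9, 0.2, 0.4 };
	std::vector<double> y = { 1.0, 0.0, 0.5 };

	// Member calls are unchanged apart from the class name.
	double mse = cost.MSE(y_hat, y);
	std::vector<std::vector<double>> sobel = conv.getSobelHorizontal();

	// The output-layer cost maps now store pointers to MLPPCost members.
	std::map<std::string, double (MLPPCost::*)(std::vector<double>, std::vector<double>)> cost_map;
	cost_map["MSE"] = &MLPPCost::MSE;
	auto cost_function = cost_map["MSE"];
	double same_mse = (cost.*cost_function)(y_hat, y); // invoke through the member pointer

	return 0;
}

Since only the class names change, downstream code needs nothing beyond this mechanical substitution; function signatures and behavior are untouched by the patch.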