From 817b1e3b72a1d8d6a0e994c70a9b43f61e51c615 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Sat, 4 Feb 2023 13:59:26 +0100
Subject: [PATCH] Warning fixes.
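
The bool parameters in these headers took integer literals as default
arguments (bool UI = 1, bool app = 0), which checks such as clang-tidy's
readability-implicit-bool-conversion report as implicit int-to-bool
conversions. Spelling the defaults as bool literals silences the
warnings. Note that for the UI flags this also changes the effective
default from true (1) to false.

A minimal sketch of the pattern, for illustration only (the names below
are hypothetical, and the real_t typedef stands in for the project's
own definition):

    typedef double real_t;

    struct Example {
        // Before: an int literal as the default for a bool parameter;
        // implicit-bool-conversion checks flag this.
        void train_before(real_t learning_rate, int max_epoch, bool UI = 1);

        // After: a bool literal, so no conversion is involved.
        void train_after(real_t learning_rate, int max_epoch, bool UI = false);
    };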
---
 mlpp/ann/ann.h                     | 20 ++++++++++----------
 mlpp/auto_encoder/auto_encoder.h   |  6 +++---
 mlpp/c_log_log_reg/c_log_log_reg.h |  8 ++++----
 mlpp/dual_svc/dual_svc.h           |  6 +++---
 mlpp/gan/gan.h                     |  2 +-
 mlpp/lin_reg/lin_reg.h             | 20 ++++++++++----------
 mlpp/log_reg/log_reg.h             |  8 ++++----
 mlpp/mann/mann.h                   |  2 +-
 mlpp/mlp/mlp.h                     |  6 +++---
 mlpp/probit_reg/probit_reg.h       |  8 ++++----
 mlpp/softmax_net/softmax_net.h     |  6 +++---
 mlpp/softmax_reg/softmax_reg.h     |  6 +++---
 mlpp/svc/svc.h                     |  6 +++---
 mlpp/tanh_reg/tanh_reg.h           |  6 +++---
 mlpp/utilities/utilities.h         |  6 +++---
 mlpp/wgan/wgan.h                   |  2 +-
 16 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/mlpp/ann/ann.h b/mlpp/ann/ann.h
index 4d86e65..dae4172 100644
--- a/mlpp/ann/ann.h
+++ b/mlpp/ann/ann.h
@@ -22,16 +22,16 @@ public:
 	~MLPPANN();
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
-	void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI = 1);
-	void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = 1);
-	void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = 1);
-	void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
-	void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
-	void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
-	void AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
+	void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI = false);
+	void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
+	void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
+	void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
+	void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
+	void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
+	void AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/auto_encoder/auto_encoder.h b/mlpp/auto_encoder/auto_encoder.h
index c3fd3aa..f5a3dfb 100644
--- a/mlpp/auto_encoder/auto_encoder.h
+++ b/mlpp/auto_encoder/auto_encoder.h
@@ -19,9 +19,9 @@ public:
 	MLPPAutoEncoder(std::vector<std::vector<real_t>> inputSet, int n_hidden);
 	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
 	std::vector<real_t> modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.h b/mlpp/c_log_log_reg/c_log_log_reg.h
index 672ab3c..39f3c81 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.h
+++ b/mlpp/c_log_log_reg/c_log_log_reg.h
@@ -18,10 +18,10 @@ public:
 	MLPPCLogLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MLE(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void MLE(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 
 private:
diff --git a/mlpp/dual_svc/dual_svc.h b/mlpp/dual_svc/dual_svc.h
index b44cc9a..476ef49 100644
--- a/mlpp/dual_svc/dual_svc.h
+++ b/mlpp/dual_svc/dual_svc.h
@@ -25,9 +25,9 @@ public:
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
 
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/gan/gan.h b/mlpp/gan/gan.h
index c78c772..594c58c 100644
--- a/mlpp/gan/gan.h
+++ b/mlpp/gan/gan.h
@@ -24,7 +24,7 @@ public:
 	MLPPGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
 	~MLPPGAN();
 	std::vector<std::vector<real_t>> generateExample(int n);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/lin_reg/lin_reg.h b/mlpp/lin_reg/lin_reg.h
index eb25a22..b8073e4 100644
--- a/mlpp/lin_reg/lin_reg.h
+++ b/mlpp/lin_reg/lin_reg.h
@@ -19,18 +19,18 @@ public:
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
 	void NewtonRaphson(real_t learning_rate, int max_epoch, bool UI);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
 
-	void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = 1);
-	void NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = 1);
-	void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = 1);
-	void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = 1);
-	void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
-	void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
-	void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
+	void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
+	void NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
+	void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
+	void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
+	void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
+	void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
+	void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
 
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	void normalEquation();
 	real_t score();
 	void save(std::string fileName);
diff --git a/mlpp/log_reg/log_reg.h b/mlpp/log_reg/log_reg.h
index 236ba91..39580ee 100644
--- a/mlpp/log_reg/log_reg.h
+++ b/mlpp/log_reg/log_reg.h
@@ -20,10 +20,10 @@ public:
 	MLPPLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MLE(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void MLE(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/mann/mann.h b/mlpp/mann/mann.h
index 9e0e29d..668e084 100644
--- a/mlpp/mann/mann.h
+++ b/mlpp/mann/mann.h
@@ -24,7 +24,7 @@ public:
 	~MLPPMANN();
 	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
 	std::vector<real_t> modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/mlp/mlp.h b/mlpp/mlp/mlp.h
index 336b68b..ea2cd6b 100644
--- a/mlpp/mlp/mlp.h
+++ b/mlpp/mlp/mlp.h
@@ -21,9 +21,9 @@ public:
 	MLPPMLP(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/probit_reg/probit_reg.h b/mlpp/probit_reg/probit_reg.h
index cec953d..3c93f60 100644
--- a/mlpp/probit_reg/probit_reg.h
+++ b/mlpp/probit_reg/probit_reg.h
@@ -20,10 +20,10 @@ public:
 	MLPPProbitReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch = 0, bool UI = 1);
-	void MLE(real_t learning_rate, int max_epoch = 0, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch = 0, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch = 0, bool UI = false);
+	void MLE(real_t learning_rate, int max_epoch = 0, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch = 0, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/softmax_net/softmax_net.h b/mlpp/softmax_net/softmax_net.h
index cb12e59..17b32d3 100644
--- a/mlpp/softmax_net/softmax_net.h
+++ b/mlpp/softmax_net/softmax_net.h
@@ -20,9 +20,9 @@ public:
 	MLPPSoftmaxNet(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelTest(std::vector<real_t> x);
 	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/softmax_reg/softmax_reg.h b/mlpp/softmax_reg/softmax_reg.h
index 43e71d4..0d9747a 100644
--- a/mlpp/softmax_reg/softmax_reg.h
+++ b/mlpp/softmax_reg/softmax_reg.h
@@ -20,9 +20,9 @@ public:
 	MLPPSoftmaxReg(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelTest(std::vector<real_t> x);
 	std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/svc/svc.h b/mlpp/svc/svc.h
index 6077f10..ab300d3 100644
--- a/mlpp/svc/svc.h
+++ b/mlpp/svc/svc.h
@@ -23,9 +23,9 @@ public:
 	MLPPSVC(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, real_t C);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/tanh_reg/tanh_reg.h b/mlpp/tanh_reg/tanh_reg.h
index b0b0a0c..3a97131 100644
--- a/mlpp/tanh_reg/tanh_reg.h
+++ b/mlpp/tanh_reg/tanh_reg.h
@@ -20,9 +20,9 @@ public:
 	MLPPTanhReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
 	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
 	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
+	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
+	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
 	real_t score();
 	void save(std::string fileName);
 
diff --git a/mlpp/utilities/utilities.h b/mlpp/utilities/utilities.h
index 60685e3..9572c51 100644
--- a/mlpp/utilities/utilities.h
+++ b/mlpp/utilities/utilities.h
@@ -58,9 +58,9 @@ public:
 	real_t performance_pool_int_array_vec(PoolIntArray y_hat, const Ref<MLPPVector> &output_set);
 
 	// Parameter Saving Functions
-	void saveParameters(std::string fileName, std::vector<real_t> weights, real_t bias, bool app = 0, int layer = -1);
-	void saveParameters(std::string fileName, std::vector<real_t> weights, std::vector<real_t> initial, real_t bias, bool app = 0, int layer = -1);
-	void saveParameters(std::string fileName, std::vector<std::vector<real_t>> weights, std::vector<real_t> bias, bool app = 0, int layer = -1);
+	void saveParameters(std::string fileName, std::vector<real_t> weights, real_t bias, bool app = false, int layer = -1);
+	void saveParameters(std::string fileName, std::vector<real_t> weights, std::vector<real_t> initial, real_t bias, bool app = false, int layer = -1);
+	void saveParameters(std::string fileName, std::vector<std::vector<real_t>> weights, std::vector<real_t> bias, bool app = false, int layer = -1);
 
 	// Gradient Descent related
 	static void UI(std::vector<real_t> weights, real_t bias);
diff --git a/mlpp/wgan/wgan.h b/mlpp/wgan/wgan.h
index 57329d7..994306f 100644
--- a/mlpp/wgan/wgan.h
+++ b/mlpp/wgan/wgan.h
@@ -22,7 +22,7 @@ public:
 	MLPPWGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
 	~MLPPWGAN();
 	std::vector<std::vector<real_t>> generateExample(int n);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
+	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
 	real_t score();
 	void save(std::string fileName);