Warning fixes.

This commit is contained in:
Relintai 2023-02-04 13:59:26 +01:00
parent df75dc8e7f
commit 817b1e3b72
16 changed files with 59 additions and 59 deletions

View File

@@ -22,16 +22,16 @@ public:
~MLPPANN();
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI = 1);
void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = 1);
void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = 1);
void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI = false);
void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -19,9 +19,9 @@ public:
MLPPAutoEncoder(std::vector<std::vector<real_t>> inputSet, int n_hidden);
std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
std::vector<real_t> modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -18,10 +18,10 @@ public:
MLPPCLogLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void MLE(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void MLE(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
private:

View File

@@ -25,9 +25,9 @@ public:
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -24,7 +24,7 @@ public:
MLPPGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
~MLPPGAN();
std::vector<std::vector<real_t>> generateExample(int n);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -19,18 +19,18 @@ public:
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void NewtonRaphson(real_t learning_rate, int max_epoch, bool UI);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = 1);
void NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = 1);
void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = 1);
void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = 1);
void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = 1);
void Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
void NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI = false);
void Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI = false);
void Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI = false);
void Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
void normalEquation();
real_t score();
void save(std::string fileName);

View File

@@ -20,10 +20,10 @@ public:
MLPPLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void MLE(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void MLE(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -24,7 +24,7 @@ public:
~MLPPMANN();
std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
std::vector<real_t> modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -21,9 +21,9 @@ public:
MLPPMLP(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -20,10 +20,10 @@ public:
MLPPProbitReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch = 0, bool UI = 1);
void MLE(real_t learning_rate, int max_epoch = 0, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch = 0, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch = 0, bool UI = false);
void MLE(real_t learning_rate, int max_epoch = 0, bool UI = false);
void SGD(real_t learning_rate, int max_epoch = 0, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -20,9 +20,9 @@ public:
MLPPSoftmaxNet(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet, int n_hidden, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelTest(std::vector<real_t> x);
std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -20,9 +20,9 @@ public:
MLPPSoftmaxReg(std::vector<std::vector<real_t>> inputSet, std::vector<std::vector<real_t>> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelTest(std::vector<real_t> x);
std::vector<std::vector<real_t>> modelSetTest(std::vector<std::vector<real_t>> X);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -23,9 +23,9 @@ public:
MLPPSVC(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, real_t C);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -20,9 +20,9 @@ public:
MLPPTanhReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
real_t modelTest(std::vector<real_t> x);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
void SGD(real_t learning_rate, int max_epoch, bool UI = false);
void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
real_t score();
void save(std::string fileName);

View File

@@ -58,9 +58,9 @@ public:
real_t performance_pool_int_array_vec(PoolIntArray y_hat, const Ref<MLPPVector> &output_set);
// Parameter Saving Functions
void saveParameters(std::string fileName, std::vector<real_t> weights, real_t bias, bool app = 0, int layer = -1);
void saveParameters(std::string fileName, std::vector<real_t> weights, std::vector<real_t> initial, real_t bias, bool app = 0, int layer = -1);
void saveParameters(std::string fileName, std::vector<std::vector<real_t>> weights, std::vector<real_t> bias, bool app = 0, int layer = -1);
void saveParameters(std::string fileName, std::vector<real_t> weights, real_t bias, bool app = false, int layer = -1);
void saveParameters(std::string fileName, std::vector<real_t> weights, std::vector<real_t> initial, real_t bias, bool app = false, int layer = -1);
void saveParameters(std::string fileName, std::vector<std::vector<real_t>> weights, std::vector<real_t> bias, bool app = false, int layer = -1);
// Gradient Descent related
static void UI(std::vector<real_t> weights, real_t bias);

View File

@@ -22,7 +22,7 @@ public:
MLPPWGAN(real_t k, std::vector<std::vector<real_t>> outputSet);
~MLPPWGAN();
std::vector<std::vector<real_t>> generateExample(int n);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
real_t score();
void save(std::string fileName);