diff --git a/mlpp/c_log_log_reg/c_log_log_reg.cpp b/mlpp/c_log_log_reg/c_log_log_reg.cpp
index 0b63bce..1cc0e9e 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.cpp
+++ b/mlpp/c_log_log_reg/c_log_log_reg.cpp
@@ -14,31 +14,26 @@
 #include <iostream>
 #include <random>
 
-MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg, real_t lambda, real_t alpha) :
-		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
-	y_hat.resize(n);
-	weights = MLPPUtilities::weightInitialization(k);
-	bias = MLPPUtilities::biasInitialization();
+std::vector<real_t> MLPPCLogLogReg::model_set_test(std::vector<std::vector<real_t>> X) {
+	return evaluatem(X);
 }
 
-std::vector<real_t> MLPPCLogLogReg::modelSetTest(std::vector<std::vector<real_t>> X) {
-	return Evaluate(X);
+real_t MLPPCLogLogReg::model_test(std::vector<real_t> x) {
+	return evaluatev(x);
 }
 
-real_t MLPPCLogLogReg::modelTest(std::vector<real_t> x) {
-	return Evaluate(x);
-}
-
-void MLPPCLogLogReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
-	forwardPass();
+
+	forward_pass();
 
 	while (true) {
-		cost_prev = Cost(y_hat, outputSet);
+		cost_prev = cost(y_hat, outputSet);
 
 		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
 
@@ -49,12 +44,13 @@ void MLPPCLogLogReg::gradientDescent(real_t learning_rate, int max_epoch, bool U
 		// Calculating the bias gradients
 		bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
 
-		forwardPass();
+		forward_pass();
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
 			MLPPUtilities::UI(weights, bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
@@ -63,16 +59,18 @@
 	}
 }
 
-void MLPPCLogLogReg::MLE(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
-	forwardPass();
+
+	forward_pass();
 
 	while (true) {
-		cost_prev = Cost(y_hat, outputSet);
+		cost_prev = cost(y_hat, outputSet);
 
 		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
 
@@ -81,12 +79,14 @@ void MLPPCLogLogReg::MLE(real_t learning_rate, int max_epoch, bool UI) {
 		// Calculating the bias gradients
 		bias += learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
 
-		forwardPass();
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
+		forward_pass();
+
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
 			MLPPUtilities::UI(weights, bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
@@ -95,12 +95,14 @@ void MLPPCLogLogReg::MLE(real_t learning_rate, int max_epoch, bool UI) {
 	}
 }
 
-void MLPPCLogLogReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
 	MLPPLinAlg alg;
 	MLPPReg regularization;
+
 	real_t cost_prev = 0;
 	int epoch = 1;
-	forwardPass();
+
+	forward_pass();
 
 	while (true) {
 		std::random_device rd;
@@ -108,9 +110,9 @@ void MLPPCLogLogReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
 		std::uniform_int_distribution<int> distribution(0, int(n - 1));
 		int outputIndex = distribution(generator);
 
-		real_t y_hat = Evaluate(inputSet[outputIndex]);
-		real_t z = propagate(inputSet[outputIndex]);
-		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
+		real_t y_hat = evaluatev(inputSet[outputIndex]);
+		real_t z = propagatev(inputSet[outputIndex]);
+		cost_prev = cost({ y_hat }, { outputSet[outputIndex] });
 
 		real_t error = y_hat - outputSet[outputIndex];
 
@@ -121,22 +123,24 @@ void MLPPCLogLogReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
 		// Bias updation
 		bias -= learning_rate * error * exp(z - exp(z));
 
-		y_hat = Evaluate({ inputSet[outputIndex] });
+		y_hat = evaluatev(inputSet[outputIndex]);
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
+		if (p_) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { outputSet[outputIndex] }));
 			MLPPUtilities::UI(weights, bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
-void MLPPCLogLogReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
 	MLPPActivation avn;
 	MLPPLinAlg alg;
 	MLPPReg regularization;
@@ -151,9 +155,9 @@ void MLPPCLogLogReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_si
 
 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
-			std::vector<real_t> z = propagate(inputMiniBatches[i]);
-			cost_prev = Cost(y_hat, outputMiniBatches[i]);
+			std::vector<real_t> y_hat = evaluatem(inputMiniBatches[i]);
+			std::vector<real_t> z = propagatem(inputMiniBatches[i]);
+			cost_prev = cost(y_hat, outputMiniBatches[i]);
 
 			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
 
@@ -164,21 +168,24 @@ void MLPPCLogLogReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_si
 			// Calculating the bias gradients
 			bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
 
-			forwardPass();
+			forward_pass();
 
-			y_hat = Evaluate(inputMiniBatches[i]);
+			y_hat = evaluatem(inputMiniBatches[i]);
 
-			if (UI) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
+			if (p_) {
+				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputMiniBatches[i]));
 				MLPPUtilities::UI(weights, bias);
 			}
 		}
+
 		epoch++;
+
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
 real_t MLPPCLogLogReg::score() {
@@ -186,38 +193,58 @@
 	return util.performance(y_hat, outputSet);
 }
 
-real_t MLPPCLogLogReg::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+	inputSet = pinputSet;
+	outputSet = poutputSet;
+	n = inputSet.size();
+	k = inputSet[0].size();
+	reg = p_reg;
+	lambda = p_lambda;
+	alpha = p_alpha;
+
+	y_hat.resize(n);
+
+	weights = MLPPUtilities::weightInitialization(k);
+	bias = MLPPUtilities::biasInitialization();
+}
+
+MLPPCLogLogReg::MLPPCLogLogReg() {
+}
+MLPPCLogLogReg::~MLPPCLogLogReg() {
+}
+
+real_t MLPPCLogLogReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
 	MLPPReg regularization;
 	class MLPPCost cost;
 	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 
-std::vector<real_t> MLPPCLogLogReg::Evaluate(std::vector<std::vector<real_t>> X) {
-	MLPPLinAlg alg;
-	MLPPActivation avn;
-	return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
-}
-
-std::vector<real_t> MLPPCLogLogReg::propagate(std::vector<std::vector<real_t>> X) {
-	MLPPLinAlg alg;
-	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
-}
-
-real_t MLPPCLogLogReg::Evaluate(std::vector<real_t> x) {
+real_t MLPPCLogLogReg::evaluatev(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 	return avn.cloglog(alg.dot(weights, x) + bias);
 }
 
-real_t MLPPCLogLogReg::propagate(std::vector<real_t> x) {
+real_t MLPPCLogLogReg::propagatev(std::vector<real_t> x) {
 	MLPPLinAlg alg;
 	return alg.dot(weights, x) + bias;
 }
 
+std::vector<real_t> MLPPCLogLogReg::evaluatem(std::vector<std::vector<real_t>> X) {
+	MLPPLinAlg alg;
+	MLPPActivation avn;
+	return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
+}
+
+std::vector<real_t> MLPPCLogLogReg::propagatem(std::vector<std::vector<real_t>> X) {
+	MLPPLinAlg alg;
+	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
+}
+
 // cloglog ( wTx + b )
-void MLPPCLogLogReg::forwardPass() {
+void MLPPCLogLogReg::forward_pass() {
 	MLPPActivation avn;
 
-	z = propagate(inputSet);
+	z = propagatem(inputSet);
 	y_hat = avn.cloglog(z);
 }
diff --git a/mlpp/c_log_log_reg/c_log_log_reg.h b/mlpp/c_log_log_reg/c_log_log_reg.h
index 39f3c81..8e7e1b1 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg.h
+++ b/mlpp/c_log_log_reg/c_log_log_reg.h
@@ -15,25 +15,34 @@
 
 class MLPPCLogLogReg {
 public:
-	MLPPCLogLogReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
-	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = false);
-	void MLE(real_t learning_rate, int max_epoch, bool UI = false);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = false);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = false);
+	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
+	real_t model_test(std::vector<real_t> x);
+
+	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
+	void mle(real_t learning_rate, int max_epoch, bool ui = false);
+	void sgd(real_t learning_rate, int max_epoch, bool ui = false);
+	void mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui = false);
+
 	real_t score();
 
-private:
-	void weightInitialization(int k);
-	void biasInitialization();
-	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+	MLPPCLogLogReg(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
 
-	std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
-	std::vector<real_t> propagate(std::vector<std::vector<real_t>> X);
-	real_t Evaluate(std::vector<real_t> x);
-	real_t propagate(std::vector<real_t> x);
-	void forwardPass();
+	MLPPCLogLogReg();
+	~MLPPCLogLogReg();
+
+private:
+	void weight_initialization(int k);
+	void bias_initialization();
+
+	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
+
+	real_t evaluatev(std::vector<real_t> x);
+	real_t propagatev(std::vector<real_t> x);
+
+	std::vector<real_t> evaluatem(std::vector<std::vector<real_t>> X);
+	std::vector<real_t> propagatem(std::vector<std::vector<real_t>> X);
+
+	void forward_pass();
 
 	std::vector<std::vector<real_t>> inputSet;
 	std::vector<real_t> outputSet;
diff --git a/mlpp/c_log_log_reg/c_log_log_reg_old.h b/mlpp/c_log_log_reg/c_log_log_reg_old.h
index 221b65a..5278299 100644
--- a/mlpp/c_log_log_reg/c_log_log_reg_old.h
+++ b/mlpp/c_log_log_reg/c_log_log_reg_old.h
@@ -1,6 +1,6 @@
 
-#ifndef MLPP_C_LOG_LOG_REG_H
-#define MLPP_C_LOG_LOG_REG_H
+#ifndef MLPP_C_LOG_LOG_REG_OLD_H
+#define MLPP_C_LOG_LOG_REG_OLD_H
 
 //
 //  CLogLogReg.hpp
diff --git a/test/mlpp_tests.cpp b/test/mlpp_tests.cpp
index 816d0e7..86d480c 100644
--- a/test/mlpp_tests.cpp
+++ b/test/mlpp_tests.cpp
@@ -423,9 +423,15 @@ void MLPPTests::test_c_log_log_regression(bool ui) {
 	// CLOGLOG REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 1, 2, 3, 4, 5, 6, 7, 8 }, { 0, 0, 0, 0, 1, 1, 1, 1 } };
 	std::vector<real_t> outputSet = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+	MLPPCLogLogRegOld model_old(alg.transpose(inputSet), outputSet);
+	model_old.SGD(0.1, 10000, ui);
+	alg.printVector(model_old.modelSetTest(alg.transpose(inputSet)));
+	std::cout << "ACCURACY: " << 100 * model_old.score() << "%" << std::endl;
+
 	MLPPCLogLogReg model(alg.transpose(inputSet), outputSet);
-	model.SGD(0.1, 10000, ui);
-	alg.printVector(model.modelSetTest(alg.transpose(inputSet)));
+	model.sgd(0.1, 10000, ui);
+	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
 	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 }
 void MLPPTests::test_exp_reg_regression(bool ui) {
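
For reviewers, a minimal usage sketch of the renamed API follows, mirroring test_c_log_log_regression above. It is illustrative only, not part of the patch: the include paths and the main wrapper are assumptions, and real_t is taken to come from the library's build configuration.

#include "mlpp/c_log_log_reg/c_log_log_reg.h" // assumed include paths
#include "mlpp/lin_alg/lin_alg.h"

#include <iostream>
#include <vector>

int main() {
	MLPPLinAlg alg;

	// Two features over eight samples, stored feature-major and transposed
	// to sample-major, exactly as in test_c_log_log_regression above.
	std::vector<std::vector<real_t>> inputSet = { { 1, 2, 3, 4, 5, 6, 7, 8 }, { 0, 0, 0, 0, 1, 1, 1, 1 } };
	std::vector<real_t> outputSet = { 0, 0, 0, 0, 1, 1, 1, 1 };

	MLPPCLogLogReg model(alg.transpose(inputSet), outputSet);

	// Renamed entry points: sgd/mbgd/mle/gradient_descent replace the old
	// SGD/MBGD/MLE/gradientDescent; the final flag toggles cost printouts.
	model.sgd(0.1, 10000, false);

	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;

	return 0;
}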
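
Two behavioral notes on the hunks above. First, the exp(z - exp(z)) factor in sgd's bias update is the derivative of the cloglog link: d/dz [1 - exp(-exp(z))] = exp(z - exp(z)). Second, two quirks are carried over from the old implementation rather than introduced here: mle ascends the gradient (bias +=) where gradient_descent descends (bias -=), and mbgd's bias gradient is still averaged over the full dataset size n rather than the mini-batch size.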