pmlpp/mlpp/log_reg/log_reg.cpp

//
// LogReg.cpp
//
// Created by Marc Melikyan on 10/2/20.
//

#include "log_reg.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <iostream>
#include <random>
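
// Stores the dataset and hyperparameters (regularization type, lambda, alpha),
// then initializes the weights and bias. n is the number of samples, k the
// number of features per sample.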
MLPPLogReg::MLPPLogReg(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string preg, real_t plambda, real_t palpha) {
	inputSet = pinputSet;
	outputSet = poutputSet;
	n = pinputSet.size();
	k = pinputSet[0].size();
	reg = preg;
	lambda = plambda;
	alpha = palpha;

	y_hat.resize(n);

	weights = MLPPUtilities::weightInitialization(k);
	bias = MLPPUtilities::biasInitialization();
}
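
// Inference wrappers: modelSetTest() scores a whole matrix of samples,
// modelTest() a single sample. Both defer to Evaluate().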
std::vector<real_t> MLPPLogReg::modelSetTest(std::vector<std::vector<real_t>> X) {
	return Evaluate(X);
}

real_t MLPPLogReg::modelTest(std::vector<real_t> x) {
	return Evaluate(x);
}
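
// Batch gradient descent on the regularized log loss. With a sigmoid output,
// the gradient of the log loss w.r.t. the weights reduces to
// (1/n) * X^T * (y_hat - y), and the bias gradient is the mean error; the
// loop below applies exactly those updates until max_epoch is reached.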
void MLPPLogReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);

		// Calculating the weight gradients
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients
		bias -= learning_rate * alg.sum_elements(error) / n;

		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
}
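
// Gradient ascent on the log-likelihood. Maximizing the likelihood is
// equivalent to minimizing the log loss, so this mirrors gradientDescent()
// with the signs flipped: error = (y - y_hat) and the parameters move
// with, rather than against, the gradient.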
void MLPPLogReg::MLE(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	forwardPass();

	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(outputSet, y_hat);

		// Calculating the weight gradients
		weights = alg.addition(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients
		bias += learning_rate * alg.sum_elements(error) / n;

		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}
}
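
// Stochastic gradient descent: each iteration draws one training example
// uniformly at random and updates the parameters from that single-sample
// gradient. Noisier than batch descent, but each step costs O(k) rather
// than O(n * k).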
void MLPPLogReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	std::random_device rd;
	std::default_random_engine generator(rd());
	std::uniform_int_distribution<int> distribution(0, int(n - 1));

	while (true) {
		int outputIndex = distribution(generator);

		real_t y_hat = Evaluate(inputSet[outputIndex]);
		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });

		real_t error = y_hat - outputSet[outputIndex];

		// Weight update
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Bias update
		bias -= learning_rate * error;

		y_hat = Evaluate(inputSet[outputIndex]);

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}

	forwardPass();
}
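
// Mini-batch gradient descent: the dataset is split into n / mini_batch_size
// batches, and one batch-style update is applied per mini-batch each epoch,
// trading the variance of SGD against the per-step cost of full-batch descent.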
void MLPPLogReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error)));
			weights = regularization.regWeights(weights, lambda, alpha, reg);

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size();

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;
		if (epoch > max_epoch) {
			break;
		}
	}

	forwardPass();
}
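
// Training-set performance of the cached predictions, as computed by
// MLPPUtilities::performance().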
real_t MLPPLogReg::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPLogReg::save(std::string fileName) {
	MLPPUtilities util;
	util.saveParameters(fileName, weights, bias);
}
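
// Regularized cost: the binary cross-entropy (log loss) of the predictions
// plus the regularization penalty on the weights.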
real_t MLPPLogReg::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
	MLPPReg regularization;
	class MLPPCost cost;
	return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}
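
// Model output for a whole design matrix, applied row-wise:
// y_hat = sigmoid(X * w + b).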
std::vector<real_t> MLPPLogReg::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}

// sigmoid ( wTx + b )
real_t MLPPLogReg::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	MLPPActivation avn;
	return avn.sigmoid(alg.dot(weights, x) + bias);
}

void MLPPLogReg::forwardPass() {
	y_hat = Evaluate(inputSet);
}
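
// A minimal usage sketch (illustrative only: the toy dataset is hypothetical,
// and it assumes log_reg.h declares the usual defaults for the reg / lambda /
// alpha constructor parameters):
//
//     std::vector<std::vector<real_t>> X = { { 0.0 }, { 1.0 }, { 2.0 }, { 3.0 } };
//     std::vector<real_t> y = { 0, 0, 1, 1 };
//     MLPPLogReg model(X, y);
//     model.gradientDescent(0.1, 1000, false); // learning_rate, max_epoch, UI
//     real_t p = model.modelTest({ 2.5 }); // estimated P(y = 1 | x)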