Also rename the class variables in MLPPCLogLogReg.

commit c92a79c755
parent 8d9651b65a
Author: Relintai
Date:   2023-02-12 13:30:47 +01:00
2 changed files with 59 additions and 59 deletions


@@ -33,22 +33,22 @@ void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
     forward_pass();

     while (true) {
-        cost_prev = cost(y_hat, outputSet);
+        cost_prev = cost(_y_hat, _output_set);

-        std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
+        std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);

         // Calculating the weight gradients
-        weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), alg.hadamard_product(error, avn.cloglog(z, 1)))));
-        weights = regularization.regWeights(weights, lambda, alpha, reg);
+        _weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(_input_set), alg.hadamard_product(error, avn.cloglog(_z, true)))));
+        _weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);

         // Calculating the bias gradients
-        bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
+        bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(_z, true))) / _n;

         forward_pass();

         if (ui) {
-            MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
-            MLPPUtilities::UI(weights, bias);
+            MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
+            MLPPUtilities::UI(_weights, bias);
         }

         epoch++;
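
Reviewer note: besides the `_` prefixes, this hunk also changes `avn.cloglog(z, 1)` to `avn.cloglog(_z, true)`. The second argument is the activation's derivative flag, so the gradient terms scale the error by cloglog'(z); passing `true` instead of `1` just states that intent explicitly. A minimal standalone sketch of the quantity involved (illustrative only, not the library's implementation):

    #include <cmath>

    // cloglog(z) = 1 - exp(-exp(z)); its derivative is exp(z - exp(z)).
    // Mirrors the deriv-flag convention of MLPPActivation::cloglog used above.
    double cloglog(double z, bool deriv = false) {
        if (deriv) {
            return std::exp(z - std::exp(z)); // d/dz [1 - exp(-exp(z))]
        }
        return 1.0 - std::exp(-std::exp(z));
    }
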
@@ -70,21 +70,21 @@ void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
     forward_pass();

     while (true) {
-        cost_prev = cost(y_hat, outputSet);
+        cost_prev = cost(_y_hat, _output_set);

-        std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
+        std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);

-        weights = alg.addition(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), alg.hadamard_product(error, avn.cloglog(z, 1)))));
-        weights = regularization.regWeights(weights, lambda, alpha, reg);
+        _weights = alg.addition(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(_input_set), alg.hadamard_product(error, avn.cloglog(_z, true)))));
+        _weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);

         // Calculating the bias gradients
-        bias += learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
+        bias += learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(_z, true))) / _n;

         forward_pass();

         if (ui) {
-            MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputSet));
-            MLPPUtilities::UI(weights, bias);
+            MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
+            MLPPUtilities::UI(_weights, bias);
         }

         epoch++;
@@ -107,27 +107,27 @@ void MLPPCLogLogReg::sgd(real_t learning_rate, int max_epoch, bool p_) {
     while (true) {
         std::random_device rd;
         std::default_random_engine generator(rd());
-        std::uniform_int_distribution<int> distribution(0, int(n - 1));
+        std::uniform_int_distribution<int> distribution(0, int(_n - 1));
         int outputIndex = distribution(generator);

-        real_t y_hat = evaluatev(inputSet[outputIndex]);
-        real_t z = propagatev(inputSet[outputIndex]);
-        cost_prev = cost({ y_hat }, { outputSet[outputIndex] });
+        real_t y_hat = evaluatev(_input_set[outputIndex]);
+        real_t z = propagatev(_input_set[outputIndex]);
+        cost_prev = cost({ y_hat }, { _output_set[outputIndex] });

-        real_t error = y_hat - outputSet[outputIndex];
+        real_t error = y_hat - _output_set[outputIndex];

         // Weight Updation
-        weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error * exp(z - exp(z)), inputSet[outputIndex]));
-        weights = regularization.regWeights(weights, lambda, alpha, reg);
+        _weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate * error * exp(z - exp(z)), _input_set[outputIndex]));
+        _weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);

         // Bias updation
         bias -= learning_rate * error * exp(z - exp(z));

-        y_hat = evaluatev(inputSet[outputIndex]);
+        y_hat = evaluatev(_input_set[outputIndex]);

         if (p_) {
-            MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { outputSet[outputIndex] }));
-            MLPPUtilities::UI(weights, bias);
+            MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { _output_set[outputIndex] }));
+            MLPPUtilities::UI(_weights, bias);
         }

         epoch++;
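
Reviewer note: `sgd()` never calls `avn.cloglog(z, true)`; it inlines the same derivative directly as `exp(z - exp(z))`. The two expressions agree, since

    d/dz [1 - exp(-exp(z))] = exp(z) * exp(-exp(z)) = exp(z - exp(z)),

so the per-sample update is the single-example analogue of the batch gradients above.
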
@@ -148,8 +148,8 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
     int epoch = 1;

     // Creating the mini-batches
-    int n_mini_batch = n / mini_batch_size;
-    auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
+    int n_mini_batch = _n / mini_batch_size;
+    auto batches = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);
     auto inputMiniBatches = std::get<0>(batches);
     auto outputMiniBatches = std::get<1>(batches);
@@ -162,11 +162,11 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
             std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

             // Calculating the weight gradients
-            weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), alg.hadamard_product(error, avn.cloglog(z, 1)))));
-            weights = regularization.regWeights(weights, lambda, alpha, reg);
+            _weights = alg.subtraction(_weights, alg.scalarMultiply(learning_rate / _n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), alg.hadamard_product(error, avn.cloglog(z, 1)))));
+            _weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);

             // Calculating the bias gradients
-            bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / n;
+            bias -= learning_rate * alg.sum_elements(alg.hadamard_product(error, avn.cloglog(z, 1))) / _n;

             forward_pass();
@@ -174,7 +174,7 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
             if (p_) {
                 MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, outputMiniBatches[i]));
-                MLPPUtilities::UI(weights, bias);
+                MLPPUtilities::UI(_weights, bias);
             }
         }
@@ -190,21 +190,21 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool p_) {
 real_t MLPPCLogLogReg::score() {
     MLPPUtilities util;
-    return util.performance(y_hat, outputSet);
+    return util.performance(_y_hat, _output_set);
 }

-MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
-    inputSet = pinputSet;
-    outputSet = poutputSet;
-    n = inputSet.size();
-    k = inputSet[0].size();
-    reg = p_reg;
-    lambda = p_lambda;
-    alpha = p_alpha;
-    y_hat.resize(n);
+MLPPCLogLogReg::MLPPCLogLogReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+    _input_set = p_input_set;
+    _output_set = p_output_set;
+    _n = _input_set.size();
+    _k = _input_set[0].size();
+    _reg = p_reg;
+    _lambda = p_lambda;
+    _alpha = p_alpha;
+    _y_hat.resize(_n);

-    weights = MLPPUtilities::weightInitialization(k);
+    _weights = MLPPUtilities::weightInitialization(_k);
     bias = MLPPUtilities::biasInitialization();
 }
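
Call sites are unaffected by this commit: only the constructor's parameter names change, not its signature or defaults. A hypothetical usage sketch (data values invented; "Ridge" assumed to be an accepted regularization string):

    std::vector<std::vector<real_t>> X = { { 0.0, 1.0 }, { 1.0, 1.0 }, { 2.0, 0.0 } };
    std::vector<real_t> y = { 0, 1, 1 };

    MLPPCLogLogReg model(X, y, "Ridge", 0.5, 0.5); // defaults are "None", 0.5, 0.5
    model.gradient_descent(0.01, 1000, false);     // learning_rate, max_epoch, ui
    real_t acc = model.score();
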
@@ -216,35 +216,35 @@ MLPPCLogLogReg::~MLPPCLogLogReg() {
 real_t MLPPCLogLogReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
     MLPPReg regularization;
     class MLPPCost cost;
-    return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
+    return cost.MSE(y_hat, y) + regularization.regTerm(_weights, _lambda, _alpha, _reg);
 }

 real_t MLPPCLogLogReg::evaluatev(std::vector<real_t> x) {
     MLPPLinAlg alg;
     MLPPActivation avn;
-    return avn.cloglog(alg.dot(weights, x) + bias);
+    return avn.cloglog(alg.dot(_weights, x) + bias);
 }

 real_t MLPPCLogLogReg::propagatev(std::vector<real_t> x) {
     MLPPLinAlg alg;
-    return alg.dot(weights, x) + bias;
+    return alg.dot(_weights, x) + bias;
 }

 std::vector<real_t> MLPPCLogLogReg::evaluatem(std::vector<std::vector<real_t>> X) {
     MLPPLinAlg alg;
     MLPPActivation avn;
-    return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
+    return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, _weights)));
 }

 std::vector<real_t> MLPPCLogLogReg::propagatem(std::vector<std::vector<real_t>> X) {
     MLPPLinAlg alg;
-    return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
+    return alg.scalarAdd(bias, alg.mat_vec_mult(X, _weights));
 }

 // cloglog ( wTx + b )
 void MLPPCLogLogReg::forward_pass() {
     MLPPActivation avn;

-    z = propagatem(inputSet);
-    y_hat = avn.cloglog(z);
+    _z = propagatem(_input_set);
+    _y_hat = avn.cloglog(_z);
 }
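
For reference, the `// cloglog ( wTx + b )` comment above summarizes the whole model: `propagatem()` stores the affine scores z = Xw + b in `_z`, and the activation maps each score into (0, 1) via

    y_hat = cloglog(z) = 1 - exp(-exp(z))
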


@@ -25,7 +25,7 @@ public:
     real_t score();

-    MLPPCLogLogReg(std::vector<std::vector<real_t>> pinputSet, std::vector<real_t> poutputSet, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
+    MLPPCLogLogReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);

     MLPPCLogLogReg();
     ~MLPPCLogLogReg();

@@ -44,20 +44,20 @@ private:
     void forward_pass();

-    std::vector<std::vector<real_t>> inputSet;
-    std::vector<real_t> outputSet;
-    std::vector<real_t> y_hat;
-    std::vector<real_t> z;
+    std::vector<std::vector<real_t>> _input_set;
+    std::vector<real_t> _output_set;
+    std::vector<real_t> _y_hat;
+    std::vector<real_t> _z;

-    std::vector<real_t> weights;
+    std::vector<real_t> _weights;
     real_t bias;

-    int n;
-    int k;
+    int _n;
+    int _k;

     // Regularization Params
-    std::string reg;
-    real_t lambda;
-    real_t alpha; /* This is the controlling param for Elastic Net*/
+    std::string _reg;
+    real_t _lambda;
+    real_t _alpha; /* This is the controlling param for Elastic Net*/
 };

 #endif /* CLogLogReg_hpp */