Initial cleanup pass on MLPPExpReg.

This commit is contained in:
Relintai 2023-02-12 10:52:46 +01:00
parent 689fbd397f
commit 9a529c572d
3 changed files with 163 additions and 131 deletions

@@ -14,78 +14,68 @@
 #include <iostream>
 #include <random>
 
-MLPPExpReg::MLPPExpReg(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
-	inputSet = p_inputSet;
-	outputSet = p_outputSet;
-	n = p_inputSet.size();
-	k = p_inputSet[0].size();
-	reg = p_reg;
-	lambda = p_lambda;
-	alpha = p_alpha;
-	y_hat.resize(n);
-
-	weights = MLPPUtilities::weightInitialization(k);
-	initial = MLPPUtilities::weightInitialization(k);
-	bias = MLPPUtilities::biasInitialization();
+std::vector<real_t> MLPPExpReg::model_set_test(std::vector<std::vector<real_t>> X) {
+	return evaluatem(X);
 }
 
-std::vector<real_t> MLPPExpReg::modelSetTest(std::vector<std::vector<real_t>> X) {
-	return Evaluate(X);
+real_t MLPPExpReg::model_test(std::vector<real_t> x) {
+	return evaluatev(x);
 }
 
-real_t MLPPExpReg::modelTest(std::vector<real_t> x) {
-	return Evaluate(x);
-}
-
-void MLPPExpReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
-	forwardPass();
+
+	forward_pass();
 
 	while (true) {
-		cost_prev = Cost(y_hat, outputSet);
+		cost_prev = cost(_y_hat, _output_set);
 
-		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);
+		std::vector<real_t> error = alg.subtraction(_y_hat, _output_set);
 
-		for (int i = 0; i < k; i++) {
+		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradient
 			real_t sum = 0;
-			for (int j = 0; j < n; j++) {
-				sum += error[j] * inputSet[j][i] * std::pow(weights[i], inputSet[j][i] - 1);
+			for (int j = 0; j < _n; j++) {
+				sum += error[j] * _input_set[j][i] * std::pow(_weights[i], _input_set[j][i] - 1);
 			}
-			real_t w_gradient = sum / n;
+			real_t w_gradient = sum / _n;
 
 			// Calculating the initial gradient
 			real_t sum2 = 0;
-			for (int j = 0; j < n; j++) {
-				sum2 += error[j] * std::pow(weights[i], inputSet[j][i]);
+			for (int j = 0; j < _n; j++) {
+				sum2 += error[j] * std::pow(_weights[i], _input_set[j][i]);
 			}
-			real_t i_gradient = sum2 / n;
+			real_t i_gradient = sum2 / _n;
 
 			// Weight/initial updation
-			weights[i] -= learning_rate * w_gradient;
-			initial[i] -= learning_rate * i_gradient;
+			_weights[i] -= learning_rate * w_gradient;
+			_initial[i] -= learning_rate * i_gradient;
 		}
-		weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradient
 		real_t sum = 0;
-		for (int j = 0; j < n; j++) {
-			sum += (y_hat[j] - outputSet[j]);
+		for (int j = 0; j < _n; j++) {
+			sum += (_y_hat[j] - _output_set[j]);
 		}
-		real_t b_gradient = sum / n;
+		real_t b_gradient = sum / _n;
 
 		// bias updation
-		bias -= learning_rate * b_gradient;
-		forwardPass();
+		_bias -= learning_rate * b_gradient;
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
-			MLPPUtilities::UI(weights, bias);
+		forward_pass();
+
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost(_y_hat, _output_set));
+			MLPPUtilities::UI(_weights, _bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
@@ -94,153 +84,184 @@ void MLPPExpReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
 	}
 }
 
-void MLPPExpReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
+void MLPPExpReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
 
+	std::random_device rd;
+	std::default_random_engine generator(rd());
+	std::uniform_int_distribution<int> distribution(0, int(_n - 1));
+
 	while (true) {
-		std::random_device rd;
-		std::default_random_engine generator(rd());
-		std::uniform_int_distribution<int> distribution(0, int(n - 1));
-		int outputIndex = distribution(generator);
+		int output_index = distribution(generator);
 
-		real_t y_hat = Evaluate(inputSet[outputIndex]);
-		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });
+		real_t y_hat = evaluatev(_input_set[output_index]);
+		cost_prev = cost({ y_hat }, { _output_set[output_index] });
 
-		for (int i = 0; i < k; i++) {
+		for (int i = 0; i < _k; i++) {
 			// Calculating the weight gradients
-			real_t w_gradient = (y_hat - outputSet[outputIndex]) * inputSet[outputIndex][i] * std::pow(weights[i], inputSet[outputIndex][i] - 1);
-			real_t i_gradient = (y_hat - outputSet[outputIndex]) * std::pow(weights[i], inputSet[outputIndex][i]);
+			real_t w_gradient = (y_hat - _output_set[output_index]) * _input_set[output_index][i] * std::pow(_weights[i], _input_set[output_index][i] - 1);
+			real_t i_gradient = (y_hat - _output_set[output_index]) * std::pow(_weights[i], _input_set[output_index][i]);
 
 			// Weight/initial updation
-			weights[i] -= learning_rate * w_gradient;
-			initial[i] -= learning_rate * i_gradient;
+			_weights[i] -= learning_rate * w_gradient;
+			_initial[i] -= learning_rate * i_gradient;
 		}
-		weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+		_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 		// Calculating the bias gradients
-		real_t b_gradient = (y_hat - outputSet[outputIndex]);
+		real_t b_gradient = (y_hat - _output_set[output_index]);
 
 		// Bias updation
-		bias -= learning_rate * b_gradient;
-		y_hat = Evaluate({ inputSet[outputIndex] });
+		_bias -= learning_rate * b_gradient;
+		y_hat = evaluatev(_input_set[output_index]);
 
-		if (UI) {
-			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
-			MLPPUtilities::UI(weights, bias);
+		if (ui) {
+			MLPPUtilities::CostInfo(epoch, cost_prev, cost({ y_hat }, { _output_set[output_index] }));
+			MLPPUtilities::UI(_weights, _bias);
 		}
+
 		epoch++;
 
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
-void MLPPExpReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
+void MLPPExpReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui) {
 	MLPPLinAlg alg;
 	MLPPReg regularization;
 	real_t cost_prev = 0;
 	int epoch = 1;
 
 	// Creating the mini-batches
-	int n_mini_batch = n / mini_batch_size;
-	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
-	auto inputMiniBatches = std::get<0>(batches);
-	auto outputMiniBatches = std::get<1>(batches);
+	int n_mini_batch = _n / mini_batch_size;
+	auto batches = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);
+	auto input_mini_batches = std::get<0>(batches);
+	auto output_mini_batches = std::get<1>(batches);
 
 	while (true) {
 		for (int i = 0; i < n_mini_batch; i++) {
-			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
-			cost_prev = Cost(y_hat, outputMiniBatches[i]);
+			std::vector<real_t> y_hat = evaluatem(input_mini_batches[i]);
+			cost_prev = cost(y_hat, output_mini_batches[i]);
 
-			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);
+			std::vector<real_t> error = alg.subtraction(y_hat, output_mini_batches[i]);
 
-			for (int j = 0; j < k; j++) {
+			for (int j = 0; j < _k; j++) {
 				// Calculating the weight gradient
 				real_t sum = 0;
-				for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
-					sum += error[k] * inputMiniBatches[i][k][j] * std::pow(weights[j], inputMiniBatches[i][k][j] - 1);
+				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
+					sum += error[k] * input_mini_batches[i][k][j] * std::pow(_weights[j], input_mini_batches[i][k][j] - 1);
 				}
-				real_t w_gradient = sum / outputMiniBatches[i].size();
+				real_t w_gradient = sum / output_mini_batches[i].size();
 
 				// Calculating the initial gradient
 				real_t sum2 = 0;
-				for (uint32_t k = 0; k < outputMiniBatches[i].size(); k++) {
-					sum2 += error[k] * std::pow(weights[j], inputMiniBatches[i][k][j]);
+				for (uint32_t k = 0; k < output_mini_batches[i].size(); k++) {
+					sum2 += error[k] * std::pow(_weights[j], input_mini_batches[i][k][j]);
 				}
-				real_t i_gradient = sum2 / outputMiniBatches[i].size();
+				real_t i_gradient = sum2 / output_mini_batches[i].size();
 
 				// Weight/initial updation
-				weights[j] -= learning_rate * w_gradient;
-				initial[j] -= learning_rate * i_gradient;
+				_weights[j] -= learning_rate * w_gradient;
+				_initial[j] -= learning_rate * i_gradient;
 			}
-			weights = regularization.regWeights(weights, lambda, alpha, reg);
+
+			_weights = regularization.regWeights(_weights, _lambda, _alpha, _reg);
 
 			// Calculating the bias gradient
 			real_t sum = 0;
-			for (uint32_t j = 0; j < outputMiniBatches[i].size(); j++) {
-				sum += (y_hat[j] - outputMiniBatches[i][j]);
+			for (uint32_t j = 0; j < output_mini_batches[i].size(); j++) {
+				sum += (y_hat[j] - output_mini_batches[i][j]);
 			}
-			//real_t b_gradient = sum / outputMiniBatches[i].size();
+			//real_t b_gradient = sum / output_mini_batches[i].size();
 
-			y_hat = Evaluate(inputMiniBatches[i]);
+			y_hat = evaluatem(input_mini_batches[i]);
 
-			if (UI) {
-				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
-				MLPPUtilities::UI(weights, bias);
+			if (ui) {
+				MLPPUtilities::CostInfo(epoch, cost_prev, cost(y_hat, output_mini_batches[i]));
				MLPPUtilities::UI(_weights, _bias);
 			}
 		}
 
 		epoch++;
 
 		if (epoch > max_epoch) {
 			break;
 		}
 	}
-	forwardPass();
+
+	forward_pass();
 }
 
 real_t MLPPExpReg::score() {
 	MLPPUtilities util;
-	return util.performance(y_hat, outputSet);
+
+	return util.performance(_y_hat, _output_set);
 }
 
-void MLPPExpReg::save(std::string fileName) {
+void MLPPExpReg::save(std::string file_name) {
 	MLPPUtilities util;
-	util.saveParameters(fileName, weights, initial, bias);
+
+	util.saveParameters(file_name, _weights, _initial, _bias);
 }
 
-real_t MLPPExpReg::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
+MLPPExpReg::MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg, real_t p_lambda, real_t p_alpha) {
+	_input_set = p_input_set;
+	_output_set = p_output_set;
+	_n = p_input_set.size();
+	_k = p_input_set[0].size();
+	_reg = p_reg;
+	_lambda = p_lambda;
+	_alpha = p_alpha;
+	_y_hat.resize(_n);
+
+	_weights = MLPPUtilities::weightInitialization(_k);
+	_initial = MLPPUtilities::weightInitialization(_k);
+	_bias = MLPPUtilities::biasInitialization();
+}
+
+real_t MLPPExpReg::cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
 	MLPPReg regularization;
-	class MLPPCost cost;
-	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
+	MLPPCost mlpp_cost;
+
+	return mlpp_cost.MSE(y_hat, y) + regularization.regTerm(_weights, _lambda, _alpha, _reg);
 }
 
-std::vector<real_t> MLPPExpReg::Evaluate(std::vector<std::vector<real_t>> X) {
+real_t MLPPExpReg::evaluatev(std::vector<real_t> x) {
+	real_t y_hat = 0;
+
+	for (uint32_t i = 0; i < x.size(); i++) {
+		y_hat += _initial[i] * std::pow(_weights[i], x[i]);
+	}
+
+	return y_hat + _bias;
+}
+
+std::vector<real_t> MLPPExpReg::evaluatem(std::vector<std::vector<real_t>> X) {
 	std::vector<real_t> y_hat;
 	y_hat.resize(X.size());
 
 	for (uint32_t i = 0; i < X.size(); i++) {
 		y_hat[i] = 0;
 		for (uint32_t j = 0; j < X[i].size(); j++) {
-			y_hat[i] += initial[j] * std::pow(weights[j], X[i][j]);
+			y_hat[i] += _initial[j] * std::pow(_weights[j], X[i][j]);
 		}
-		y_hat[i] += bias;
+
+		y_hat[i] += _bias;
 	}
+
 	return y_hat;
 }
 
-real_t MLPPExpReg::Evaluate(std::vector<real_t> x) {
-	real_t y_hat = 0;
-	for (uint32_t i = 0; i < x.size(); i++) {
-		y_hat += initial[i] * std::pow(weights[i], x[i]);
-	}
-	return y_hat + bias;
-}
-
 // a * w^x + b
-void MLPPExpReg::forwardPass() {
-	y_hat = Evaluate(inputSet);
+void MLPPExpReg::forward_pass() {
+	_y_hat = evaluatem(_input_set);
 }
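For reference, the update rules implemented in gradient_descent above can be written out explicitly. This is a sketch, assuming MLPPCost::MSE carries the usual 1/(2n) scaling so the factor of 2 cancels; a stands for _initial, w for _weights, b for _bias, and e is the error vector from alg.subtraction:

% Model and per-parameter update directions matching the loops above.
\hat{y}_j = \sum_{i=1}^{k} a_i \, w_i^{x_{ji}} + b, \qquad e_j = \hat{y}_j - y_j
% i_gradient and b_gradient are the exact MSE derivatives:
\frac{\partial C}{\partial a_i} = \frac{1}{n} \sum_{j=1}^{n} e_j \, w_i^{x_{ji}}, \qquad
\frac{\partial C}{\partial b} = \frac{1}{n} \sum_{j=1}^{n} e_j
% w_gradient as computed in the code:
\mathrm{w\_gradient}_i = \frac{1}{n} \sum_{j=1}^{n} e_j \, x_{ji} \, w_i^{x_{ji} - 1}

The first two match the MSE derivatives exactly; the weight update follows the chain-rule factor \partial \hat{y}_j / \partial w_i = a_i x_{ji} w_i^{x_{ji} - 1} except that, as written in the loop above, the a_i factor is not applied.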

@@ -15,36 +15,41 @@
 class MLPPExpReg {
 public:
-	MLPPExpReg(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg = "None", real_t lambda = 0.5, real_t alpha = 0.5);
-	std::vector<real_t> modelSetTest(std::vector<std::vector<real_t>> X);
-	real_t modelTest(std::vector<real_t> x);
-	void gradientDescent(real_t learning_rate, int max_epoch, bool UI = 1);
-	void SGD(real_t learning_rate, int max_epoch, bool UI = 1);
-	void MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI = 1);
+	std::vector<real_t> model_set_test(std::vector<std::vector<real_t>> X);
+	real_t model_test(std::vector<real_t> x);
+
+	void gradient_descent(real_t learning_rate, int max_epoch, bool ui = false);
+	void sgd(real_t learning_rate, int max_epoch, bool ui = false);
+	void mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, bool ui = false);
+
 	real_t score();
-	void save(std::string fileName);
+
+	void save(std::string file_name);
+
+	MLPPExpReg(std::vector<std::vector<real_t>> p_input_set, std::vector<real_t> p_output_set, std::string p_reg = "None", real_t p_lambda = 0.5, real_t p_alpha = 0.5);
 
 private:
-	real_t Cost(std::vector<real_t> y_hat, std::vector<real_t> y);
-	std::vector<real_t> Evaluate(std::vector<std::vector<real_t>> X);
-	real_t Evaluate(std::vector<real_t> x);
-	void forwardPass();
+	real_t cost(std::vector<real_t> y_hat, std::vector<real_t> y);
 
-	std::vector<std::vector<real_t>> inputSet;
-	std::vector<real_t> outputSet;
-	std::vector<real_t> y_hat;
-	std::vector<real_t> weights;
-	std::vector<real_t> initial;
-	real_t bias;
+	real_t evaluatev(std::vector<real_t> x);
+	std::vector<real_t> evaluatem(std::vector<std::vector<real_t>> X);
 
-	int n;
-	int k;
+	void forward_pass();
+
+	std::vector<std::vector<real_t>> _input_set;
+	std::vector<real_t> _output_set;
+	std::vector<real_t> _y_hat;
+	std::vector<real_t> _weights;
+	std::vector<real_t> _initial;
+	real_t _bias;
+
+	int _n;
+	int _k;
 
 	// Regularization Params
-	std::string reg;
-	real_t lambda;
-	real_t alpha; /* This is the controlling param for Elastic Net*/
+	std::string _reg;
+	real_t _lambda;
+	real_t _alpha; /* This is the controlling param for Elastic Net*/
 };
 
 #endif /* ExpReg_hpp */
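A minimal usage sketch of the renamed public API declared above. The include paths and the wrapper function are assumptions for illustration (they depend on where the module sits in the tree); the data are the same toy values the test suite uses, and the hyperparameters are illustrative, not prescriptive.

// Hypothetical example; the "exp_reg.h" and "lin_alg.h" paths are assumptions.
#include "exp_reg.h"
#include "lin_alg.h"

#include <vector>

void exp_reg_usage_sketch() {
	std::vector<std::vector<real_t>> input_set = { { 0, 1, 2, 3, 4 } };
	std::vector<real_t> output_set = { 1, 2, 4, 8, 16 };

	MLPPLinAlg alg;

	// reg defaults to "None", so _lambda/_alpha are unused here.
	MLPPExpReg model(alg.transpose(input_set), output_set);

	model.gradient_descent(0.001, 10000, false); // full-batch training, UI prints off

	real_t y_single = model.model_test({ 5 }); // single-sample prediction
	std::vector<real_t> y_all = model.model_set_test(alg.transpose(input_set));
	(void)y_single;
	(void)y_all;
}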


@@ -434,9 +434,15 @@ void MLPPTests::test_exp_reg_regression(bool ui) {
 	// EXPREG REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 0, 1, 2, 3, 4 } };
 	std::vector<real_t> outputSet = { 1, 2, 4, 8, 16 };
 
+	MLPPExpRegOld model_old(alg.transpose(inputSet), outputSet);
+	model_old.SGD(0.001, 10000, ui);
+	alg.printVector(model_old.modelSetTest(alg.transpose(inputSet)));
+	std::cout << "ACCURACY: " << 100 * model_old.score() << "%" << std::endl;
+
 	MLPPExpReg model(alg.transpose(inputSet), outputSet);
-	model.SGD(0.001, 10000, ui);
-	alg.printVector(model.modelSetTest(alg.transpose(inputSet)));
+	model.sgd(0.001, 10000, ui);
+	alg.printVector(model.model_set_test(alg.transpose(inputSet)));
 	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 }
 
 void MLPPTests::test_tanh_regression(bool ui) {
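A quick sanity check on the exp-reg test above: the hypothesis a * w^x + b can fit the test data exactly with a = 1, w = 2, b = 0, since 2^x over x = 0..4 yields { 1, 2, 4, 8, 16 }, which is why SGD with enough epochs should report accuracy near 100%. A standalone sketch (plain C++, no MLPP dependencies) evaluating that exact solution:

#include <cmath>
#include <cstdio>

int main() {
	// Exact parameters for the test data { 0, 1, 2, 3, 4 } -> { 1, 2, 4, 8, 16 }.
	const double a = 1.0, w = 2.0, b = 0.0;

	for (int x = 0; x <= 4; x++) {
		std::printf("x = %d -> y_hat = %g\n", x, a * std::pow(w, x) + b);
	}

	return 0;
}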