"Vectorized" the implementation of SGD for Log & Lin Reg

This commit is contained in:
novak_99 2021-05-28 19:26:10 -07:00
parent c69693b86b
commit 4c65bb36d7
3 changed files with 22 additions and 38 deletions

View File

@ -65,6 +65,7 @@ namespace MLPP{
} }
void LinReg::SGD(double learning_rate, int max_epoch, bool UI){ void LinReg::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg;
Reg regularization; Reg regularization;
Utilities util; Utilities util;
double cost_prev = 0; double cost_prev = 0;
@ -79,24 +80,15 @@ namespace MLPP{
double y_hat = Evaluate(inputSet[outputIndex]); double y_hat = Evaluate(inputSet[outputIndex]);
cost_prev = Cost({y_hat}, {outputSet[outputIndex]}); cost_prev = Cost({y_hat}, {outputSet[outputIndex]});
double error = y_hat - outputSet[outputIndex];
for(int i = 0; i < k; i++){ // Weight updation
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
// Calculating the weight gradients
double w_gradient = (y_hat - outputSet[outputIndex]) * inputSet[outputIndex][i];
// Weight updation
weights[i] -= learning_rate * w_gradient;
}
weights = regularization.regWeights(weights, lambda, alpha, reg); weights = regularization.regWeights(weights, lambda, alpha, reg);
// Calculating the bias gradients
double b_gradient = (y_hat - outputSet[outputIndex]);
// Bias updation // Bias updation
bias -= learning_rate * b_gradient; bias -= learning_rate * error;
y_hat = Evaluate({inputSet[outputIndex]}); y_hat = Evaluate({inputSet[outputIndex]});
if(UI) { if(UI) {

View File

@ -92,6 +92,7 @@ namespace MLPP{
} }
void LogReg::SGD(double learning_rate, int max_epoch, bool UI){ void LogReg::SGD(double learning_rate, int max_epoch, bool UI){
LinAlg alg;
Reg regularization; Reg regularization;
Utilities util; Utilities util;
double cost_prev = 0; double cost_prev = 0;
@ -106,24 +107,15 @@ namespace MLPP{
double y_hat = Evaluate(inputSet[outputIndex]); double y_hat = Evaluate(inputSet[outputIndex]);
cost_prev = Cost({y_hat}, {outputSet[outputIndex]}); cost_prev = Cost({y_hat}, {outputSet[outputIndex]});
double error = y_hat - outputSet[outputIndex];
for(int i = 0; i < k; i++){ // Weight updation
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
// Calculating the weight gradients
double w_gradient = (y_hat - outputSet[outputIndex]) * inputSet[outputIndex][i];
// Weight updation
weights[i] -= learning_rate * w_gradient;
}
weights = regularization.regWeights(weights, lambda, alpha, reg); weights = regularization.regWeights(weights, lambda, alpha, reg);
// Calculating the bias gradients
double b_gradient = (y_hat - outputSet[outputIndex]);
// Bias updation // Bias updation
bias -= learning_rate * b_gradient; bias -= learning_rate * error;
y_hat = Evaluate({inputSet[outputIndex]}); y_hat = Evaluate({inputSet[outputIndex]});
if(UI) { if(UI) {

View File

@ -131,26 +131,26 @@ int main() {
// UniLinReg model(inputSet, outputSet); // UniLinReg model(inputSet, outputSet);
// alg.printVector(model.modelSetTest(inputSet)); // alg.printVector(model.modelSetTest(inputSet));
// // MULTIVARIATE LINEAR REGRESSION // MULTIVARIATE LINEAR REGRESSION
// std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}}; // std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}};
// std::vector<double> outputSet = {2,4,6,8,10,12,14,16,18,20}; // std::vector<double> outputSet = {2,4,6,8,10,12,14,16,18,20};
// LinReg model(alg.transpose(inputSet), outputSet); // Can use Lasso, Ridge, ElasticNet Reg // LinReg model(alg.transpose(inputSet), outputSet); // Can use Lasso, Ridge, ElasticNet Reg
// model.normalEquation(); //model.normalEquation();
// model.gradientDescent(0.001, 30000, 1); // model.gradientDescent(0.001, 30000, 1);
// model.SGD(0.001, 30000, 1); // model.SGD(0.001, 30000, 1);
// model.MBGD(0.001, 10000, 2, 1); // model.MBGD(0.001, 10000, 2, 1);
// alg.printVector(model.modelSetTest((alg.transpose(inputSet)))); // alg.printVector(model.modelSetTest((alg.transpose(inputSet))));
// std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl; // std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
// // LOGISTIC REGRESSION // LOGISTIC REGRESSION
// std::vector<std::vector<double>> inputSet; std::vector<std::vector<double>> inputSet;
// std::vector<double> outputSet; std::vector<double> outputSet;
// data.setData(30, "/Users/marcmelikyan/Desktop/Data/BreastCancer.csv", inputSet, outputSet); data.setData(30, "/Users/marcmelikyan/Desktop/Data/BreastCancer.csv", inputSet, outputSet);
// LogReg model(inputSet, outputSet); LogReg model(inputSet, outputSet);
// //model.SGD(0.1, 50000, 0); model.SGD(0.001, 100000, 0);
// model.MLE(0.1, 10000, 0); // model.MLE(0.1, 10000, 0);
// alg.printVector(model.modelSetTest(inputSet)); alg.printVector(model.modelSetTest(inputSet));
// std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl; std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
// // PROBIT REGRESSION // // PROBIT REGRESSION
// std::vector<std::vector<double>> inputSet; // std::vector<std::vector<double>> inputSet;