"Vectorized" implementation of SGD for Tanh Reg

novak_99 2021-05-28 20:56:36 -07:00
parent 687dada9f1
commit e5598185dd
2 changed files with 6 additions and 14 deletions
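The change replaces the per-component gradient loop in SGD with a single vector update. As a rough sketch of the math behind it (assuming the model \hat{y} = \tanh(w^\top x + b) and a squared-error per-sample cost, which is consistent with the gradient terms visible in the diff), the per-sample gradients are:

    \frac{\partial C}{\partial w} = (\hat{y} - y)\,(1 - \hat{y}^2)\, x, \qquad
    \frac{\partial C}{\partial b} = (\hat{y} - y)\,(1 - \hat{y}^2)

so the whole weight vector can be updated at once, w \leftarrow w - \eta\,(\hat{y} - y)(1 - \hat{y}^2)\, x, rather than one component at a time.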


@@ -66,6 +66,7 @@ namespace MLPP{
         }
     }
 
     void TanhReg::SGD(double learning_rate, int max_epoch, bool UI){
+        LinAlg alg;
         Reg regularization;
         Utilities util;
         double cost_prev = 0;
@@ -80,24 +81,15 @@ namespace MLPP{
             double y_hat = Evaluate(inputSet[outputIndex]);
             cost_prev = Cost({y_hat}, {outputSet[outputIndex]});
 
+            double error = y_hat - outputSet[outputIndex];
 
-            for(int i = 0; i < k; i++){
-                // Calculating the weight gradients
-                double w_gradient = (y_hat - outputSet[outputIndex]) * (1 - y_hat * y_hat) * inputSet[outputIndex][i];
-
-                // Weight updation
-                weights[i] -= learning_rate * w_gradient;
-            }
+            // Weight Updation
+            weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error * (1 - y_hat * y_hat), inputSet[outputIndex]));
             weights = regularization.regWeights(weights, lambda, alpha, reg);
 
-            // Calculating the bias gradients
-            double b_gradient = (y_hat - outputSet[outputIndex]) * (1 - y_hat * y_hat);
-
             // Bias updation
-            bias -= learning_rate * b_gradient;
+            bias -= learning_rate * error * (1 - y_hat * y_hat);
 
             y_hat = Evaluate({inputSet[outputIndex]});
 
             if(UI) {

BIN  a.out
Binary file not shown.