//
// LinReg.cpp
//
// Created by Marc Melikyan on 10/2/20.
//
#include "lin_reg.h"

#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../stat/stat.h"
#include "../utilities/utilities.h"

#include <cmath>
#include <iostream>
#include <random>
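
// The constructor stores the data set (n = number of training examples, k = number of
// input features) and initializes weights (length k) and bias via the MLPPUtilities helpers.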
MLPPLinReg::MLPPLinReg(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, std::string p_reg, real_t p_lambda, real_t p_alpha) {
	inputSet = p_inputSet;
	outputSet = p_outputSet;
	n = p_inputSet.size();
	k = p_inputSet[0].size();
	reg = p_reg;
	lambda = p_lambda;
	alpha = p_alpha;

	y_hat.resize(n);

	weights = MLPPUtilities::weightInitialization(k);
	bias = MLPPUtilities::biasInitialization();
}

std::vector<real_t> MLPPLinReg::modelSetTest(std::vector<std::vector<real_t>> X) {
	return Evaluate(X);
}

real_t MLPPLinReg::modelTest(std::vector<real_t> x) {
	return Evaluate(x);
}
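
// Newton-Raphson training: a full-batch second-order update of the form
//   w <- w - (learning_rate / n) * H^-1 * X^T (y_hat - y),  with H = X^T X,
// followed by the regularization step (regWeights). The bias keeps the plain
// first-order gradient, since its second derivative is constant.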
void MLPPLinReg::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();
	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);

		// Calculating the weight gradients (2nd derivative)
		std::vector<real_t> first_derivative = alg.mat_vec_mult(alg.transpose(inputSet), error);
		std::vector<std::vector<real_t>> second_derivative = alg.matmult(alg.transpose(inputSet), inputSet);
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(alg.inverse(second_derivative)), first_derivative)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients (2nd derivative)
		bias -= learning_rate * alg.sum_elements(error) / n; // We keep this the same. The 2nd derivative is just [1].

		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}
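
// Batch gradient descent on the regularized MSE cost:
//   w <- w - (learning_rate / n) * X^T (y_hat - y)
//   b <- b - learning_rate * mean(y_hat - y)
// iterated until max_epoch is reached.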
void MLPPLinReg::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;
	forwardPass();
	while (true) {
		cost_prev = Cost(y_hat, outputSet);

		std::vector<real_t> error = alg.subtraction(y_hat, outputSet);

		// Calculating the weight gradients
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / n, alg.mat_vec_mult(alg.transpose(inputSet), error)));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Calculating the bias gradients
		bias -= learning_rate * alg.sum_elements(error) / n;
		forwardPass();

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputSet));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
}
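
// Stochastic gradient descent: each iteration samples one training example at
// random and applies
//   w <- w - learning_rate * error * x_i,   b <- b - learning_rate * error,
// where error = y_hat_i - y_i.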
void MLPPLinReg::SGD(real_t learning_rate, int max_epoch, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	while (true) {
		std::random_device rd;
		std::default_random_engine generator(rd());
		std::uniform_int_distribution<int> distribution(0, int(n - 1));
		int outputIndex = distribution(generator);

		real_t y_hat = Evaluate(inputSet[outputIndex]);
		cost_prev = Cost({ y_hat }, { outputSet[outputIndex] });

		real_t error = y_hat - outputSet[outputIndex];

		// Weight update
		weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate * error, inputSet[outputIndex]));
		weights = regularization.regWeights(weights, lambda, alpha, reg);

		// Bias update
		bias -= learning_rate * error;

		y_hat = Evaluate(inputSet[outputIndex]);

		if (UI) {
			MLPPUtilities::CostInfo(epoch, cost_prev, Cost({ y_hat }, { outputSet[outputIndex] }));
			MLPPUtilities::UI(weights, bias);
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
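
// Mini-batch gradient descent: the data set is split into n / mini_batch_size
// batches and the batch-gradient update is applied to each batch once per epoch.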
void MLPPLinReg::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error)));
			weights = regularization.regWeights(weights, lambda, alpha, reg);

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size();

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
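
// Mini-batch gradient descent with classical momentum:
//   v <- gamma * v + learning_rate * grad
//   w <- w - v
// where grad is the regularized mini-batch weight gradient; the bias is updated as in MBGD.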
void MLPPLinReg::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Momentum.
	std::vector<real_t> v = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			v = alg.addition(alg.scalarMultiply(gamma, v), alg.scalarMultiply(learning_rate, weight_grad));
			weights = alg.subtraction(weights, v);

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
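
// Nesterov accelerated gradient: the weights are first shifted by -gamma * v
// (the look-ahead step) before the gradient is computed, then the momentum
// update v <- gamma * v + learning_rate * grad, w <- w - v is applied.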
void MLPPLinReg::NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for NAG.
	std::vector<real_t> v = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			weights = alg.subtraction(weights, alg.scalarMultiply(gamma, v)); // Nesterov look-ahead step.

			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			v = alg.addition(alg.scalarMultiply(gamma, v), alg.scalarMultiply(learning_rate, weight_grad));
			weights = alg.subtraction(weights, v);

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
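
// Adagrad-style update:
//   v <- grad^2 (element-wise)
//   w <- w - learning_rate * grad / sqrt(v + e)
// Note that v is overwritten each step here; textbook Adagrad accumulates the
// squared gradients across iterations.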
void MLPPLinReg::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adagrad.
	std::vector<real_t> v = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			v = alg.hadamard_product(weight_grad, weight_grad);
			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(weight_grad, alg.sqrt(alg.scalarAdd(e, v)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
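
// Labeled Adadelta, though the update implemented below is RMSProp-like:
//   v <- b1 * v + (1 - b1) * grad^2 (element-wise)
//   w <- w - learning_rate * grad / sqrt(v + e)
// Full Adadelta would additionally track a running average of squared parameter updates.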
void MLPPLinReg::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
	// Adagrad upgrade. Momentum is applied.
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adadelta.
	std::vector<real_t> v = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			v = alg.addition(alg.scalarMultiply(b1, v), alg.scalarMultiply(1 - b1, alg.hadamard_product(weight_grad, weight_grad)));
			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(weight_grad, alg.sqrt(alg.scalarAdd(e, v)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
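
// Adam: first and second moment estimates with bias correction,
//   m <- b1 * m + (1 - b1) * grad,   v <- b2 * v + (1 - b2) * grad^2
//   m_hat = m / (1 - b1^epoch),      v_hat = v / (1 - b2^epoch)
//   w <- w - learning_rate * m_hat / (sqrt(v_hat) + e)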
void MLPPLinReg::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Adam.
	std::vector<real_t> m = alg.zerovec(weights.size());
	std::vector<real_t> v = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			v = alg.addition(alg.scalarMultiply(b2, v), alg.scalarMultiply(1 - b2, alg.exponentiate(weight_grad, 2)));

			std::vector<real_t> m_hat = alg.scalarMultiply(1 / (1 - pow(b1, epoch)), m);
			std::vector<real_t> v_hat = alg.scalarMultiply(1 / (1 - pow(b2, epoch)), v);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_hat, alg.scalarAdd(e, alg.sqrt(v_hat)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
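
// Adamax: like Adam, but the second moment is replaced by an infinity-norm estimate,
//   u <- max(b2 * u, |grad|)
//   w <- w - learning_rate * m_hat / u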
void MLPPLinReg::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	std::vector<real_t> m = alg.zerovec(weights.size());
	std::vector<real_t> u = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			u = alg.max(alg.scalarMultiply(b2, u), alg.abs(weight_grad));

			std::vector<real_t> m_hat = alg.scalarMultiply(1 / (1 - pow(b1, epoch)), m);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_hat, u)));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
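
// Nadam: Adam with a Nesterov-style correction of the first moment,
//   m_final = b1 * m + ((1 - b1) / (1 - b1^epoch)) * grad
//   w <- w - learning_rate * m_final / (sqrt(v_hat) + e),  with v_hat = v / (1 - b2^epoch)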
void MLPPLinReg::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
	MLPPLinAlg alg;
	MLPPReg regularization;
	real_t cost_prev = 0;
	int epoch = 1;

	// Creating the mini-batches
	int n_mini_batch = n / mini_batch_size;
	auto batches = MLPPUtilities::createMiniBatches(inputSet, outputSet, n_mini_batch);
	auto inputMiniBatches = std::get<0>(batches);
	auto outputMiniBatches = std::get<1>(batches);

	// Initializing necessary components for Nadam.
	std::vector<real_t> m = alg.zerovec(weights.size());
	std::vector<real_t> v = alg.zerovec(weights.size());
	std::vector<real_t> m_final = alg.zerovec(weights.size());

	while (true) {
		for (int i = 0; i < n_mini_batch; i++) {
			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
			cost_prev = Cost(y_hat, outputMiniBatches[i]);

			std::vector<real_t> error = alg.subtraction(y_hat, outputMiniBatches[i]);

			// Calculating the weight gradients
			std::vector<real_t> gradient = alg.scalarMultiply(1.0 / outputMiniBatches[i].size(), alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), error));
			std::vector<real_t> RegDerivTerm = regularization.regDerivTerm(weights, lambda, alpha, reg);
			std::vector<real_t> weight_grad = alg.addition(gradient, RegDerivTerm); // Weight_grad_final

			m = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply(1 - b1, weight_grad));
			v = alg.addition(alg.scalarMultiply(b2, v), alg.scalarMultiply(1 - b2, alg.exponentiate(weight_grad, 2)));
			m_final = alg.addition(alg.scalarMultiply(b1, m), alg.scalarMultiply((1 - b1) / (1 - pow(b1, epoch)), weight_grad));

			std::vector<real_t> v_hat = alg.scalarMultiply(1 / (1 - pow(b2, epoch)), v);

			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, alg.elementWiseDivision(m_final, alg.scalarAdd(e, alg.sqrt(v_hat)))));

			// Calculating the bias gradients
			bias -= learning_rate * alg.sum_elements(error) / outputMiniBatches[i].size(); // As normal

			y_hat = Evaluate(inputMiniBatches[i]);

			if (UI) {
				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(y_hat, outputMiniBatches[i]));
				MLPPUtilities::UI(weights, bias);
			}
		}

		epoch++;

		if (epoch > max_epoch) {
			break;
		}
	}
	forwardPass();
}
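
// Closed-form solution via the normal equation:
//   w = (X^T X)^-1 X^T y         (or (X^T X + lambda * I)^-1 X^T y for Ridge)
//   b = mean(y) - w . x_means
// Falls back with an error message when X^T X is not invertible.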
void MLPPLinReg::normalEquation() {
	MLPPLinAlg alg;
	MLPPStat stat;
	std::vector<real_t> x_means;
	std::vector<std::vector<real_t>> inputSetT = alg.transpose(inputSet);

	x_means.resize(inputSetT.size());

	for (uint32_t i = 0; i < inputSetT.size(); i++) {
		x_means[i] = (stat.mean(inputSetT[i]));
	}

	//try {
	std::vector<real_t> temp;
	temp.resize(k);
	temp = alg.mat_vec_mult(alg.inverse(alg.matmult(alg.transpose(inputSet), inputSet)), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));

	if (std::isnan(temp[0])) {
		//throw 99;
		//TODO ERR_FAIL_COND
		std::cout << "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent." << std::endl;
		return;
	} else {
		if (reg == "Ridge") {
			weights = alg.mat_vec_mult(alg.inverse(alg.addition(alg.matmult(alg.transpose(inputSet), inputSet), alg.scalarMultiply(lambda, alg.identity(k)))), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));
		} else {
			weights = alg.mat_vec_mult(alg.inverse(alg.matmult(alg.transpose(inputSet), inputSet)), alg.mat_vec_mult(alg.transpose(inputSet), outputSet));
		}

		bias = stat.mean(outputSet) - alg.dot(weights, x_means);

		forwardPass();
	}
	//} catch (int err_num) {
	//	std::cout << "ERR " << err_num << ": Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent." << std::endl;
	//}
}

real_t MLPPLinReg::score() {
	MLPPUtilities util;
	return util.performance(y_hat, outputSet);
}

void MLPPLinReg::save(std::string fileName) {
	MLPPUtilities util;
	util.saveParameters(fileName, weights, bias);
}

real_t MLPPLinReg::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
	MLPPReg regularization;
	class MLPPCost cost;
	return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}

std::vector<real_t> MLPPLinReg::Evaluate(std::vector<std::vector<real_t>> X) {
	MLPPLinAlg alg;
	return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}

real_t MLPPLinReg::Evaluate(std::vector<real_t> x) {
	MLPPLinAlg alg;
	return alg.dot(weights, x) + bias;
}

// wTx + b
void MLPPLinReg::forwardPass() {
	y_hat = Evaluate(inputSet);
}
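
// Minimal usage sketch (illustrative only; X and y are placeholder data containers):
//   MLPPLinReg model(X, y, "Ridge", /*lambda=*/0.5, /*alpha=*/0.5);
//   model.gradientDescent(0.0001, 1000, false); // or SGD / MBGD / Adam / normalEquation()
//   real_t acc = model.score();
//   std::vector<real_t> preds = model.modelSetTest(X);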