//
// DualSVC.cpp
//
// Created by Marc Melikyan on 10/2/20.
//

#include "dual_svc.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../lin_alg/lin_alg.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <random>
// Runs the model on a whole dataset: one sign prediction per row of X.
Ref<MLPPVector> MLPPDualSVC::model_set_test(const Ref<MLPPMatrix> &X) {
	Ref<MLPPVector> predictions = evaluatem(X);

	return predictions;
}
// Runs the model on a single example vector and returns its sign prediction.
real_t MLPPDualSVC::model_test(const Ref<MLPPVector> &x) {
	const real_t prediction = evaluatev(x);

	return prediction;
}
void MLPPDualSVC : : gradient_descent ( real_t learning_rate , int max_epoch , bool ui ) {
MLPPCost mlpp_cost ;
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-01-25 00:29:02 +01:00
MLPPLinAlg alg ;
2023-01-25 00:54:50 +01:00
MLPPReg regularization ;
2023-01-27 13:01:16 +01:00
real_t cost_prev = 0 ;
2023-01-24 19:00:54 +01:00
int epoch = 1 ;
2023-02-12 12:36:52 +01:00
forward_pass ( ) ;
2023-01-24 19:00:54 +01:00
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > input_set_i_row_tmp ;
input_set_i_row_tmp . instance ( ) ;
input_set_i_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
Ref < MLPPVector > input_set_j_row_tmp ;
input_set_j_row_tmp . instance ( ) ;
input_set_j_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
2023-01-24 19:00:54 +01:00
while ( true ) {
2023-02-12 12:36:52 +01:00
cost_prev = cost ( _alpha , _input_set , _output_set ) ;
2023-01-24 19:00:54 +01:00
2023-02-16 19:15:36 +01:00
_alpha = alg . subtractionnv ( _alpha , alg . scalar_multiplynv ( learning_rate , mlpp_cost . dual_form_svm_deriv ( _alpha , _input_set , _output_set ) ) ) ;
2023-01-24 19:00:54 +01:00
2023-02-12 12:36:52 +01:00
alpha_projection ( ) ;
2023-01-24 19:00:54 +01:00
// Calculating the bias
2023-01-27 13:01:16 +01:00
real_t biasGradient = 0 ;
2023-02-16 19:15:36 +01:00
for ( int i = 0 ; i < _alpha - > size ( ) ; i + + ) {
2023-01-27 13:01:16 +01:00
real_t sum = 0 ;
2023-02-16 19:15:36 +01:00
if ( _alpha - > get_element ( i ) < _C & & _alpha - > get_element ( i ) > 0 ) {
for ( int j = 0 ; j < _alpha - > size ( ) ; j + + ) {
if ( _alpha - > get_element ( j ) > 0 ) {
_input_set - > get_row_into_mlpp_vector ( i , input_set_i_row_tmp ) ;
_input_set - > get_row_into_mlpp_vector ( j , input_set_j_row_tmp ) ;
sum + = _alpha - > get_element ( j ) * _output_set - > get_element ( j ) * alg . dotv ( input_set_j_row_tmp , input_set_i_row_tmp ) ; // TO DO: DON'T forget to add non-linear kernelizations.
2023-01-24 19:00:54 +01:00
}
}
}
2023-02-16 19:15:36 +01:00
biasGradient = ( 1 - _output_set - > get_element ( i ) * sum ) / _output_set - > get_element ( i ) ;
2023-01-24 19:00:54 +01:00
break ;
}
2023-02-12 12:36:52 +01:00
_bias - = biasGradient * learning_rate ;
forward_pass ( ) ;
2023-01-24 19:00:54 +01:00
// UI PORTION
2023-02-12 12:36:52 +01:00
if ( ui ) {
2023-02-16 19:15:36 +01:00
MLPPUtilities : : cost_info ( epoch , cost_prev , cost ( _alpha , _input_set , _output_set ) ) ;
MLPPUtilities : : print_ui_vb ( _alpha , _bias ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-12 12:36:52 +01:00
2023-01-24 19:00:54 +01:00
epoch + + ;
if ( epoch > max_epoch ) {
break ;
}
}
}
2023-01-27 13:01:16 +01:00
// void MLPPDualSVC::SGD(real_t learning_rate, int max_epoch, bool UI){
//	class MLPPCost cost;
//	MLPPActivation avn;
//	MLPPLinAlg alg;
//	MLPPReg regularization;

//	real_t cost_prev = 0;
//	int epoch = 1;

//	while(true){
//		std::random_device rd;
//		std::default_random_engine generator(rd());
//		std::uniform_int_distribution<int> distribution(0, int(n - 1));
//		int outputIndex = distribution(generator);

//		cost_prev = Cost(alpha, _input_set[outputIndex], _output_set[outputIndex]);

//		// Bias updation
//		bias -= learning_rate * costDeriv;

//		y_hat = Evaluate({_input_set[outputIndex]});

//		if(UI) {
//			MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha));
//			MLPPUtilities::UI(weights, bias);
//		}

//		epoch++;
//		if(epoch > max_epoch) { break; }
//	}
//	forwardPass();
// }

// void MLPPDualSVC::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){
//	class MLPPCost cost;
//	MLPPActivation avn;
//	MLPPLinAlg alg;
//	MLPPReg regularization;

//	real_t cost_prev = 0;
//	int epoch = 1;

//	// Creating the mini-batches
//	int n_mini_batch = n/mini_batch_size;
//	auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);

//	while(true){
//		for(int i = 0; i < n_mini_batch; i++){
//			std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
//			std::vector<real_t> z = propagate(inputMiniBatches[i]);
//			cost_prev = Cost(z, outputMiniBatches[i], weights, C);

//			// Calculating the weight gradients
//			weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate/n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), cost.HingeLossDeriv(z, outputMiniBatches[i], C))));
//			weights = regularization.regWeights(weights, learning_rate/n, 0, "Ridge");

//			// Calculating the bias gradients
//			bias -= learning_rate * alg.sum_elements(cost.HingeLossDeriv(y_hat, outputMiniBatches[i], C)) / n;

//			forwardPass();

//			y_hat = Evaluate(inputMiniBatches[i]);

//			if(UI) {
//				MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
//				MLPPUtilities::UI(weights, bias);
//			}
//		}
//		epoch++;
//		if(epoch > max_epoch) { break; }
//	}
//	forwardPass();
// }
// Fraction of training examples whose cached prediction matches its label.
real_t MLPPDualSVC::score() {
	MLPPUtilities utilities;

	return utilities.performance_vec(_y_hat, _output_set);
}
// Persists the learned parameters to `file_name`.
// NOTE(review): currently a stub — the actual save call is commented out,
// so this method has no effect; `util` is unused until it is restored.
void MLPPDualSVC::save(const String &file_name) {
	MLPPUtilities util;
	//util.saveParameters(file_name, _alpha, _bias);
}
// Constructs a dual SVM over the given dataset.
// p_input_set:  n x k matrix of training examples (one row per example).
// p_output_set: labels for each example.
//               NOTE(review): typed Ref<MLPPMatrix> here but consumed
//               element-wise as a vector elsewhere — confirm against header.
// p_C:          box-constraint upper bound on each alpha.
// p_kernel:     kernel method to use.
MLPPDualSVC::MLPPDualSVC(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPMatrix> &p_output_set, real_t p_C, KernelMethod p_kernel) {
	_input_set = p_input_set;
	_output_set = p_output_set;
	_C = p_C;
	_kernel = p_kernel;

	_n = p_input_set->size().y;
	_k = p_input_set->size().x;

	MLPPUtilities utilities;

	_y_hat.instance();
	_y_hat->resize(_n);

	_bias = utilities.bias_initializationr();

	_alpha.instance();
	_alpha->resize(_n);
	utilities.weight_initializationv(_alpha); // One alpha for all training examples, as per the lagrangian multipliers.

	_K = kernel_functionm(_input_set, _input_set, _kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated.
}
// Default constructor: members keep their defaults; the model is not usable
// until constructed with a dataset.
MLPPDualSVC::MLPPDualSVC() {
}
// Destructor: all members are reference-counted (Ref<T>) or trivially
// destructible, so nothing needs to be released manually.
MLPPDualSVC::~MLPPDualSVC() {
}
// Dual-form SVM objective for the given multipliers and dataset.
real_t MLPPDualSVC::cost(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y) {
	class MLPPCost mlpp_cost;

	return mlpp_cost.dual_form_svm(alpha, X, y);
}
// Classifies a single example: the sign of its raw propagation score.
real_t MLPPDualSVC::evaluatev(const Ref<MLPPVector> &x) {
	MLPPActivation activation;

	const real_t raw_score = propagatev(x);

	return activation.sign_normr(raw_score);
}
// Raw (pre-sign) decision value for one example: the sum over support
// vectors of alpha_j * y_j * <x_j, x>, plus the bias term.
real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
	MLPPLinAlg alg;

	Ref<MLPPVector> support_row;
	support_row.instance();
	support_row->resize(_input_set->size().x);

	real_t score = 0;

	for (int j = 0; j < _alpha->size(); j++) {
		if (_alpha->get_element(j) == 0) {
			continue; // Non-support vectors contribute nothing.
		}

		_input_set->get_row_into_mlpp_vector(j, support_row);
		score += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotv(support_row, x); // TO DO: DON'T forget to add non-linear kernelizations.
	}

	return score + _bias;
}
// Classifies every row of X: element-wise sign of the raw scores.
Ref<MLPPVector> MLPPDualSVC::evaluatem(const Ref<MLPPMatrix> &X) {
	MLPPActivation activation;

	Ref<MLPPVector> raw_scores = propagatem(X);

	return activation.sign_normv(raw_scores);
}
// Raw (pre-sign) decision values for every row of X. For each query row x_i,
// computes the sum over support vectors of alpha_j * y_j * <x_j, x_i>,
// plus the bias term.
Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {
	MLPPLinAlg alg;

	Ref<MLPPVector> z;
	z.instance();
	z->resize(X->size().y);

	// Scratch rows reused across iterations to avoid reallocations.
	Ref<MLPPVector> input_set_row_tmp;
	input_set_row_tmp.instance();
	input_set_row_tmp->resize(_input_set->size().x);

	Ref<MLPPVector> x_row_tmp;
	x_row_tmp.instance();
	x_row_tmp->resize(X->size().x);

	for (int i = 0; i < X->size().y; i++) {
		real_t sum = 0;

		// The query row depends only on i, so fetch it once per outer
		// iteration instead of once per (i, j) pair as before.
		X->get_row_into_mlpp_vector(i, x_row_tmp);

		for (int j = 0; j < _alpha->size(); j++) {
			if (_alpha->get_element(j) != 0) {
				_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
				sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
			}
		}

		sum += _bias;

		z->set_element(i, sum);
	}

	return z;
}
void MLPPDualSVC : : forward_pass ( ) {
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-01-24 19:00:54 +01:00
2023-02-12 12:36:52 +01:00
_z = propagatem ( _input_set ) ;
2023-02-16 19:15:36 +01:00
_y_hat = avn . sign_normv ( _z ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-12 12:36:52 +01:00
void MLPPDualSVC : : alpha_projection ( ) {
2023-02-16 19:15:36 +01:00
for ( int i = 0 ; i < _alpha - > size ( ) ; i + + ) {
if ( _alpha - > get_element ( i ) > _C ) {
_alpha - > set_element ( i , _C ) ;
} else if ( _alpha - > get_element ( i ) < 0 ) {
_alpha - > set_element ( i , 0 ) ;
2023-01-24 19:00:54 +01:00
}
}
}
2023-02-16 19:15:36 +01:00
// Scalar kernel between two example vectors. Only the linear kernel
// (a plain dot product) is implemented; any other method yields 0.
real_t MLPPDualSVC::kernel_functionv(const Ref<MLPPVector> &v, const Ref<MLPPVector> &u, KernelMethod kernel) {
	MLPPLinAlg alg;

	switch (kernel) {
		case KERNEL_METHOD_LINEAR:
			return alg.dotv(u, v);
		default:
			return 0;
	}
}
// Gram matrix between the rows of U and V under the given kernel. Only the
// linear kernel (U * V^T) is implemented; other methods return an empty matrix.
Ref<MLPPMatrix> MLPPDualSVC::kernel_functionm(const Ref<MLPPMatrix> &U, const Ref<MLPPMatrix> &V, KernelMethod kernel) {
	MLPPLinAlg alg;

	if (kernel == KERNEL_METHOD_LINEAR) {
		// Bug fix: the original ignored both parameters and always computed
		// _input_set * _input_set^T, so any kernel over matrices other than
		// the training set was silently wrong. The only current caller passes
		// (_input_set, _input_set), so its behavior is unchanged.
		return alg.matmultm(U, alg.transposem(V));
	}

	Ref<MLPPMatrix> m;
	m.instance();

	return m;
}
// Script-binding registration hook; no methods are exposed to scripts yet.
void MLPPDualSVC::_bind_methods() {
}