/*************************************************************************/
/*  dual_svc.cpp                                                         */
/*************************************************************************/
/*                     This file is part of:                             */
/*                     PMLPP Machine Learning Library                    */
/*                     https://github.com/Relintai/pmlpp                 */
/*************************************************************************/
/* Copyright (c) 2023-present Péter Magyar.                              */
/* Copyright (c) 2022-2023 Marc Melikyan                                 */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "dual_svc.h"

#include "../activation/activation.h"
#include "../cost/cost.h"
#include "../regularization/reg.h"
#include "../utilities/utilities.h"

#include <random>
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > MLPPDualSVC : : model_set_test ( const Ref < MLPPMatrix > & X ) {
2023-02-12 12:36:52 +01:00
return evaluatem ( X ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-16 19:15:36 +01:00
real_t MLPPDualSVC : : model_test ( const Ref < MLPPVector > & x ) {
2023-02-12 12:36:52 +01:00
return evaluatev ( x ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-12 12:36:52 +01:00
void MLPPDualSVC : : gradient_descent ( real_t learning_rate , int max_epoch , bool ui ) {
MLPPCost mlpp_cost ;
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-01-25 00:54:50 +01:00
MLPPReg regularization ;
2023-01-27 13:01:16 +01:00
real_t cost_prev = 0 ;
2023-01-24 19:00:54 +01:00
int epoch = 1 ;
2023-02-12 12:36:52 +01:00
forward_pass ( ) ;
2023-01-24 19:00:54 +01:00
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > input_set_i_row_tmp ;
input_set_i_row_tmp . instance ( ) ;
input_set_i_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
Ref < MLPPVector > input_set_j_row_tmp ;
input_set_j_row_tmp . instance ( ) ;
input_set_j_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
2023-01-24 19:00:54 +01:00
while ( true ) {
2023-02-12 12:36:52 +01:00
cost_prev = cost ( _alpha , _input_set , _output_set ) ;
2023-01-24 19:00:54 +01:00
2023-04-30 18:46:53 +02:00
_alpha - > sub ( mlpp_cost . dual_form_svm_deriv ( _alpha , _input_set , _output_set ) - > scalar_multiplyn ( learning_rate ) ) ;
2023-01-24 19:00:54 +01:00
2023-02-12 12:36:52 +01:00
alpha_projection ( ) ;
2023-01-24 19:00:54 +01:00
// Calculating the bias
2023-01-27 13:01:16 +01:00
real_t biasGradient = 0 ;
2023-02-16 19:15:36 +01:00
for ( int i = 0 ; i < _alpha - > size ( ) ; i + + ) {
2023-01-27 13:01:16 +01:00
real_t sum = 0 ;
2023-04-29 13:44:18 +02:00
if ( _alpha - > element_get ( i ) < _C & & _alpha - > element_get ( i ) > 0 ) {
2023-02-16 19:15:36 +01:00
for ( int j = 0 ; j < _alpha - > size ( ) ; j + + ) {
2023-04-29 13:44:18 +02:00
if ( _alpha - > element_get ( j ) > 0 ) {
2023-04-29 15:07:30 +02:00
_input_set - > row_get_into_mlpp_vector ( i , input_set_i_row_tmp ) ;
_input_set - > row_get_into_mlpp_vector ( j , input_set_j_row_tmp ) ;
2023-02-16 19:15:36 +01:00
2023-04-30 18:46:53 +02:00
sum + = _alpha - > element_get ( j ) * _output_set - > element_get ( j ) * input_set_j_row_tmp - > dot ( input_set_i_row_tmp ) ; // TO DO: DON'T forget to add non-linear kernelizations.
2023-01-24 19:00:54 +01:00
}
}
}
2023-02-16 19:15:36 +01:00
2023-04-29 13:44:18 +02:00
biasGradient = ( 1 - _output_set - > element_get ( i ) * sum ) / _output_set - > element_get ( i ) ;
2023-02-16 19:15:36 +01:00
2023-01-24 19:00:54 +01:00
break ;
}
2023-02-12 12:36:52 +01:00
_bias - = biasGradient * learning_rate ;
forward_pass ( ) ;
2023-01-24 19:00:54 +01:00
// UI PORTION
2023-02-12 12:36:52 +01:00
if ( ui ) {
2023-02-16 19:15:36 +01:00
MLPPUtilities : : cost_info ( epoch , cost_prev , cost ( _alpha , _input_set , _output_set ) ) ;
MLPPUtilities : : print_ui_vb ( _alpha , _bias ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-12 12:36:52 +01:00
2023-01-24 19:00:54 +01:00
epoch + + ;
if ( epoch > max_epoch ) {
break ;
}
}
}
// void MLPPDualSVC::SGD(real_t learning_rate, int max_epoch, bool UI){
//     class MLPPCost cost;
//     MLPPActivation avn;
//     MLPPLinAlg alg;
//     MLPPReg regularization;

//     real_t cost_prev = 0;
//     int epoch = 1;

//     while(true){
//         std::random_device rd;
//         std::default_random_engine generator(rd());
//         std::uniform_int_distribution<int> distribution(0, int(n - 1));
//         int outputIndex = distribution(generator);

//         cost_prev = Cost(alpha, _input_set[outputIndex], _output_set[outputIndex]);

//         // Bias updation
//         bias -= learning_rate * costDeriv;

//         y_hat = Evaluate({_input_set[outputIndex]});

//         if(UI) {
//             MLPPUtilities::CostInfo(epoch, cost_prev, Cost(alpha));
//             MLPPUtilities::UI(weights, bias);
//         }

//         epoch++;
//         if(epoch > max_epoch) { break; }
//     }
//     forwardPass();
// }

// void MLPPDualSVC::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){
//     class MLPPCost cost;
//     MLPPActivation avn;
//     MLPPLinAlg alg;
//     MLPPReg regularization;
//     real_t cost_prev = 0;
//     int epoch = 1;

//     // Creating the mini-batches
//     int n_mini_batch = n/mini_batch_size;
//     auto [inputMiniBatches, outputMiniBatches] = MLPPUtilities::createMiniBatches(_input_set, _output_set, n_mini_batch);

//     while(true){
//         for(int i = 0; i < n_mini_batch; i++){
//             std::vector<real_t> y_hat = Evaluate(inputMiniBatches[i]);
//             std::vector<real_t> z = propagate(inputMiniBatches[i]);
//             cost_prev = Cost(z, outputMiniBatches[i], weights, C);

//             // Calculating the weight gradients
//             weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate/n, alg.mat_vec_mult(alg.transpose(inputMiniBatches[i]), cost.HingeLossDeriv(z, outputMiniBatches[i], C))));
//             weights = regularization.regWeights(weights, learning_rate/n, 0, "Ridge");

//             // Calculating the bias gradients
//             bias -= learning_rate * alg.sum_elements(cost.HingeLossDeriv(y_hat, outputMiniBatches[i], C)) / n;
//             forwardPass();

//             y_hat = Evaluate(inputMiniBatches[i]);

//             if(UI) {
//                 MLPPUtilities::CostInfo(epoch, cost_prev, Cost(z, outputMiniBatches[i], weights, C));
//                 MLPPUtilities::UI(weights, bias);
//             }
//         }
//         epoch++;
//         if(epoch > max_epoch) { break; }
//     }
//     forwardPass();
// }
real_t MLPPDualSVC::score() {
	// Performance of the cached predictions against the training labels.
	MLPPUtilities utilities;
	return utilities.performance_vec(_y_hat, _output_set);
}
2023-02-16 19:15:36 +01:00
void MLPPDualSVC : : save ( const String & file_name ) {
MLPPUtilities util ;
//util.saveParameters(file_name, _alpha, _bias);
}
MLPPDualSVC::MLPPDualSVC(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, real_t p_C, KernelMethod p_kernel) {
	// p_input_set:  training samples, one per row.
	// p_output_set: one label per training sample.
	// p_C:          box-constraint / regularization strength for the alphas.
	// p_kernel:     kernel method (only the linear kernel is implemented).
	_input_set = p_input_set;
	_output_set = p_output_set;

	// Dataset dimensions: _n samples (rows), _k features (columns).
	_n = p_input_set->size().y;
	_k = p_input_set->size().x;

	_C = p_C;
	_kernel = p_kernel;

	_z.instance();
	_y_hat.instance();
	_alpha.instance();

	_y_hat->resize(_n);

	MLPPUtilities utils;
	_bias = utils.bias_initializationr();
	_alpha->resize(_n);
	utils.weight_initializationv(_alpha); // One alpha for all training examples, as per the lagrangian multipliers.

	_K = kernel_functionm(_input_set, _input_set, _kernel); // For now this is unused. When non-linear kernels are added, the K will be manipulated.
}
MLPPDualSVC::MLPPDualSVC() {
	// Default constructor: no initialization is performed here.
}
MLPPDualSVC::~MLPPDualSVC() {
	// Nothing to release explicitly.
}
2023-02-16 19:15:36 +01:00
real_t MLPPDualSVC : : cost ( const Ref < MLPPVector > & alpha , const Ref < MLPPMatrix > & X , const Ref < MLPPVector > & y ) {
2023-01-24 19:37:08 +01:00
class MLPPCost cost ;
2023-02-16 19:15:36 +01:00
return cost . dual_form_svm ( alpha , X , y ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-16 19:15:36 +01:00
real_t MLPPDualSVC : : evaluatev ( const Ref < MLPPVector > & x ) {
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-04-22 14:11:07 +02:00
return avn . sign_normr ( propagatev ( x ) ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-16 19:15:36 +01:00
real_t MLPPDualSVC : : propagatev ( const Ref < MLPPVector > & x ) {
2023-02-12 12:36:52 +01:00
real_t z = 0 ;
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > input_set_row_tmp ;
input_set_row_tmp . instance ( ) ;
input_set_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
for ( int j = 0 ; j < _alpha - > size ( ) ; j + + ) {
2023-04-29 13:44:18 +02:00
if ( _alpha - > element_get ( j ) ! = 0 ) {
2023-04-29 15:07:30 +02:00
_input_set - > row_get_into_mlpp_vector ( j , input_set_row_tmp ) ;
2023-04-30 18:46:53 +02:00
z + = _alpha - > element_get ( j ) * _output_set - > element_get ( j ) * input_set_row_tmp - > dot ( x ) ; // TO DO: DON'T forget to add non-linear kernelizations.
2023-01-24 19:00:54 +01:00
}
}
2023-02-12 12:36:52 +01:00
z + = _bias ;
2023-01-24 19:00:54 +01:00
return z ;
}
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > MLPPDualSVC : : evaluatem ( const Ref < MLPPMatrix > & X ) {
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-02-16 19:15:36 +01:00
return avn . sign_normv ( propagatem ( X ) ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-16 19:15:36 +01:00
Ref < MLPPVector > MLPPDualSVC : : propagatem ( const Ref < MLPPMatrix > & X ) {
Ref < MLPPVector > z ;
z . instance ( ) ;
z - > resize ( X - > size ( ) . y ) ;
Ref < MLPPVector > input_set_row_tmp ;
input_set_row_tmp . instance ( ) ;
input_set_row_tmp - > resize ( _input_set - > size ( ) . x ) ;
Ref < MLPPVector > x_row_tmp ;
x_row_tmp . instance ( ) ;
x_row_tmp - > resize ( X - > size ( ) . x ) ;
for ( int i = 0 ; i < X - > size ( ) . y ; i + + ) {
2023-02-12 12:36:52 +01:00
real_t sum = 0 ;
2023-02-16 19:15:36 +01:00
for ( int j = 0 ; j < _alpha - > size ( ) ; j + + ) {
2023-04-29 13:44:18 +02:00
if ( _alpha - > element_get ( j ) ! = 0 ) {
2023-04-29 15:07:30 +02:00
_input_set - > row_get_into_mlpp_vector ( j , input_set_row_tmp ) ;
X - > row_get_into_mlpp_vector ( i , x_row_tmp ) ;
2023-02-16 19:15:36 +01:00
2023-04-30 18:46:53 +02:00
sum + = _alpha - > element_get ( j ) * _output_set - > element_get ( j ) * input_set_row_tmp - > dot ( x_row_tmp ) ; // TO DO: DON'T forget to add non-linear kernelizations.
2023-02-12 12:36:52 +01:00
}
2023-01-24 19:00:54 +01:00
}
2023-02-16 19:15:36 +01:00
2023-02-12 12:36:52 +01:00
sum + = _bias ;
2023-02-16 19:15:36 +01:00
2023-04-29 13:44:18 +02:00
z - > element_set ( i , sum ) ;
2023-01-24 19:00:54 +01:00
}
return z ;
}
2023-02-12 12:36:52 +01:00
void MLPPDualSVC : : forward_pass ( ) {
2023-01-24 19:23:30 +01:00
MLPPActivation avn ;
2023-01-24 19:00:54 +01:00
2023-02-12 12:36:52 +01:00
_z = propagatem ( _input_set ) ;
2023-02-16 19:15:36 +01:00
_y_hat = avn . sign_normv ( _z ) ;
2023-01-24 19:00:54 +01:00
}
2023-02-12 12:36:52 +01:00
void MLPPDualSVC : : alpha_projection ( ) {
2023-02-16 19:15:36 +01:00
for ( int i = 0 ; i < _alpha - > size ( ) ; i + + ) {
2023-04-29 13:44:18 +02:00
if ( _alpha - > element_get ( i ) > _C ) {
_alpha - > element_set ( i , _C ) ;
} else if ( _alpha - > element_get ( i ) < 0 ) {
_alpha - > element_set ( i , 0 ) ;
2023-01-24 19:00:54 +01:00
}
}
}
real_t MLPPDualSVC::kernel_functionv(const Ref<MLPPVector> &v, const Ref<MLPPVector> &u, KernelMethod kernel) {
	// Scalar kernel between two sample vectors. Only the linear kernel
	// (plain dot product) is implemented.
	if (kernel == KERNEL_METHOD_LINEAR) {
		return u->dot(v);
	}

	// Unsupported kernel methods fall back to 0.
	return 0;
}
2023-02-16 19:15:36 +01:00
Ref < MLPPMatrix > MLPPDualSVC : : kernel_functionm ( const Ref < MLPPMatrix > & U , const Ref < MLPPMatrix > & V , KernelMethod kernel ) {
if ( kernel = = KERNEL_METHOD_LINEAR ) {
2023-04-30 18:46:53 +02:00
return _input_set - > multn ( _input_set - > transposen ( ) ) ;
2023-02-10 22:33:32 +01:00
}
2023-02-16 19:15:36 +01:00
Ref < MLPPMatrix > m ;
m . instance ( ) ;
return m ;
2023-01-24 19:00:54 +01:00
}
void MLPPDualSVC::_bind_methods() {
	// No script-facing methods are bound yet.
}