#ifndef MLPP_COST_H
#define MLPP_COST_H
//
// Cost.hpp
//
// Created by Marc Melikyan on 1/16/21.
//
#include "core/math/math_defs.h"
#include "core/object/reference.h"

#include <vector>

#include "../lin_alg/mlpp_matrix.h"
#include "../lin_alg/mlpp_vector.h"
//void set_weights(const Ref<MLPPMatrix> &val);
//void set_bias(const Ref<MLPPVector> &val);

class MLPPCost : public Reference {
	GDCLASS(MLPPCost, Reference);
public:
	enum CostTypes {
		COST_TYPE_MSE = 0,
		COST_TYPE_RMSE,
		COST_TYPE_MAE,
		COST_TYPE_MBE,
		COST_TYPE_LOGISTIC_LOSS,
		COST_TYPE_CROSS_ENTROPY,
		COST_TYPE_HINGE_LOSS,
		COST_TYPE_WASSERSTEIN_LOSS,
	};
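
	// CostTypes selects which cost function the run_cost_* helpers below
	// evaluate. Note that the Huber loss is declared but has no enum entry,
	// presumably because its extra `delta` parameter does not fit the
	// two-argument function-pointer signatures used by the dispatchers.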
public:
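	// Regression Costs
	//
	// Standard definitions, for reference (the implementations in cost.cpp may
	// scale or sign these slightly differently):
	//   MSE:  (1/n) * sum_i (y_hat_i - y_i)^2
	//   RMSE: sqrt(MSE)
	//   MAE:  (1/n) * sum_i |y_hat_i - y_i|
	//   MBE:  (1/n) * sum_i (y_hat_i - y_i)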
	real_t msev(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t msem(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> mse_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> mse_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t rmsev(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t rmsem(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> rmse_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> rmse_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t maev(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t maem(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> mae_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> mae_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t mbev(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t mbem(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> mbe_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> mbe_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	// Classification Costs
	real_t log_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t log_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> log_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> log_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t cross_entropyv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t cross_entropym(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> cross_entropy_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> cross_entropy_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t huber_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta);
	real_t huber_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y, real_t delta);
	Ref<MLPPVector> huber_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t delta);
	Ref<MLPPMatrix> huber_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y, real_t delta);

	real_t hinge_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t hinge_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> hinge_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> hinge_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);

	real_t hinge_losswv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, const Ref<MLPPVector> &weights, real_t C);
	real_t hinge_losswm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y, const Ref<MLPPMatrix> &weights, real_t C);
	Ref<MLPPVector> hinge_loss_derivwv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y, real_t C);
	Ref<MLPPMatrix> hinge_loss_derivwm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y, real_t C);

	real_t wasserstein_lossv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t wasserstein_lossm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> wasserstein_loss_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> wasserstein_loss_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	real_t dual_form_svm(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y); // TODO: Don't forget to add non-linear kernelizations.
	Ref<MLPPVector> dual_form_svm_deriv(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y);
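	// dual_form_svm is presumed to evaluate the standard linear-kernel SVM dual
	// objective, W(alpha) = sum_i alpha_i - (1/2) * sum_i sum_j alpha_i alpha_j y_i y_j <x_i, x_j>,
	// with dual_form_svm_deriv returning its gradient with respect to alpha.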
	typedef real_t (MLPPCost::*VectorCostFunctionPointer)(const Ref<MLPPVector> &, const Ref<MLPPVector> &);
	typedef real_t (MLPPCost::*MatrixCostFunctionPointer)(const Ref<MLPPMatrix> &, const Ref<MLPPMatrix> &);
	typedef Ref<MLPPVector> (MLPPCost::*VectorDerivCostFunctionPointer)(const Ref<MLPPVector> &, const Ref<MLPPVector> &);
	typedef Ref<MLPPMatrix> (MLPPCost::*MatrixDerivCostFunctionPointer)(const Ref<MLPPMatrix> &, const Ref<MLPPMatrix> &);

	VectorCostFunctionPointer get_cost_function_ptr_normal_vector(const CostTypes cost);
	MatrixCostFunctionPointer get_cost_function_ptr_normal_matrix(const CostTypes cost);
	VectorDerivCostFunctionPointer get_cost_function_ptr_deriv_vector(const CostTypes cost);
	MatrixDerivCostFunctionPointer get_cost_function_ptr_deriv_matrix(const CostTypes cost);

	real_t run_cost_norm_vector(const CostTypes cost, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	real_t run_cost_norm_matrix(const CostTypes cost, const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
	Ref<MLPPVector> run_cost_deriv_vector(const CostTypes cost, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
	Ref<MLPPMatrix> run_cost_deriv_matrix(const CostTypes cost, const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
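
	// Minimal usage sketch (assumes y_hat and y are already-populated
	// Ref<MLPPVector> instances of equal size, and that the engine exposes
	// Ref<T>::instance() for instantiation):
	//
	//   Ref<MLPPCost> cost;
	//   cost.instance();
	//   real_t loss = cost->run_cost_norm_vector(MLPPCost::COST_TYPE_MSE, y_hat, y);
	//   Ref<MLPPVector> grad = cost->run_cost_deriv_vector(MLPPCost::COST_TYPE_MSE, y_hat, y);
	//
	// The std::vector overloads below appear to be the original ML++ API, kept
	// alongside the Ref-based methods above.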
	// Regression Costs
	real_t MSE(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t MSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> MSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> MSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t RMSE(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t RMSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> RMSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> RMSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t MAE(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t MAE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> MAEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> MAEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t MBE(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t MBE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> MBEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> MBEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
	// Classification Costs
	real_t LogLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t LogLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> LogLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> LogLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t CrossEntropy(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t CrossEntropy(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> CrossEntropyDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> CrossEntropyDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t HuberLoss(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta);
	real_t HuberLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta);

	std::vector<real_t> HuberLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta);
	std::vector<std::vector<real_t>> HuberLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta);

	real_t HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	real_t HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y, std::vector<real_t> weights, real_t C);
	real_t HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, std::vector<std::vector<real_t>> weights, real_t C);

	std::vector<real_t> HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t C);
	std::vector<std::vector<real_t>> HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t C);

	real_t WassersteinLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
	real_t WassersteinLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);

	std::vector<real_t> WassersteinLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
	std::vector<std::vector<real_t>> WassersteinLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
	real_t dualFormSVM(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y); // TODO: Don't forget to add non-linear kernelizations.

	std::vector<real_t> dualFormSVMDeriv(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y);
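	// Assuming the standard dual objective noted above, dualFormSVMDeriv would
	// return dW/dalpha_i = 1 - y_i * sum_j alpha_j y_j <x_j, x_i>.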
protected:
	static void _bind_methods();
};
VARIANT_ENUM_CAST(MLPPCost::CostTypes);
#endif /* MLPP_COST_H */