(Hopefully) fix compile on windows.

Relintai 2023-04-27 11:10:48 +02:00
parent 9a0b6aa016
commit 4ce26ff55a
14 changed files with 44 additions and 14 deletions
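
Why this fixes Windows: M_PI is a POSIX extension rather than standard C++, and MSVC's <cmath> only defines it when _USE_MATH_DEFINES is set before inclusion. The commit therefore does two things: the maintained classes switch to the engine's Math_PI constant from core/math/math_defs.h, and each legacy *Old translation unit gets a guarded local fallback for M_PI. A minimal standalone sketch of the failure and of the fallback pattern (a hypothetical repro file, not part of the commit):

// repro.cpp - hypothetical repro, not part of this commit.
// Compiled with MSVC and no _USE_MATH_DEFINES, using M_PI without the
// guard below typically fails with error C2065: 'M_PI': undeclared
// identifier; gcc/clang on POSIX systems accept it.
#include <cmath>
#include <cstdio>

// The same fallback the *Old files gain in this commit; it only takes
// effect where <cmath> does not already provide M_PI.
#ifndef M_PI
#define M_PI 3.141592653
#endif

int main() {
	// Standard normal density at z = 0, the same expression the
	// activation and probit code below evaluates.
	const double z = 0.0;
	std::printf("phi(0) = %f\n", (1.0 / std::sqrt(2.0 * M_PI)) * std::exp(-z * z / 2.0));
	return 0;
}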

View File

@@ -6,6 +6,9 @@
 #include "activation.h"

 #include "../lin_alg/lin_alg.h"
+
+#include "core/math/math_defs.h"
+
 #include <algorithm>
 #include <cmath>
 #include <iostream>
@@ -1285,18 +1288,18 @@ Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_normm(const Ref<MLPPMatrix> &z) {
 }

 real_t MLPPActivation::gaussian_cdf_derivr(real_t z) {
-	return (1 / sqrt(2 * M_PI)) * exp(-z * z / 2);
+	return (1 / sqrt(2 * Math_PI)) * exp(-z * z / 2);
 }
 Ref<MLPPVector> MLPPActivation::gaussian_cdf_derivv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;

-	return alg.scalar_multiplynv(1 / Math::sqrt(2 * M_PI), alg.expnv(alg.scalar_multiplynv(-1 / 2.0, alg.hadamard_productnv(z, z))));
+	return alg.scalar_multiplynv(1 / Math::sqrt(2 * Math_PI), alg.expnv(alg.scalar_multiplynv(-1 / 2.0, alg.hadamard_productnv(z, z))));
 }
 Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;

-	return alg.scalar_multiplynm(1 / Math::sqrt(2 * M_PI), alg.expnm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z))));
+	return alg.scalar_multiplynm(1 / Math::sqrt(2 * Math_PI), alg.expnm(alg.scalar_multiplynm(-1 / 2.0, alg.hadamard_productnm(z, z))));
 }

 //CLOGLOG
//CLOGLOG
@@ -1844,7 +1847,7 @@ Ref<MLPPMatrix> MLPPActivation::selu_derivm(const Ref<MLPPMatrix> &z, real_t lam

 //GELU
 real_t MLPPActivation::gelu_normr(real_t z) {
-	return 0.5 * z * (1 + tanh(sqrt(2 / M_PI) * (z + 0.044715 * Math::pow(z, 3))));
+	return 0.5 * z * (1 + tanh(sqrt(2 / Math_PI) * (z + 0.044715 * Math::pow(z, 3))));
 }
 Ref<MLPPVector> MLPPActivation::gelu_normv(const Ref<MLPPVector> &z) {
 	Ref<MLPPVector> a;
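
Math_PI is the pi constant Godot defines in core/math/math_defs.h (the include added at the top of this file), so it is available on every platform the engine targets. The math is unchanged: gaussian_cdf_deriv* still evaluate the standard normal density φ(z) = (1/√(2π)) · e^(−z²/2), the derivative of the Gaussian CDF; only the name of the π constant differs.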

View File

@@ -11,6 +11,10 @@
 #include <cmath>
 #include <iostream>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 real_t MLPPActivationOld::linear(real_t z, bool deriv) {
 	if (deriv) {
 		return 1;
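
The same four-line fallback is repeated in each legacy *Old translation unit below. Because it is wrapped in #ifndef, platforms whose <cmath> already ships M_PI keep their native definition; the 10-digit literal 3.141592653 only applies where the macro is missing, which is ample for float real_t builds though shorter than the usual 21-significant-digit M_PI literal.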

View File

@@ -203,7 +203,7 @@ std::vector<real_t> MLPPConvolutions::global_pool_3d(std::vector<std::vector<std
 real_t MLPPConvolutions::gaussian_2d(real_t x, real_t y, real_t std) {
 	real_t std_sq = std * std;

-	return 1 / (2 * M_PI * std_sq) * std::exp(-(x * x + y * y) / 2 * std_sq);
+	return 1 / (2 * Math_PI * std_sq) * std::exp(-(x * x + y * y) / 2 * std_sq);
 }

 std::vector<std::vector<real_t>> MLPPConvolutions::gaussian_filter_2d(int size, real_t std) {
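
For context, gaussian_2d is meant to evaluate the 2-D Gaussian kernel G(x, y) = 1/(2πσ²) · exp(−(x² + y²)/(2σ²)); the commit only swaps the constant's name. (Note that "/ 2 * std_sq" parses as "((x * x + y * y) / 2) * std_sq", multiplying by σ² instead of dividing by 2σ², which looks like a pre-existing precedence quirk that this commit leaves untouched.)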

View File

@@ -11,6 +11,10 @@
 #include <cmath>
 #include <iostream>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 std::vector<std::vector<real_t>> MLPPConvolutionsOld::convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P) {
 	MLPPLinAlgOld alg;
 	std::vector<std::vector<real_t>> feature_map;

View File

@@ -5,6 +5,9 @@
 //

 #include "gaussian_nb.h"
+
+#include "core/math/math_defs.h"
+
 #include "../lin_alg/lin_alg.h"
 #include "../stat/stat.h"
 #include "../utilities/utilities.h"
@@ -61,7 +64,7 @@ real_t MLPPGaussianNB::model_test(const Ref<MLPPVector> &x) {
 		real_t x_i = x->get_element(i);
 		real_t mu_i = _mu->get_element(i);

-		y_hat_i += Math::log(_priors->get_element(i) * (1 / Math::sqrt(2 * M_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));
+		y_hat_i += Math::log(_priors->get_element(i) * (1 / Math::sqrt(2 * Math_PI * sigma_i * sigma_i)) * Math::exp(-(x_i * mu_i) * (x_i * mu_i) / (2 * sigma_i * sigma_i)));

 		score[i] = Math::exp(y_hat_i);
 	}
@@ -171,7 +174,7 @@ void MLPPGaussianNB::evaluate() {
 			real_t mu_j = _mu->get_element(j);
 			real_t input_set_i_k = _input_set->get_element(i, k);

-			y_hat_i += Math::log(_priors->get_element(j) * (1 / Math::sqrt(2 * M_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
+			y_hat_i += Math::log(_priors->get_element(j) * (1 / Math::sqrt(2 * Math_PI * sigma_j * sigma_j)) * Math::exp(-(input_set_i_k * mu_j) * (input_set_i_k * mu_j) / (2 * sigma_j * sigma_j)));
 		}

 		score[j] = Math::exp(y_hat_i);
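
Both hunks accumulate the per-class Gaussian naive Bayes log-score, log(P(c) · N(x; μ, σ)) with N(x; μ, σ) = 1/√(2πσ²) · exp(−(x − μ)²/(2σ²)); again only the π constant is renamed. (The exponent as written multiplies x by μ where the textbook density uses (x − μ)²; that also predates this commit.)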

View File

@@ -14,6 +14,10 @@
 #include <iostream>
 #include <random>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 MLPPGaussianNBOld::MLPPGaussianNBOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<real_t> p_outputSet, int p_class_num) {
 	inputSet = p_inputSet;
 	outputSet = p_outputSet;

View File

@@ -748,7 +748,7 @@ MLPPLinAlg::EigenResult MLPPLinAlg::eigen(Ref<MLPPMatrix> A) {
 		real_t theta;

 		if (a_ii == a_jj) {
-			theta = M_PI / 4;
+			theta = Math_PI / 4;
 		} else {
 			theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj));
 		}
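
The special case, here and in the MLPPMatrix::eigen/eigenb hunks below, is the usual Jacobi rotation guard: the rotation angle is θ = ½ · atan(2·a_ij / (a_ii − a_jj)), which is undefined when the two diagonal entries coincide, and the limiting optimal angle in that case is π/4, so it is substituted directly.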

View File

@@ -15,6 +15,10 @@
 #include <map>
 #include <random>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 std::vector<std::vector<real_t>> MLPPLinAlgOld::gramMatrix(std::vector<std::vector<real_t>> A) {
 	return matmult(transpose(A), A); // AtA
 }

View File

@@ -2003,7 +2003,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigen() const {
 		real_t theta;

 		if (a_ii == a_jj) {
-			theta = M_PI / 4;
+			theta = Math_PI / 4;
 		} else {
 			theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj));
 		}
@@ -2143,7 +2143,7 @@ MLPPMatrix::EigenResult MLPPMatrix::eigenb(const Ref<MLPPMatrix> &A) const {
 		real_t theta;

 		if (a_ii == a_jj) {
-			theta = M_PI / 4;
+			theta = Math_PI / 4;
 		} else {
 			theta = 0.5 * atan(2 * a_ij / (a_ii - a_jj));
 		}

View File

@@ -186,11 +186,11 @@ void MLPPProbitReg::sgd(real_t learning_rate, int max_epoch, bool ui) {
 		real_t error = y_hat - output_set_entry;

 		// Weight Updation
-		_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * ((1 / Math::sqrt(2 * M_PI)) * Math::exp(-z * z / 2)), input_set_row_tmp));
+		_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate * error * ((1 / Math::sqrt(2 * Math_PI)) * Math::exp(-z * z / 2)), input_set_row_tmp));
 		_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);

 		// Bias updation
-		_bias -= learning_rate * error * ((1 / Math::sqrt(2 * M_PI)) * Math::exp(-z * z / 2));
+		_bias -= learning_rate * error * ((1 / Math::sqrt(2 * Math_PI)) * Math::exp(-z * z / 2));

 		y_hat = evaluatev(input_set_row_tmp);
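
In both updates the shared factor (1/√(2π)) · e^(−z²/2) is φ(z), the standard normal PDF and therefore the derivative of the probit link Φ(z); consistent with a squared-error cost, the chain rule gives the gradient used here, Δw ∝ error · φ(z) · x, and the same scalar is reused for the bias step.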

View File

@@ -14,6 +14,10 @@
 #include <iostream>
 #include <random>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 MLPPProbitRegOld::MLPPProbitRegOld(std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet, std::string reg, real_t lambda, real_t alpha) :
 		inputSet(inputSet), outputSet(outputSet), n(inputSet.size()), k(inputSet[0].size()), reg(reg), lambda(lambda), alpha(alpha) {
 	y_hat.resize(n);

View File

@@ -43,7 +43,7 @@ std::vector<std::vector<real_t>> MLPPTransforms::discreteCosineTransform(std::ve
 			for (uint32_t k = 0; k < B.size(); k++) {
 				for (uint32_t f = 0; f < B[k].size(); f++) {
-					sum += A[k][f] * std::cos((M_PI * i * (2 * k + 1)) / (2 * M)) * std::cos((M_PI * j * (2 * f + 1)) / (2 * M));
+					sum += A[k][f] * std::cos((Math_PI * i * (2 * k + 1)) / (2 * M)) * std::cos((Math_PI * j * (2 * f + 1)) / (2 * M));
 				}
 			}

 			B[i][j] = sum;
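
The inner sum is the 2-D DCT-II basis product, B_ij = Σ_k Σ_f A_kf · cos(π·i·(2k + 1)/(2M)) · cos(π·j·(2f + 1)/(2M)); as elsewhere, only the constant's name changes.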

View File

@@ -10,6 +10,10 @@
 #include <iostream>
 #include <string>

+#ifndef M_PI
+#define M_PI 3.141592653
+#endif
+
 // DCT ii.
 // https://www.mathworks.com/help/images/discrete-cosine-transform.html
 std::vector<std::vector<real_t>> MLPPTransformsOld::discreteCosineTransform(std::vector<std::vector<real_t>> A) {

View File

@@ -156,7 +156,7 @@ void MLPPTests::test_linear_algebra() {
	std::vector<std::vector<real_t>> square = { { 1, 1 }, { -1, 1 }, { 1, -1 }, { -1, -1 } };
	std::vector<std::vector<real_t>> square_rot_res = { { 1.41421, 1.11022e-16 }, { -1.11022e-16, 1.41421 }, { 1.11022e-16, -1.41421 }, { -1.41421, -1.11022e-16 } };

-	is_approx_equals_dmat(dstd_mat_to_mat(alg.rotate(square, M_PI / 4)), dstd_mat_to_mat(square_rot_res), "alg.rotate(square, M_PI / 4)");
+	is_approx_equals_dmat(dstd_mat_to_mat(alg.rotate(square, Math_PI / 4)), dstd_mat_to_mat(square_rot_res), "alg.rotate(square, Math_PI / 4)");

	std::vector<std::vector<real_t>> A = {
		{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 },