mirror of https://github.com/Relintai/MLPP.git

Commit c93400840a (parent 1523eea7b8): replaces unqualified log() calls with std::log() across the library.
@@ -142,7 +142,7 @@ namespace MLPP{
 
     double Activation::softplus(double z, bool deriv){
         if(deriv){ return sigmoid(z); }
-        return log(1 + exp(z));
+        return std::log(1 + exp(z));
     }
 
     std::vector<double> Activation::softplus(std::vector<double> z, bool deriv){
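The deriv branch returns the logistic sigmoid, which is the exact derivative of softplus(z) = ln(1 + e^z). As a side note, a standalone sketch (not MLPP's API, names are illustrative) of an overflow-safe softplus uses std::log1p from <cmath>, which this commit does not touch; for large z, exp(z) overflows, so the identity ln(1 + e^z) = z + ln(1 + e^{-z}) is applied first:

    #include <cmath>

    // Standalone sketch, not part of the library: overflow-safe softplus.
    double softplus_stable(double z) {
        // ln(1 + e^z) == z + ln(1 + e^{-z}) for z > 0, so exp never overflows.
        if (z > 0) { return z + std::log1p(std::exp(-z)); }
        return std::log1p(std::exp(z));      // std::log1p(x) computes ln(1 + x)
    }

    double softplus_stable_deriv(double z) {
        return 1.0 / (1.0 + std::exp(-z));   // logistic sigmoid
    }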
@@ -752,7 +752,7 @@ namespace MLPP{
 
     double Activation::arsinh(double z, bool deriv){
         if(deriv){ return 1 / sqrt(z * z + 1); }
-        return log(z + sqrt(z * z + 1));
+        return std::log(z + sqrt(z * z + 1));
     }
 
     std::vector<double> Activation::arsinh(std::vector<double> z, bool deriv){
@@ -771,7 +771,7 @@ namespace MLPP{
         if(deriv){
             return 1/sqrt(z * z - 1);
         }
-        return log(z + sqrt(z * z - 1));
+        return std::log(z + sqrt(z * z - 1));
     }
 
     std::vector<double> Activation::arcosh(std::vector<double> z, bool deriv){
@@ -790,7 +790,7 @@ namespace MLPP{
         if(deriv){
             return 1/(1 - z * z);
         }
-        return 0.5 * log((1 + z)/(1 - z));
+        return 0.5 * std::log((1 + z)/(1 - z));
     }
 
     std::vector<double> Activation::artanh(std::vector<double> z, bool deriv){
@@ -809,7 +809,7 @@ namespace MLPP{
         if(deriv){
             return -1/((z * z) * sqrt(1 + (1/(z * z))));
         }
-        return log(sqrt(1 + (1 / (z * z))) + (1/z));
+        return std::log(sqrt(1 + (1 / (z * z))) + (1/z));
     }
 
     std::vector<double> Activation::arcsch(std::vector<double> z, bool deriv){
@@ -829,7 +829,7 @@ namespace MLPP{
         if(deriv){
             return -1/(z * sqrt(1 - z * z));
         }
-        return log((1/z) + ((1/z) + 1) * ((1/z) - 1));
+        return std::log((1/z) + ((1/z) + 1) * ((1/z) - 1));
     }
 
     std::vector<double> Activation::arsech(std::vector<double> z, bool deriv){
@@ -848,7 +848,7 @@ namespace MLPP{
         if(deriv){
             return 1/(1 - z * z);
         }
-        return 0.5 * log((1 + z)/(z - 1));
+        return 0.5 * std::log((1 + z)/(z - 1));
     }
 
     std::vector<double> Activation::arcoth(std::vector<double> z, bool deriv){
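For reference, the closed forms these inverse hyperbolic activations evaluate, together with the derivatives returned by the deriv branches, are the standard logarithmic identities below; note that the textbook form of arsech is ln(1/z + sqrt(1/z^2 - 1)), which differs from the product expression in the arsech hunk above.

\[
\begin{aligned}
\operatorname{arsinh} z &= \ln\!\bigl(z + \sqrt{z^2 + 1}\bigr), & \tfrac{d}{dz}\operatorname{arsinh} z &= \tfrac{1}{\sqrt{z^2 + 1}},\\
\operatorname{arcosh} z &= \ln\!\bigl(z + \sqrt{z^2 - 1}\bigr), & \tfrac{d}{dz}\operatorname{arcosh} z &= \tfrac{1}{\sqrt{z^2 - 1}},\\
\operatorname{artanh} z &= \tfrac{1}{2}\ln\!\frac{1 + z}{1 - z}, & \tfrac{d}{dz}\operatorname{artanh} z &= \tfrac{1}{1 - z^2},\\
\operatorname{arcsch} z &= \ln\!\Bigl(\tfrac{1}{z} + \sqrt{\tfrac{1}{z^2} + 1}\Bigr), & \tfrac{d}{dz}\operatorname{arcsch} z &= \tfrac{-1}{|z|\sqrt{z^2 + 1}},\\
\operatorname{arsech} z &= \ln\!\Bigl(\tfrac{1}{z} + \sqrt{\tfrac{1}{z^2} - 1}\Bigr), & \tfrac{d}{dz}\operatorname{arsech} z &= \tfrac{-1}{z\sqrt{1 - z^2}},\\
\operatorname{arcoth} z &= \tfrac{1}{2}\ln\!\frac{z + 1}{z - 1}, & \tfrac{d}{dz}\operatorname{arcoth} z &= \tfrac{1}{1 - z^2}.
\end{aligned}
\]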
@@ -139,8 +139,8 @@ namespace MLPP{
         for(int j = 0; j < inputSet.size(); j++){
             for(int k = 0; k < vocab.size(); k++){
                 if(inputSet[i][j] == vocab[k]){
-                    score_0 += log(theta[0][vocab[k]]);
-                    score_1 += log(theta[1][vocab[k]]);
+                    score_0 += std::log(theta[0][vocab[k]]);
+                    score_1 += std::log(theta[1][vocab[k]]);
 
                     foundIndices.push_back(k);
                 }
@@ -155,13 +155,13 @@ namespace MLPP{
             }
         }
         if(!found){
-            score_0 += log(1 - theta[0][vocab[i]]);
-            score_1 += log(1 - theta[1][vocab[i]]);
+            score_0 += std::log(1 - theta[0][vocab[i]]);
+            score_1 += std::log(1 - theta[1][vocab[i]]);
         }
     }
 
-    score_0 += log(prior_0);
-    score_1 += log(prior_1);
+    score_0 += std::log(prior_0);
+    score_1 += std::log(prior_1);
 
     score_0 = exp(score_0);
     score_1 = exp(score_1);
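In model terms, these two hunks accumulate the Bernoulli naive Bayes log-score: one term per vocabulary word that occurs in the example, one term per word that does not, plus the log prior, after which the score is exponentiated back to a probability scale. With x_w in {0, 1} indicating whether word w is present and theta_{c,w} the per-class word probability, the quantity being built is:

\[
\text{score}_c \;=\; \ln P(c) \;+\; \sum_{w \in \text{vocab}} \Bigl[\, x_w \ln \theta_{c,w} + (1 - x_w)\ln\bigl(1 - \theta_{c,w}\bigr) \Bigr], \qquad c \in \{0, 1\}.
\]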
@@ -157,7 +157,7 @@ namespace MLPP{
         double sum = 0;
         double eps = 1e-8;
         for(int i = 0; i < y_hat.size(); i++){
-            sum += -(y[i] * log(y_hat[i] + eps) + (1 - y[i]) * log(1 - y_hat[i] + eps));
+            sum += -(y[i] * std::log(y_hat[i] + eps) + (1 - y[i]) * std::log(1 - y_hat[i] + eps));
         }
 
         return sum / y_hat.size();
@@ -168,7 +168,7 @@ namespace MLPP{
         double eps = 1e-8;
         for(int i = 0; i < y_hat.size(); i++){
             for(int j = 0; j < y_hat[i].size(); j++){
-                sum += -(y[i][j] * log(y_hat[i][j] + eps) + (1 - y[i][j]) * log(1 - y_hat[i][j] + eps));
+                sum += -(y[i][j] * std::log(y_hat[i][j] + eps) + (1 - y[i][j]) * std::log(1 - y_hat[i][j] + eps));
             }
         }
 
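Both LogLoss overloads evaluate the binary cross-entropy, with a small eps added inside each logarithm so that predictions of exactly 0 or 1 do not produce -inf; the vector overload averages over the N examples, and the matrix overload sums over both indices:

\[
\mathrm{LogLoss}(\hat{y}, y) \;=\; \frac{1}{N}\sum_{i=1}^{N} -\Bigl[\, y_i \ln(\hat{y}_i + \varepsilon) + (1 - y_i)\ln(1 - \hat{y}_i + \varepsilon) \Bigr], \qquad \varepsilon = 10^{-8}.
\]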
@@ -188,7 +188,7 @@ namespace MLPP{
     double Cost::CrossEntropy(std::vector<double> y_hat, std::vector<double> y){
         double sum = 0;
         for(int i = 0; i < y_hat.size(); i++){
-            sum += y[i] * log(y_hat[i]);
+            sum += y[i] * std::log(y_hat[i]);
         }
 
         return -1 * sum;
@@ -198,7 +198,7 @@ namespace MLPP{
         double sum = 0;
         for(int i = 0; i < y_hat.size(); i++){
             for(int j = 0; j < y_hat[i].size(); j++){
-                sum += y[i][j] * log(y_hat[i][j]);
+                sum += y[i][j] * std::log(y_hat[i][j]);
             }
         }
 
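The CrossEntropy overloads compute the usual negative log-likelihood of the targets against the predicted probabilities, without the eps guard used in LogLoss:

\[
\mathrm{CE}(\hat{y}, y) \;=\; -\sum_{i} y_i \ln \hat{y}_i,
\qquad
\mathrm{CE}(\hat{Y}, Y) \;=\; -\sum_{i}\sum_{j} Y_{ij} \ln \hat{Y}_{ij}.
\]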
@@ -385,7 +385,7 @@ namespace MLPP{
         IDF.resize(frequency.size());
 
         for(int i = 0; i < IDF.size(); i++){
-            IDF[i] = log((double)segmented_sentences.size() / (double)frequency[i]);
+            IDF[i] = std::log((double)segmented_sentences.size() / (double)frequency[i]);
         }
 
         std::vector<std::vector<double>> TFIDF;
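This loop fills the inverse document frequency vector with the standard unsmoothed definition, where N is the number of segmented sentences (documents) and df_i is the number of documents containing term i:

\[
\mathrm{IDF}_i \;=\; \ln\!\frac{N}{\mathrm{df}_i}.
\]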
@@ -36,7 +36,7 @@ namespace MLPP{
         double score[class_num];
         double y_hat_i = 1;
         for(int i = class_num - 1; i >= 0; i--){
-            y_hat_i += log(priors[i] * (1 / sqrt(2 * M_PI * sigma[i] * sigma[i])) * exp(-(x[i] * mu[i]) * (x[i] * mu[i]) / (2 * sigma[i] * sigma[i])));
+            y_hat_i += std::log(priors[i] * (1 / sqrt(2 * M_PI * sigma[i] * sigma[i])) * exp(-(x[i] * mu[i]) * (x[i] * mu[i]) / (2 * sigma[i] * sigma[i])));
             score[i] = exp(y_hat_i);
         }
         return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
@@ -79,7 +79,7 @@ namespace MLPP{
             double y_hat_i = 1;
             for(int j = class_num - 1; j >= 0; j--){
                 for(int k = 0; k < inputSet[i].size(); k++){
-                    y_hat_i += log(priors[j] * (1 / sqrt(2 * M_PI * sigma[j] * sigma[j])) * exp(-(inputSet[i][k] * mu[j]) * (inputSet[i][k] * mu[j]) / (2 * sigma[j] * sigma[j])));
+                    y_hat_i += std::log(priors[j] * (1 / sqrt(2 * M_PI * sigma[j] * sigma[j])) * exp(-(inputSet[i][k] * mu[j]) * (inputSet[i][k] * mu[j]) / (2 * sigma[j] * sigma[j])));
                 }
                 score[j] = exp(y_hat_i);
                 std::cout << score[j] << std::endl;
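Both GaussianNB hunks accumulate a per-class score from the log of a prior multiplied by a univariate Gaussian density. For reference, the standard class-conditional term uses (x - mu)^2 in the exponent, whereas the hunks above multiply x by mu; the reference form is:

\[
\ln\Bigl[\, P(c)\,\tfrac{1}{\sqrt{2\pi\sigma_c^2}}\, \exp\Bigl(-\tfrac{(x - \mu_c)^2}{2\sigma_c^2}\Bigr) \Bigr]
\;=\; \ln P(c) \;-\; \tfrac{1}{2}\ln\bigl(2\pi\sigma_c^2\bigr) \;-\; \frac{(x - \mu_c)^2}{2\sigma_c^2}.
\]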
@@ -34,7 +34,7 @@ namespace MLPP{
 
         std::vector<std::vector<double>> scalarAdd(double scalar, std::vector<std::vector<double>> A);
 
-        std::vector<std::vector<double>> log(std::vector<std::vector<double>> A);
+        std::vector<std::vector<double>> std::log(std::vector<std::vector<double>> A);
 
         std::vector<std::vector<double>> log10(std::vector<std::vector<double>> A);
 
@@ -132,7 +132,7 @@ namespace MLPP{
 
         std::vector<double> subtractMatrixRows(std::vector<double> a, std::vector<std::vector<double>> B);
 
-        std::vector<double> log(std::vector<double> a);
+        std::vector<double> std::log(std::vector<double> a);
 
        std::vector<double> log10(std::vector<double> a);
 
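A caveat on the two header hunks above: C++ does not allow a function to be declared with a qualified name, so a declaration spelled std::log inside the class will not compile. A minimal sketch of the usual pattern, assuming (as the surrounding header suggests) that these declarations live in a LinAlg class inside namespace MLPP: keep the member named log and qualify only the standard-library call inside its definition, since the member would otherwise hide the <cmath> overloads during unqualified lookup.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    namespace MLPP {
        class LinAlg {
        public:
            std::vector<double> log(std::vector<double> a);   // declaration keeps the name "log"
        };

        std::vector<double> LinAlg::log(std::vector<double> a) {
            std::vector<double> b(a.size());
            for (std::size_t i = 0; i < a.size(); i++) {
                // std:: qualification is needed here: the member LinAlg::log hides
                // the <cmath> overloads in unqualified name lookup.
                b[i] = std::log(a[i]);
            }
            return b;
        }
    }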
@@ -35,14 +35,14 @@ namespace MLPP{
             for(int k = 0; k < vocab.size(); k++){
                 if(x[j] == vocab[k]){
                     for(int p = class_num - 1; p >= 0; p--){
-                        score[p] += log(theta[p][vocab[k]]);
+                        score[p] += std::log(theta[p][vocab[k]]);
                     }
                 }
             }
         }
 
         for(int i = 0; i < priors.size(); i++){
-            score[i] += log(priors[i]);
+            score[i] += std::log(priors[i]);
         }
 
         return std::distance(score, std::max_element(score, score + sizeof(score) / sizeof(double)));
@@ -98,14 +98,14 @@ namespace MLPP{
                 for(int k = 0; k < vocab.size(); k++){
                     if(inputSet[i][j] == vocab[k]){
                         for(int p = class_num - 1; p >= 0; p--){
-                            score[p] += log(theta[i][vocab[k]]);
+                            score[p] += std::log(theta[i][vocab[k]]);
                         }
                     }
                 }
             }
 
             for(int i = 0; i < priors.size(); i++){
-                score[i] += log(priors[i]);
+                score[i] += std::log(priors[i]);
                 score[i] = exp(score[i]);
             }
 
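The two MultinomialNB hunks follow the same pattern as the Bernoulli case, except that only words actually present in the example contribute, so in the standard multinomial naive Bayes form each class score reduces to the log prior plus one log-theta term per observed word x_j:

\[
\text{score}_c \;=\; \ln P(c) \;+\; \sum_{j} \ln \theta_{c,\,x_j}.
\]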
@@ -214,6 +214,6 @@ namespace MLPP{
         if(x == y){
             return x;
         }
-        return (y - x) / (log(y) - log(x));
+        return (y - x) / (log(y) - std::log(x));
     }
 }
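The final hunk touches the logarithmic mean of two positive numbers, which takes the limit value x when the arguments coincide and otherwise is defined by:

\[
L(x, y) \;=\; \frac{y - x}{\ln y - \ln x}, \qquad L(x, x) = x.
\]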