Mirror of https://github.com/Relintai/MLPP.git (synced 2024-11-12 10:15:01 +01:00)

Commit: 00004817d9 (message: "so")
Parent: 90f29f8a89
NumericalAnalysis.hpp

@@ -7,6 +7,7 @@
 #define NumericalAnalysis_hpp
 
 #include <vector>
+#include <string>
 
 namespace MLPP{
     class NumericalAnalysis{
Stat.cpp

@@ -31,7 +31,6 @@ namespace MLPP{
 
     double Stat::median(std::vector<double> x){
         double center = double(x.size())/double(2);
-        std::vector<double> original_vec = x;
         sort(x.begin(), x.end());
         if(x.size() % 2 == 0){
             return mean({x[center - 1], x[center]});
@@ -39,7 +38,6 @@ namespace MLPP{
         else{
             return x[center - 1 + 0.5];
         }
-        x = original_vec;
     }
 
     std::vector<double> Stat::mode(std::vector<double> x){
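Note on the two removed lines: Stat::median takes x by value, so sorting the local copy never affects the caller's vector, and the x = original_vec; restore sat after the if/else, where both branches have already returned; the backup and the restore were dead code. For reference, a minimal standalone sketch (not part of this commit; the function name is illustrative) of the same median logic that also avoids indexing with a double-typed center:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch only: median over a by-value copy, mirroring Stat::median after this commit.
double median_sketch(std::vector<double> x){
    std::sort(x.begin(), x.end());           // safe: x is this function's own copy
    const std::size_t n = x.size();
    if(n % 2 == 0){
        return 0.5 * (x[n/2 - 1] + x[n/2]);  // even count: average the two middle elements
    }
    return x[n/2];                           // odd count: the middle element
}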
main.cpp (35 changed lines)
@@ -148,13 +148,14 @@ int main() {
     // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
     // std::vector<double> outputSet = {0,1,1,0};
 
-    // // STATISTICS
-    // std::vector<double> x = {1,2,3,4,5,6,7,8,9,10};
-    // std::vector<double> y = {10,9,8,7,6,5,4,3,2,1};
-    // std::vector<double> w = {0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1};
+    // STATISTICS
+    std::vector<double> x = {1,2,3,4,5,6,5,8,9,10,1};
+    std::vector<double> y = {10,9,8,7,6,5,4,3,2,1};
+    std::vector<double> w = {0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1};
 
     // std::cout << "Arithmetic Mean: " << stat.mean(x) << std::endl;
-    // std::cout << "Median: " << stat.median(x) << std::endl;
+    std::cout << "Median: " << stat.median(x) << std::endl;
+    alg.printVector(x);
     // alg.printVector(stat.mode(x));
     // std::cout << "Range: " << stat.range(x) << std::endl;
     // std::cout << "Midrange: " << stat.midrange(x) << std::endl;
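With the new inputs, x has 11 elements; sorted it is {1,1,2,3,4,5,5,6,8,9,10}, so the Median line should print 5, and the added alg.printVector(x) presumably confirms that the caller's x is left untouched, since median only sorts its by-value copy. A quick standalone check (not in the repo) of that expected value:

#include <algorithm>
#include <iostream>
#include <vector>

int main(){
    std::vector<double> x = {1,2,3,4,5,6,5,8,9,10,1};
    std::sort(x.begin(), x.end());                           // {1,1,2,3,4,5,5,6,8,9,10}
    std::cout << "Median: " << x[x.size()/2] << std::endl;   // odd count -> middle element: prints 5
    return 0;
}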
@@ -361,18 +362,18 @@ int main() {
     // Possible Weight Init Methods: Default, Uniform, HeNormal, HeUniform, XavierNormal, XavierUniform
     // Possible Activations: Linear, Sigmoid, Swish, Softplus, Softsign, CLogLog, Ar{Sinh, Cosh, Tanh, Csch, Sech, Coth}, GaussianCDF, GELU, UnitStep
     // Possible Loss Functions: MSE, RMSE, MBE, LogLoss, CrossEntropy, HingeLoss
-    std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
-    std::vector<double> outputSet = {0,1,1,0};
-    ANN ann(alg.transpose(inputSet), outputSet);
-    //ann.addLayer(10, "RELU");
-    ann.addLayer(10, "Sigmoid");
-    ann.addOutputLayer("Sigmoid", "LogLoss");
-    //ann.AMSGrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, 1);
-    //ann.Adadelta(1, 1000, 2, 0.9, 0.000001, 1);
-    ann.Momentum(0.1, 8000, 2, 0.9, true, 1);
-    //ann.MBGD(0.1, 1000, 2, 1);
-    alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
-    std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
+    // std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
+    // std::vector<double> outputSet = {0,1,1,0};
+    // ANN ann(alg.transpose(inputSet), outputSet);
+    // //ann.addLayer(10, "RELU");
+    // ann.addLayer(10, "Sigmoid");
+    // ann.addOutputLayer("Sigmoid", "LogLoss");
+    // //ann.AMSGrad(0.1, 10000, 1, 0.9, 0.999, 0.000001, 1);
+    // //ann.Adadelta(1, 1000, 2, 0.9, 0.000001, 1);
+    // ann.Momentum(0.1, 8000, 2, 0.9, true, 1);
+    // //ann.MBGD(0.1, 1000, 2, 1);
+    // alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
+    // std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;
 
     // typedef std::vector<std::vector<double>> Matrix;
     // typedef std::vector<double> Vector;
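For reference, the block this hunk comments out trains a small sigmoid network on XOR: the columns of inputSet paired with outputSet give (0,0)->0, (0,1)->1, (1,0)->1, (1,1)->0. Reassembled from the '-' lines above, and assuming the MLPP ANN class and the alg (LinAlg) instance already in scope in main.cpp, it reads:

// The XOR demo disabled by this hunk, reassembled from the removed lines.
std::vector<std::vector<double>> inputSet = {{0,0,1,1}, {0,1,0,1}};
std::vector<double> outputSet = {0,1,1,0};

ANN ann(alg.transpose(inputSet), outputSet);   // transpose: 2x4 feature matrix -> 4 samples of 2 features
ann.addLayer(10, "Sigmoid");                   // single hidden layer of 10 units
ann.addOutputLayer("Sigmoid", "LogLoss");
ann.Momentum(0.1, 8000, 2, 0.9, true, 1);      // momentum training with the original hyperparameters

alg.printVector(ann.modelSetTest(alg.transpose(inputSet)));
std::cout << "ACCURACY: " << 100 * ann.score() << "%" << std::endl;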