diff --git a/MLPP/NumericalAnalysis/NumericalAnalysis.cpp b/MLPP/NumericalAnalysis/NumericalAnalysis.cpp
new file mode 100644
index 0000000..c4b812c
--- /dev/null
+++ b/MLPP/NumericalAnalysis/NumericalAnalysis.cpp
@@ -0,0 +1,44 @@
+//
+//  NumericalAnalysis.cpp
+//
+//  Created by Marc Melikyan on 11/13/20.
+//
+
+#include "NumericalAnalysis.hpp"
+#include <iostream>
+
+namespace MLPP{
+
+    double NumericalAnalysis::numDiff(double(*function)(double), double x){
+        double eps = 1e-10;
+        return (function(x + eps) - function(x)) / eps; // This is just the formal def. of the derivative.
+    }
+
+    double NumericalAnalysis::numDiff(double(*function)(std::vector<double>), std::vector<double> x, int axis){
+        // For multivariable function analysis.
+        // This will be used for calculating Jacobian vectors.
+        // Differentiate with respect to the indicated axis. (0, 1, 2 ...)
+        double eps = 1e-10;
+        std::vector<double> x_eps = x;
+        x_eps[axis] += eps;
+
+        return (function(x_eps) - function(x)) / eps;
+    }
+
+    double NumericalAnalysis::newtonRaphsonMethod(double(*function)(double), double x_0, double epoch){
+        double x = x_0;
+        for(int i = 0; i < epoch; i++){
+            x = x - function(x)/numDiff(function, x);
+        }
+        return x;
+    }
+
+    std::vector<double> NumericalAnalysis::jacobian(double(*function)(std::vector<double>), std::vector<double> x){
+        std::vector<double> jacobian;
+        jacobian.resize(x.size());
+        for(int i = 0; i < jacobian.size(); i++){
+            jacobian[i] = numDiff(function, x, i); // Derivative w.r.t axis i evaluated at x. For all x_i.
+        }
+        return jacobian;
+    }
+}
\ No newline at end of file
diff --git a/MLPP/NumericalAnalysis/NumericalAnalysis.hpp b/MLPP/NumericalAnalysis/NumericalAnalysis.hpp
new file mode 100644
index 0000000..f6e6977
--- /dev/null
+++ b/MLPP/NumericalAnalysis/NumericalAnalysis.hpp
@@ -0,0 +1,27 @@
+//
+//  NumericalAnalysis.hpp
+//
+//
+
+#ifndef NumericalAnalysis_hpp
+#define NumericalAnalysis_hpp
+
+#include <vector>
+
+namespace MLPP{
+    class NumericalAnalysis{
+        public:
+        /* A numerical method for derivatives is used. This may be subject to change,
+        as an analytical method for calculating derivatives will most likely be used in
+        the future.
+        */
+        double numDiff(double(*function)(double), double x);
+        double numDiff(double(*function)(std::vector<double>), std::vector<double> x, int axis);
+        double newtonRaphsonMethod(double(*function)(double), double x_0, double epoch);
+
+        std::vector<double> jacobian(double(*function)(std::vector<double>), std::vector<double>);
+
+    };
+}
+
+#endif /* NumericalAnalysis_hpp */
diff --git a/main.cpp b/main.cpp
index 870fc0d..a5e2424 100644
--- a/main.cpp
+++ b/main.cpp
@@ -43,19 +43,31 @@
 #include "MLPP/Data/Data.hpp"
 #include "MLPP/Convolutions/Convolutions.hpp"
 #include "MLPP/SVC/SVC.hpp"
+#include "MLPP/NumericalAnalysis/NumericalAnalysis.hpp"
 
 using namespace MLPP;
 
+
+double f(double x){
+    return x*x*x + 2*x - 2;
+}
+
+double f_mv(std::vector<double> x){
+    return x[0] * x[0] + x[1] * x[1] + x[1] + 5;
+    // Where x,y=x[0],x[1], this function is defined as:
+    // f(x,y) = x^2 + y^2 + y + 5
+}
+
 
 int main() {
 
-    // OBJECTS
+    // // OBJECTS
     Stat stat;
     LinAlg alg;
-    Activation avn;
-    Cost cost;
-    Data data;
-    Convolutions conv;
+    // Activation avn;
+    // Cost cost;
+    // Data data;
+    // Convolutions conv;
 
     // DATA SETS
     // std::vector<std::vector<double>> inputSet = {{1,2,3,4,5,6,7,8,9,10}, {3,5,9,12,15,18,21,24,27,30}};
@@ -460,5 +472,16 @@ int main() {
     // alg.printMatrix(L);
     // alg.printMatrix(Lt);
 
+    // Checks for numerical analysis class.
+    NumericalAnalysis numAn;
+
+    std::cout << numAn.numDiff(&f, 1) << std::endl;
+    std::cout << numAn.newtonRaphsonMethod(&f, 1, 1000) << std::endl;
+
+    std::cout << numAn.numDiff(&f_mv, {1, 1}, 1) << std::endl; // Derivative w.r.t. x[1], i.e. y.
+
+    alg.printVector(numAn.jacobian(&f_mv, {1, 1}));
+
     return 0;
-}
\ No newline at end of file
+}
+
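For reviewers, the new routines can be sanity-checked against closed-form values: f(x) = x^3 + 2x - 2 has f'(1) = 3*1^2 + 2 = 5 and a single real root near x ~ 0.7709, and f_mv(x, y) = x^2 + y^2 + y + 5 has gradient (2x, 2y + 1), i.e. (2, 3) at (1, 1). The standalone sketch below is illustrative only and not part of this patch; it assumes the std::vector<double> signatures used above, reuses eps = 1e-10, and mirrors the forward-difference and Newton-Raphson logic so the expected outputs are easy to compare.

// sanity_check.cpp -- standalone illustration of the forward-difference
// derivative and Newton-Raphson iteration used by NumericalAnalysis.
#include <iostream>
#include <vector>

// Forward difference: f'(x) ~ (f(x + eps) - f(x)) / eps.
double numDiff(double (*f)(double), double x){
    double eps = 1e-10;
    return (f(x + eps) - f(x)) / eps;
}

// Forward difference along one axis, the building block of the Jacobian.
double numDiff(double (*f)(std::vector<double>), std::vector<double> x, int axis){
    double eps = 1e-10;
    std::vector<double> x_eps = x;
    x_eps[axis] += eps;
    return (f(x_eps) - f(x)) / eps;
}

// Newton-Raphson update x <- x - f(x)/f'(x), with f' taken numerically.
double newtonRaphson(double (*f)(double), double x_0, int epochs){
    double x = x_0;
    for(int i = 0; i < epochs; i++){
        x = x - f(x) / numDiff(f, x);
    }
    return x;
}

double f(double x){ return x*x*x + 2*x - 2; }       // f'(x) = 3x^2 + 2
double f_mv(std::vector<double> x){                 // grad f_mv = (2x, 2y + 1)
    return x[0] * x[0] + x[1] * x[1] + x[1] + 5;
}

int main(){
    std::cout << numDiff(&f, 1) << std::endl;              // ~5       (3*1^2 + 2)
    std::cout << newtonRaphson(&f, 1, 1000) << std::endl;  // ~0.7709  (real root of f)
    std::cout << numDiff(&f_mv, {1, 1}, 0) << std::endl;   // ~2       (2x at x = 1)
    std::cout << numDiff(&f_mv, {1, 1}, 1) << std::endl;   // ~3       (2y + 1 at y = 1)
    return 0;
}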