diff --git a/MLPP/Convolutions/Convolutions.cpp b/MLPP/Convolutions/Convolutions.cpp
index b5ea669..97aef9a 100644
--- a/MLPP/Convolutions/Convolutions.cpp
+++ b/MLPP/Convolutions/Convolutions.cpp
@@ -87,7 +87,7 @@ namespace MLPP{
         int N = input[0].size();
         int F = filter[0].size();
         int C = filter.size() / input.size();
-        int mapSize = (N - F + 2*P) / S + 1; // This is computed as ⌊mapSize⌋ by def- thanks C++!
+        int mapSize = (N - F + 2*P) / S + 1; // This is computed as ⌊mapSize⌋ by def.
 
         if(P != 0){
             for(int c = 0; c < input.size(); c++){
diff --git a/MLPP/LinAlg/LinAlg.cpp b/MLPP/LinAlg/LinAlg.cpp
index a7a3ab8..58fcedc 100644
--- a/MLPP/LinAlg/LinAlg.cpp
+++ b/MLPP/LinAlg/LinAlg.cpp
@@ -996,6 +996,20 @@ namespace MLPP{
         return c;
     }
 
+    std::vector<std::vector<double>> LinAlg::tensor_vec_mult(std::vector<std::vector<std::vector<double>>> A, std::vector<double> b){
+        std::vector<std::vector<double>> C;
+        C.resize(A.size());
+        for(int i = 0; i < C.size(); i++){
+            C[i].resize(A[0].size());
+        }
+        for(int i = 0; i < C.size(); i++){
+            for(int j = 0; j < C[i].size(); j++){
+                C[i][j] = dot(A[i][j], b);
+            }
+        }
+        return C;
+    }
+
     std::vector<double> LinAlg::flatten(std::vector<std::vector<std::vector<double>>> A){
         std::vector<double> c;
         for(int i = 0; i < A.size(); i++){
diff --git a/MLPP/LinAlg/LinAlg.hpp b/MLPP/LinAlg/LinAlg.hpp
index 3b05a38..b032d42 100644
--- a/MLPP/LinAlg/LinAlg.hpp
+++ b/MLPP/LinAlg/LinAlg.hpp
@@ -180,6 +180,8 @@ namespace MLPP{
        std::vector<double> mat_vec_mult(std::vector<std::vector<double>> A, std::vector<double> b);
 
         // TENSOR FUNCTIONS
+        std::vector<std::vector<double>> tensor_vec_mult(std::vector<std::vector<std::vector<double>>> A, std::vector<double> b);
+
         std::vector<double> flatten(std::vector<std::vector<std::vector<double>>> A);
 
         void printTensor(std::vector<std::vector<std::vector<double>>> A);
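Note on LinAlg::tensor_vec_mult: it contracts the innermost axis of an (M x K x N) tensor against an N-vector, giving an M x K matrix with C[i][j] = dot(A[i][j], b). A minimal standalone sketch of the same contraction, independent of MLPP; the names tensorVecMult, Vec, Mat, and Tensor are illustrative only, not part of the library:

    #include <iostream>
    #include <vector>

    using Vec    = std::vector<double>;
    using Mat    = std::vector<Vec>;
    using Tensor = std::vector<Mat>;

    double dot(const Vec& a, const Vec& b){
        double s = 0;
        for(int k = 0; k < a.size(); k++) s += a[k] * b[k];
        return s;
    }

    Mat tensorVecMult(const Tensor& A, const Vec& b){
        Mat C(A.size(), Vec(A[0].size()));
        for(int i = 0; i < A.size(); i++)
            for(int j = 0; j < A[i].size(); j++)
                C[i][j] = dot(A[i][j], b); // contract the innermost (length-N) axis
        return C;
    }

    int main(){
        // Same toy tensor as the commented test at the bottom of main.cpp:
        // two 3x2 slices, each row contracted against b = (1, 2).
        Tensor A = {{{1,2}, {1,2}, {1,2}}, {{1,2}, {1,2}, {1,2}}};
        for(const Vec& row : tensorVecMult(A, {1, 2})){
            for(double v : row) std::cout << v << " "; // every entry: 1*1 + 2*2 = 5
            std::cout << "\n";
        }
    }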
diff --git a/MLPP/NumericalAnalysis/NumericalAnalysis.cpp b/MLPP/NumericalAnalysis/NumericalAnalysis.cpp
index 5818e15..c84ee84 100644
--- a/MLPP/NumericalAnalysis/NumericalAnalysis.cpp
+++ b/MLPP/NumericalAnalysis/NumericalAnalysis.cpp
@@ -40,6 +40,10 @@ namespace MLPP{
         return linearApproximation(function, c, x) + 0.5 * numDiff_2(function, c) * (x - c) * (x - c);
     }
 
+    double NumericalAnalysis::cubicApproximation(double(*function)(double), double c, double x){
+        return quadraticApproximation(function, c, x) + (1.0/6.0) * numDiff_3(function, c) * (x - c) * (x - c) * (x - c);
+    }
+
     double NumericalAnalysis::numDiff(double(*function)(std::vector<double>), std::vector<double> x, int axis){
         // For multivariable function analysis.
         // This will be used for calculating Jacobian vectors.
@@ -192,6 +196,23 @@ namespace MLPP{
         return linearApproximation(function, c, x) + 0.5 * alg.matmult({(alg.subtraction(x, c))}, alg.matmult(hessian(function, c), alg.transpose({alg.subtraction(x, c)})))[0][0];
     }
 
+    double NumericalAnalysis::cubicApproximation(double(*function)(std::vector<double>), std::vector<double> c, std::vector<double> x){
+        /*
+        Not completely sure, as the literature seldom discusses the third-order Taylor
+        approximation, in particular for multivariate cases, but ostensibly the
+        matrix/tensor/vector multiplies should look something like this:
+
+        (N x N x N) * (N x 1) [tensor-vector mult] => (N x N x 1) => (N x N)
+        Perform the remaining multiplies as done for the 2nd-order approximation.
+        The result is a scalar.
+        */
+        LinAlg alg;
+        std::vector<std::vector<double>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c));
+        double resultScalar = alg.matmult({(alg.subtraction(x, c))}, alg.matmult(resultMat, alg.transpose({alg.subtraction(x, c)})))[0][0];
+
+        return quadraticApproximation(function, c, x) + (1.0/6.0) * resultScalar;
+    }
+
     double NumericalAnalysis::laplacian(double(*function)(std::vector<double>), std::vector<double> x){
         LinAlg alg;
         std::vector<std::vector<double>> hessian_matrix = hessian(function, x);
diff --git a/MLPP/NumericalAnalysis/NumericalAnalysis.hpp b/MLPP/NumericalAnalysis/NumericalAnalysis.hpp
index 7ed5159..cc49094 100644
--- a/MLPP/NumericalAnalysis/NumericalAnalysis.hpp
+++ b/MLPP/NumericalAnalysis/NumericalAnalysis.hpp
@@ -22,6 +22,7 @@ namespace MLPP{
         double constantApproximation(double(*function)(double), double c);
         double linearApproximation(double(*function)(double), double c, double x);
         double quadraticApproximation(double(*function)(double), double c, double x);
+        double cubicApproximation(double(*function)(double), double c, double x);
 
         double numDiff(double(*function)(std::vector<double>), std::vector<double> x, int axis);
         double numDiff_2(double(*function)(std::vector<double>), std::vector<double> x, int axis1, int axis2);
@@ -38,6 +39,7 @@ namespace MLPP{
         double constantApproximation(double(*function)(std::vector<double>), std::vector<double> c);
         double linearApproximation(double(*function)(std::vector<double>), std::vector<double> c, std::vector<double> x);
         double quadraticApproximation(double(*function)(std::vector<double>), std::vector<double> c, std::vector<double> x);
+        double cubicApproximation(double(*function)(std::vector<double>), std::vector<double> c, std::vector<double> x);
 
         double laplacian(double(*function)(std::vector<double>), std::vector<double> x); // laplacian
     };
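Note on the multivariate cubicApproximation: the added term is the third-order Taylor contribution (1/6) * sum_{i,j,k} T[i][j][k] * d[i] * d[j] * d[k], with T = thirdOrderTensor(function, c) and d = x - c. The tensor_vec_mult call followed by the two matmult calls above computes exactly this triple contraction. A sketch of the same scalar as an explicit triple loop, assuming T is cubical (N x N x N); thirdOrderTerm is a hypothetical helper name, not an MLPP method:

    #include <vector>

    // Third-order Taylor term as a direct sum; for an N x N x N tensor of
    // third partials this equals d^T * (T contracted with d) * d, i.e. the
    // tensor_vec_mult + matmult pipeline used in the diff above.
    double thirdOrderTerm(const std::vector<std::vector<std::vector<double>>>& T,
                          const std::vector<double>& d){
        double s = 0;
        for(int i = 0; i < T.size(); i++)
            for(int j = 0; j < T[i].size(); j++)
                for(int k = 0; k < T[i][j].size(); k++)
                    s += T[i][j][k] * d[i] * d[j] * d[k];
        return (1.0/6.0) * s; // the 1/6 factor must stay floating point; (1/6) is integer 0
    }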
diff --git a/main.cpp b/main.cpp
index 997afb2..eb2dbb6 100644
--- a/main.cpp
+++ b/main.cpp
@@ -9,12 +9,14 @@
 // POLYMORPHIC IMPLEMENTATION OF REGRESSION CLASSES
 // EXTEND SGD/MBGD SUPPORT FOR DYN. SIZED ANN
 // ADD LEAKYRELU, ELU, SELU TO ANN
+// FIX VECTOR/MATRIX/TENSOR RESIZE ROUTINE
 // HYPOTHESIS TESTING CLASS
 // GAUSS MARKOV CHECKER CLASS
 
 
 #include <iostream>
 #include <ctime>
+#include <cmath>
 #include <vector>
 #include "MLPP/UniLinReg/UniLinReg.hpp"
 #include "MLPP/LinReg/LinReg.hpp"
@@ -54,7 +56,7 @@ using namespace MLPP;
 // }
 
 double f(double x){
-    return cos(x);
+    return sin(x);
 }
 
 /* y = x^3 + 2x - 2 */
@@ -77,18 +79,32 @@
 double f_mv(std::vector<double> x){
     return x[0] * x[0] * x[0] + x[0] + x[1] * x[1] * x[1] * x[0] + x[2] * x[2] * x[1];
 }
+
 /*
-    Where x, y = x[0], x[1], this function is defined as:
-    f(x, y) = x^3 + x + xy^3 + yz^2
-    ∂f/∂x = 4x^3 + 3y^2
-    ∂^2f/∂x^2 = 6x
+    Where x, y, z = x[0], x[1], x[2], this function is defined as:
+    f(x, y, z) = x^3 + x + xy^3 + yz^2
 
-    ∂f/∂z = 2zy
-    ∂^2f/∂z^2 = 2z
+    ∂f/∂x = 3x^2 + 1 + y^3
+    ∂^2f/∂x^2 = 6x
+    ∂^3f/∂x^3 = 6
 
-    ∂f/∂y = 3xy^2
-    ∂^2f/∂y∂x = 3y^2
+    ∂f/∂y = 3xy^2 + z^2
+    ∂^2f/∂y^2 = 6xy
+    ∂^3f/∂y^3 = 6x
+    ∂^2f/∂y∂x = 3y^2
+
+    ∂f/∂z = 2yz
+    ∂^2f/∂z^2 = 2y
+    ∂^3f/∂z^3 = 0
+
 */
@@ -536,24 +552,43 @@ int main() {
 
     //std::cout << numAn.quadraticApproximation(f, 0, 1) << std::endl;
 
+    // std::cout << numAn.cubicApproximation(f, 0, 1.001) << std::endl;
+
+    // std::cout << f(1.001) << std::endl;
+
     // std::cout << numAn.quadraticApproximation(f_mv, {0, 0, 0}, {1, 1, 1}) << std::endl;
 
     // std::cout << numAn.numDiff(&f, 1) << std::endl;
     // std::cout << numAn.newtonRaphsonMethod(&f, 1, 1000) << std::endl;
-    std::cout << numAn.invQuadraticInterpolation(&f, {100, 2,1.5}, 10) << std::endl;
+    //std::cout << numAn.invQuadraticInterpolation(&f, {100, 2,1.5}, 10) << std::endl;
 
     // std::cout << numAn.numDiff(&f_mv, {1, 1}, 1) << std::endl; // Derivative w.r.t. x.
 
     // alg.printVector(numAn.jacobian(&f_mv, {1, 1}));
 
-    //std::cout << numAn.numDiff_2(&f, 2) << std::endl;
+    //std::cout << numAn.numDiff_2(&f, 2) << std::endl;
+
+    //std::cout << numAn.numDiff_3(&f, 2) << std::endl;
 
     // std::cout << numAn.numDiff_2(&f_mv, {2, 2, 500}, 2, 2) << std::endl;
 
+    //std::cout << numAn.numDiff_3(&f_mv, {2, 1000, 130}, 0, 0, 0) << std::endl;
+
+    // alg.printTensor(numAn.thirdOrderTensor(&f_mv, {1, 1, 1}));
 
     // std::cout << "Our Hessian." << std::endl;
     // alg.printMatrix(numAn.hessian(&f_mv, {2, 2, 500}));
 
     // std::cout << numAn.laplacian(f_mv, {1,1,1}) << std::endl;
 
+    // std::vector<std::vector<std::vector<double>>> tensor;
+    // tensor.push_back({{1,2}, {1,2}, {1,2}});
+    // tensor.push_back({{1,2}, {1,2}, {1,2}});
+
+    // alg.printTensor(tensor);
+
+    // alg.printMatrix(alg.tensor_vec_mult(tensor, {1,2}));
+
+    std::cout << numAn.cubicApproximation(f_mv, {0, 0, 0}, {1, 1, 1}) << std::endl;
+
     return 0;
 }
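Finally, a minimal driver sketching the intended use of the new single-variable cubicApproximation, assuming the MLPP sources above are on the include path. The values in the comments are approximate analytic references, and the finite-difference result will only track them once the 1/6 factor is floating point: for f(x) = sin(x) expanded at c = 0, the cubic approximation should follow the Taylor polynomial x - x^3/6.

    #include <cmath>
    #include <iostream>
    #include "MLPP/NumericalAnalysis/NumericalAnalysis.hpp"

    double f(double x){ return sin(x); }

    int main(){
        MLPP::NumericalAnalysis numAn;
        double x = 1.001;
        std::cout << numAn.cubicApproximation(f, 0, x) << std::endl; // finite-difference estimate of x - x^3/6
        std::cout << x - x*x*x/6 << std::endl;                       // analytic cubic: ~0.8338
        std::cout << f(x) << std::endl;                              // true value: ~0.8420
        return 0;
    }

The multivariate overload is exercised the same way by the final line of main.cpp above, numAn.cubicApproximation(f_mv, {0, 0, 0}, {1, 1, 1}).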