diff --git a/MLPP/AutoEncoder/AutoEncoder.hpp b/MLPP/AutoEncoder/AutoEncoder.hpp
index d65ab35..94e32e9 100644
--- a/MLPP/AutoEncoder/AutoEncoder.hpp
+++ b/MLPP/AutoEncoder/AutoEncoder.hpp
@@ -34,7 +34,7 @@ class AutoEncoder{
         void forwardPass();
 
         std::vector<std::vector<double>> inputSet;
-        std::vector<std::vector<double>> y_hat; // This is your latent representation
+        std::vector<std::vector<double>> y_hat;
 
         std::vector<std::vector<double>> weights1;
         std::vector<std::vector<double>> weights2;
diff --git a/MLPP/LinAlg/LinAlg.cpp b/MLPP/LinAlg/LinAlg.cpp
index 3e4d01c..72a7528 100644
--- a/MLPP/LinAlg/LinAlg.cpp
+++ b/MLPP/LinAlg/LinAlg.cpp
@@ -593,6 +593,10 @@ namespace MLPP{
         return a;
     }
 
+    std::vector<double> LinAlg::solve(std::vector<std::vector<double>> A, std::vector<double> b){
+        return mat_vec_mult(inverse(A), b);
+    }
+
     void LinAlg::printMatrix(std::vector<std::vector<double>> A){
         for(int i = 0; i < A.size(); i++){
             for(int j = 0; j < A[i].size(); j++){
diff --git a/MLPP/LinAlg/LinAlg.hpp b/MLPP/LinAlg/LinAlg.hpp
index 319cb44..8c5315c 100644
--- a/MLPP/LinAlg/LinAlg.hpp
+++ b/MLPP/LinAlg/LinAlg.hpp
@@ -83,6 +83,8 @@ namespace MLPP{
         double sum_elements(std::vector<std::vector<double>> A);
         std::vector<double> flatten(std::vector<std::vector<double>> A);
 
+        std::vector<double> solve(std::vector<std::vector<double>> A, std::vector<double> b);
+
         void printMatrix(std::vector<std::vector<double>> A);
 
diff --git a/SharedLib/MLPP.so b/SharedLib/MLPP.so
index c336848..3a8b729 100755
Binary files a/SharedLib/MLPP.so and b/SharedLib/MLPP.so differ
diff --git a/main.cpp b/main.cpp
index a5e54f3..b86d81b 100644
--- a/main.cpp
+++ b/main.cpp
@@ -366,6 +366,7 @@ int main() {
     // alg.printMatrix(alg.diag({1,2,3,4,5}));
     // alg.printMatrix(alg.kronecker_product({{1,2,3,4,5}}, {{6,7,8,9,10}}));
     // alg.printMatrix(alg.matrixPower({{5,5},{5,5}}, 2));
+    // alg.printVector(alg.solve({{1,1}, {1.5, 4.0}}, {2200, 5050}));
 
     return 0;
 }
\ No newline at end of file