diff --git a/MLPP/LinAlg/LinAlg.cpp b/MLPP/LinAlg/LinAlg.cpp
index 196f923..651cf71 100644
--- a/MLPP/LinAlg/LinAlg.cpp
+++ b/MLPP/LinAlg/LinAlg.cpp
@@ -611,6 +611,38 @@ namespace MLPP{
         return {left_eigenvecs, sigma, right_eigenvecs};
     }
 
+    std::vector<double> LinAlg::vectorProjection(std::vector<double> a, std::vector<double> b){
+        double product = dot(a, b)/dot(a, a);
+        return scalarMultiply(product, a); // Projection of vector b onto a. Denoted as proj_a(b).
+    }
+
+    std::vector<std::vector<double>> LinAlg::gramSchmidtProcess(std::vector<std::vector<double>> A){
+        A = transpose(A); // C++ vectors lack a mechanism to directly index columns. So, we transpose *a copy* of A for this purpose for ease of use.
+        std::vector<std::vector<double>> B;
+        B.resize(A.size());
+        for(int i = 0; i < B.size(); i++){
+            B[i].resize(A[0].size());
+        }
+
+        B[0] = A[0]; // We set a_1 = b_1 as an initial condition.
+        B[0] = scalarMultiply(1/norm_2(B[0]), B[0]);
+        for(int i = 1; i < B.size(); i++){
+            B[i] = A[i];
+            for(int j = i-1; j >= 0; j--){
+                B[i] = subtraction(B[i], vectorProjection(B[j], A[i]));
+            }
+            B[i] = scalarMultiply(1/norm_2(B[i]), B[i]); // Very simply multiply all elements of vec B[i] by 1/||B[i]||_2
+        }
+        return transpose(B); // We re-transpose the matrix.
+    }
+
+    std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> LinAlg::QRD(std::vector<std::vector<double>> A){
+        std::vector<std::vector<double>> Q = gramSchmidtProcess(A);
+        std::vector<std::vector<double>> R = matmult(transpose(Q), A);
+        return {Q, R};
+
+    }
+
     double LinAlg::sum_elements(std::vector<std::vector<double>> A){
         double sum = 0;
         for(int i = 0; i < A.size(); i++){
@@ -866,6 +898,10 @@ namespace MLPP{
         return std::sqrt(dist);
     }
 
+    double LinAlg::norm_2(std::vector<double> a){
+        return std::sqrt(norm_sq(a));
+    }
+
     double LinAlg::norm_sq(std::vector<double> a){
         double n_sq = 0;
         for(int i = 0; i < a.size(); i++){
@@ -883,7 +919,7 @@ namespace MLPP{
     }
 
     double LinAlg::cosineSimilarity(std::vector<double> a, std::vector<double> b){
-        return dot(a, b) / (std::sqrt(norm_sq(a)) * std::sqrt(norm_sq(b)));
+        return dot(a, b) / (norm_2(a) * norm_2(b));
     }
 
     void LinAlg::printVector(std::vector<double> a){
diff --git a/MLPP/LinAlg/LinAlg.hpp b/MLPP/LinAlg/LinAlg.hpp
index a230e64..410fd8a 100644
--- a/MLPP/LinAlg/LinAlg.hpp
+++ b/MLPP/LinAlg/LinAlg.hpp
@@ -88,6 +88,12 @@ namespace MLPP{
 
         std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>, std::vector<std::vector<double>>> SVD(std::vector<std::vector<double>> A);
 
+        std::vector<double> vectorProjection(std::vector<double> a, std::vector<double> b);
+
+        std::vector<std::vector<double>> gramSchmidtProcess(std::vector<std::vector<double>> A);
+
+        std::tuple<std::vector<std::vector<double>>, std::vector<std::vector<double>>> QRD(std::vector<std::vector<double>> A);
+
         double sum_elements(std::vector<std::vector<double>> A);
 
         std::vector<double> flatten(std::vector<std::vector<double>> A);
@@ -152,6 +158,8 @@ namespace MLPP{
 
         double euclideanDistance(std::vector<double> a, std::vector<double> b);
 
+        double norm_2(std::vector<double> a);
+
         double norm_sq(std::vector<double> a);
 
         double sum_elements(std::vector<double> a);
diff --git a/a.out b/a.out
deleted file mode 100755
index fc87283..0000000
Binary files a/a.out and /dev/null differ
diff --git a/main.cpp b/main.cpp
index da1484e..c36ad2b 100644
--- a/main.cpp
+++ b/main.cpp
@@ -433,5 +433,18 @@ int main() {
     // data.getImage("../../Data/apple.jpeg", chicken);
     // alg.printVector(chicken);
 
+    // TESTING QR DECOMP. EXAMPLE VIA WIKIPEDIA. SEE https://en.wikipedia.org/wiki/QR_decomposition.
+
+    std::vector<std::vector<double>> P = {{12, -51, 4}, {6, 167, -68}, {-4, 24, -41}};
+    alg.printMatrix(P);
+
+    alg.printMatrix(alg.gramSchmidtProcess(P));
+
+    auto [Q, R] = alg.QRD(P); // It works!
+
+    alg.printMatrix(Q);
+
+    alg.printMatrix(R);
+
     return 0;
 }
\ No newline at end of file