diff --git a/MLPP/ANN/ANN.cpp b/MLPP/ANN/ANN.cpp
index 7410957..06eae81 100644
--- a/MLPP/ANN/ANN.cpp
+++ b/MLPP/ANN/ANN.cpp
@@ -70,7 +70,7 @@ namespace MLPP {
             outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
 
             auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-            network[network.size() - 1].delta = alg.hadamard_product(alg.vecmult(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+            network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
             std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
             network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, alg.scalarMultiply(learning_rate/n, hiddenLayerWGrad));
diff --git a/MLPP/AutoEncoder/AutoEncoder.cpp b/MLPP/AutoEncoder/AutoEncoder.cpp
index 423c0eb..24c9284 100644
--- a/MLPP/AutoEncoder/AutoEncoder.cpp
+++ b/MLPP/AutoEncoder/AutoEncoder.cpp
@@ -107,7 +107,7 @@ namespace MLPP {
             std::vector<double> error = alg.subtraction(y_hat, inputSet[outputIndex]);
 
             // Weight updation for layer 2
-            std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+            std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
             weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
 
             // Bias updation for layer 2
@@ -116,7 +116,7 @@ namespace MLPP {
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
 
             // Bias updation for layer 1
diff --git a/MLPP/LinAlg/LinAlg.cpp b/MLPP/LinAlg/LinAlg.cpp
index c2c4085..cdf5af8 100644
--- a/MLPP/LinAlg/LinAlg.cpp
+++ b/MLPP/LinAlg/LinAlg.cpp
@@ -539,7 +539,7 @@ namespace MLPP{
         }
     }
 
-    std::vector<std::vector<double>> LinAlg::vecmult(std::vector<double> a, std::vector<double> b){
+    std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std::vector<double> b){
         std::vector<std::vector<double>> C;
         C.resize(a.size());
         for(int i = 0; i < C.size(); i++){
@@ -671,6 +671,14 @@ namespace MLPP{
         return full(n, 1);
     }
 
+    std::vector<std::vector<double>> LinAlg::diag(std::vector<double> a){
+        std::vector<std::vector<double>> B = zeromat(a.size(), a.size());
+        for(int i = 0; i < B.size(); i++){
+            B[i][i] = a[i];
+        }
+        return B;
+    }
+
     std::vector<double> LinAlg::full(int n, int k){
         std::vector<double> full;
         full.resize(n);
diff --git a/MLPP/LinAlg/LinAlg.hpp b/MLPP/LinAlg/LinAlg.hpp
index 3ea493a..effadb3 100644
--- a/MLPP/LinAlg/LinAlg.hpp
+++ b/MLPP/LinAlg/LinAlg.hpp
@@ -80,7 +80,7 @@ namespace MLPP{
 
         // VECTOR FUNCTIONS
 
-        std::vector<std::vector<double>> vecmult(std::vector<double> a, std::vector<double> b); // This multiplies a, bT
+        std::vector<std::vector<double>> outerProduct(std::vector<double> a, std::vector<double> b); // This multiplies a, bT
 
         std::vector<double> hadamard_product(std::vector<double> a, std::vector<double> b);
@@ -112,6 +112,8 @@ namespace MLPP{
 
         std::vector<double> onevec(int n);
 
+        std::vector<std::vector<double>> diag(std::vector<double> a);
+
         std::vector<double> full(int n, int k);
 
         double max(std::vector<double> a);
diff --git a/MLPP/MLP/MLP.cpp b/MLPP/MLP/MLP.cpp
index 028829b..bd251a3 100644
--- a/MLPP/MLP/MLP.cpp
+++ b/MLPP/MLP/MLP.cpp
@@ -65,7 +65,7 @@ namespace MLPP {
 
             std::vector<std::vector<double>> D1_1;
             D1_1.resize(n);
 
-            D1_1 = alg.vecmult(error, weights2);
+            D1_1 = alg.outerProduct(error, weights2);
 
             std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
@@ -125,7 +125,7 @@ namespace MLPP {
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.scalarMultiply(error, weights2);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
             weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
@@ -204,7 +204,7 @@ namespace MLPP {
 
             //Calculating the weight/bias for layer 1
 
-            std::vector<std::vector<double>> D1_1 = alg.vecmult(error, weights2);
+            std::vector<std::vector<double>> D1_1 = alg.outerProduct(error, weights2);
 
             std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
diff --git a/MLPP/SoftmaxNet/SoftmaxNet.cpp b/MLPP/SoftmaxNet/SoftmaxNet.cpp
index 2b847c1..06bbb3a 100644
--- a/MLPP/SoftmaxNet/SoftmaxNet.cpp
+++ b/MLPP/SoftmaxNet/SoftmaxNet.cpp
@@ -113,7 +113,7 @@ namespace MLPP{
             std::vector<double> error = alg.subtraction(y_hat, outputSet[outputIndex]);
 
             // Weight updation for layer 2
-            std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+            std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
             weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
             weights2 = regularization.regWeights(weights2, lambda, alpha, reg);
@@ -123,7 +123,7 @@ namespace MLPP{
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
             weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
diff --git a/MLPP/SoftmaxReg/SoftmaxReg.cpp b/MLPP/SoftmaxReg/SoftmaxReg.cpp
index fb8b34e..30a0edb 100644
--- a/MLPP/SoftmaxReg/SoftmaxReg.cpp
+++ b/MLPP/SoftmaxReg/SoftmaxReg.cpp
@@ -88,7 +88,7 @@ namespace MLPP{
             cost_prev = Cost({y_hat}, {outputSet[outputIndex]});
 
             // Calculating the weight gradients
-            std::vector<std::vector<double>> w_gradient = alg.vecmult(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));
+            std::vector<std::vector<double>> w_gradient = alg.outerProduct(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));
 
             // Weight Updation
             weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, w_gradient));
diff --git a/SharedLib/MLPP.so b/SharedLib/MLPP.so
index ab67ef8..20bcd6f 100755
Binary files a/SharedLib/MLPP.so and b/SharedLib/MLPP.so differ
diff --git a/main.cpp b/main.cpp
index 0bc6fe2..0789275 100644
--- a/main.cpp
+++ b/main.cpp
@@ -363,6 +363,7 @@ int main() {
     // std::cout << alg.trace({{1,2}, {3,4}}) << std::endl;
     // alg.printMatrix(alg.pinverse({{1,2}, {3,4}}));
+    // alg.printMatrix(alg.diag({1,2,3,4,5}));
 
     return 0;
 }
\ No newline at end of file
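
A minimal usage sketch of the renamed outerProduct and the new diag helper. The include path and the example values are illustrative assumptions; outerProduct, diag, and printMatrix themselves come from the patch above (the diag call mirrors the commented-out line added to main.cpp).

    #include <vector>
    #include "MLPP/LinAlg/LinAlg.hpp" // assumed include path; adjust to your build layout

    int main() {
        MLPP::LinAlg alg;

        // outerProduct(a, b) builds the |a| x |b| matrix a * b^T.
        std::vector<std::vector<double>> outer = alg.outerProduct({1, 2, 3}, {4, 5});
        alg.printMatrix(outer); // expected: [[4, 5], [8, 10], [12, 15]]

        // diag(a) builds a square matrix with a on its main diagonal, zeros elsewhere.
        alg.printMatrix(alg.diag({1, 2, 3, 4, 5}));

        return 0;
    }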