Added LinAlg.diag, changed name of LinAlg.vecmult to LinAlg.outerProduct, rebuilt SO

novak_99 2021-05-29 17:39:57 -07:00
parent f58145b12b
commit baae76a5e0
9 changed files with 22 additions and 11 deletions

View File

@@ -70,7 +70,7 @@ namespace MLPP {
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-network[network.size() - 1].delta = alg.hadamard_product(alg.vecmult(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);
network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, alg.scalarMultiply(learning_rate/n, hiddenLayerWGrad));

View File

@@ -107,7 +107,7 @@ namespace MLPP {
std::vector<double> error = alg.subtraction(y_hat, inputSet[outputIndex]);
// Weight updation for layer 2
-std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
// Bias updation for layer 2
@@ -116,7 +116,7 @@ namespace MLPP {
// Weight updation for layer 1
std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
// Bias updation for layer 1
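
Side note (not part of the commit): both renamed call sites in this file form rank-one weight gradients. Assuming outerProduct(a, b)[i][j] = a[i] * b[j], as the header comment "This multiplies a, bT" suggests, the two matrices above are, entrywise:

D2_1[i][j] = error[i] * a2[j]
D1_3[i][j] = inputSet[outputIndex][i] * D1_2[j]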

View File

@@ -539,7 +539,7 @@ namespace MLPP{
}
}
-std::vector<std::vector<double>> LinAlg::vecmult(std::vector<double> a, std::vector<double> b){
+std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std::vector<double> b){
std::vector<std::vector<double>> C;
C.resize(a.size());
for(int i = 0; i < C.size(); i++){
@@ -671,6 +671,14 @@ namespace MLPP{
return full(n, 1);
}
+std::vector<std::vector<double>> LinAlg::diag(std::vector<double> a){
+std::vector<std::vector<double>> B = zeromat(a.size(), a.size());
+for(int i = 0; i < B.size(); i++){
+B[i][i] = a[i];
+}
+return B;
+}
std::vector<double> LinAlg::full(int n, int k){
std::vector<double> full;
full.resize(n);
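
The hunk above cuts off inside outerProduct's body. For reference, a minimal standalone sketch of what the renamed routine is meant to compute, assuming it follows the header comment "This multiplies a, bT" (the inner loop here is an illustration, not the committed code):

#include <vector>

// Sketch: C = a * b^T, i.e. C[i][j] = a[i] * b[j] (rows indexed by a, columns by b).
std::vector<std::vector<double>> outerProduct(std::vector<double> a, std::vector<double> b){
    std::vector<std::vector<double>> C;
    C.resize(a.size());
    for(int i = 0; i < C.size(); i++){
        C[i].resize(b.size());      // assumed: one row per element of a
        for(int j = 0; j < C[i].size(); j++){
            C[i][j] = a[i] * b[j];  // assumed: row i is b scaled by a[i]
        }
    }
    return C;
}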

View File

@@ -80,7 +80,7 @@ namespace MLPP{
// VECTOR FUNCTIONS
-std::vector<std::vector<double>> vecmult(std::vector<double> a, std::vector<double> b); // This multiplies a, bT
+std::vector<std::vector<double>> outerProduct(std::vector<double> a, std::vector<double> b); // This multiplies a, bT
std::vector<double> hadamard_product(std::vector<double> a, std::vector<double> b);
@@ -112,6 +112,8 @@ namespace MLPP{
std::vector<double> onevec(int n);
+std::vector<std::vector<double>> diag(std::vector<double> a);
std::vector<double> full(int n, int k);
double max(std::vector<double> a);

View File

@@ -65,7 +65,7 @@ namespace MLPP {
std::vector<std::vector<double>> D1_1;
D1_1.resize(n);
-D1_1 = alg.vecmult(error, weights2);
+D1_1 = alg.outerProduct(error, weights2);
std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
@@ -125,7 +125,7 @@ namespace MLPP {
// Weight updation for layer 1
std::vector<double> D1_1 = alg.scalarMultiply(error, weights2);
std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
@@ -204,7 +204,7 @@ namespace MLPP {
//Calculating the weight/bias for layer 1
-std::vector<std::vector<double>> D1_1 = alg.vecmult(error, weights2);
+std::vector<std::vector<double>> D1_1 = alg.outerProduct(error, weights2);
std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));

View File

@@ -113,7 +113,7 @@ namespace MLPP{
std::vector<double> error = alg.subtraction(y_hat, outputSet[outputIndex]);
// Weight updation for layer 2
-std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
weights2 = regularization.regWeights(weights2, lambda, alpha, reg);
@@ -123,7 +123,7 @@ namespace MLPP{
// Weight updation for layer 1
std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);
weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
weights1 = regularization.regWeights(weights1, lambda, alpha, reg);

View File

@@ -88,7 +88,7 @@ namespace MLPP{
cost_prev = Cost({y_hat}, {outputSet[outputIndex]});
// Calculating the weight gradients
-std::vector<std::vector<double>> w_gradient = alg.vecmult(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));
+std::vector<std::vector<double>> w_gradient = alg.outerProduct(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));
// Weight Updation
weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, w_gradient));

Binary file not shown.

View File

@@ -363,6 +363,7 @@ int main() {
// std::cout << alg.trace({{1,2}, {3,4}}) << std::endl;
// alg.printMatrix(alg.pinverse({{1,2}, {3,4}}));
+// alg.printMatrix(alg.diag({1,2,3,4,5}));
return 0;
}
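
As a rough usage check of the two routines this commit touches, here is a hypothetical snippet in the style of the commented test line above; the include path and the way LinAlg is instantiated are assumptions about the surrounding project, not part of the commit:

#include <vector>
#include "LinAlg/LinAlg.hpp" // assumed header location

int main() {
    MLPP::LinAlg alg;
    // diag({1, 2, 3}) -> 3x3 matrix with 1, 2, 3 on the main diagonal, zeros elsewhere.
    alg.printMatrix(alg.diag({1, 2, 3}));
    // outerProduct({1, 2}, {3, 4, 5}) -> {{3, 4, 5}, {6, 8, 10}}, i.e. a * b^T.
    alg.printMatrix(alg.outerProduct({1, 2}, {3, 4, 5}));
    return 0;
}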