Added LinAlg.diag, changed name of LinAlg.vecmult to LinAlg.outerProduct, rebuilt SO
This commit is contained in:
parent f58145b12b
commit baae76a5e0
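
In practical terms, the commit renames LinAlg::vecmult to LinAlg::outerProduct (same behaviour, clearer name: it computes a * b^T) and adds a LinAlg::diag helper, then updates every caller. A minimal usage sketch of the two functions touched here (the include path and the main() scaffolding are assumptions, not part of the diff):

#include <vector>
#include "LinAlg/LinAlg.hpp" // assumed include path

int main(){
    MLPP::LinAlg alg;
    std::vector<double> a = {1, 2, 3};
    std::vector<double> b = {4, 5};

    // Renamed function: outerProduct(a, b) returns a * b^T, a 3 x 2 matrix here.
    std::vector<std::vector<double>> A = alg.outerProduct(a, b);

    // New function: diag(a) returns a 3 x 3 matrix with a on its main diagonal.
    std::vector<std::vector<double>> D = alg.diag(a);
    return 0;
}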
@@ -70,7 +70,7 @@ namespace MLPP {
             outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;

             auto hiddenLayerAvn = network[network.size() - 1].activation_map[network[network.size() - 1].activation];
-            network[network.size() - 1].delta = alg.hadamard_product(alg.vecmult(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
+            network[network.size() - 1].delta = alg.hadamard_product(alg.outerProduct(outputLayer->delta, outputLayer->weights), (avn.*hiddenLayerAvn)(network[network.size() - 1].z, 1));
             std::vector<std::vector<double>> hiddenLayerWGrad = alg.matmult(alg.transpose(network[network.size() - 1].input), network[network.size() - 1].delta);

             network[network.size() - 1].weights = alg.subtraction(network[network.size() - 1].weights, alg.scalarMultiply(learning_rate/n, hiddenLayerWGrad));
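
This first hunk is where the rename is most informative: outputLayer->delta holds one error value per sample and outputLayer->weights holds one weight per unit of the last hidden layer, so their outer product is an n x h matrix with the same shape as that layer's pre-activation z. A minimal shape sketch under those assumptions (sizes are illustrative, not taken from the repository):

#include <vector>
#include <cassert>

int main(){
    const std::size_t n = 4; // samples (assumed)
    const std::size_t h = 3; // units in the last hidden layer (assumed)
    std::vector<double> delta(n);   // stand-in for outputLayer->delta
    std::vector<double> weights(h); // stand-in for outputLayer->weights

    // outerProduct(delta, weights) = delta * weights^T, an n x h matrix ...
    std::vector<std::vector<double>> outer(n, std::vector<double>(h));
    for(std::size_t i = 0; i < n; i++)
        for(std::size_t j = 0; j < h; j++)
            outer[i][j] = delta[i] * weights[j];

    // ... which matches z's shape, so the Hadamard product with activation'(z) is element-wise valid.
    std::vector<std::vector<double>> z(n, std::vector<double>(h));
    assert(outer.size() == z.size() && outer[0].size() == z[0].size());
    return 0;
}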
@@ -107,7 +107,7 @@ namespace MLPP {
             std::vector<double> error = alg.subtraction(y_hat, inputSet[outputIndex]);

             // Weight updation for layer 2
-            std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+            std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
             weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));

             // Bias updation for layer 2
@@ -116,7 +116,7 @@ namespace MLPP {
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);

             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
             // Bias updation for layer 1
@@ -539,7 +539,7 @@ namespace MLPP{
         }
     }

-    std::vector<std::vector<double>> LinAlg::vecmult(std::vector<double> a, std::vector<double> b){
+    std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std::vector<double> b){
         std::vector<std::vector<double>> C;
         C.resize(a.size());
         for(int i = 0; i < C.size(); i++){
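
The hunk above cuts off inside the renamed function, so only the signature change and the row allocation are visible. Going by those lines and the header comment "This multiplies a, bT", the rest of the body presumably continues along these lines (a sketch, not the verbatim source):

std::vector<std::vector<double>> LinAlg::outerProduct(std::vector<double> a, std::vector<double> b){
    std::vector<std::vector<double>> C;
    C.resize(a.size());
    for(int i = 0; i < C.size(); i++){
        C[i].resize(b.size());
        for(int j = 0; j < C[i].size(); j++){
            C[i][j] = a[i] * b[j]; // row i of C is a[i] * b^T, so C = a * b^T (a.size() x b.size())
        }
    }
    return C;
}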
@@ -671,6 +671,14 @@ namespace MLPP{
         return full(n, 1);
     }

+    std::vector<std::vector<double>> LinAlg::diag(std::vector<double> a){
+        std::vector<std::vector<double>> B = zeromat(a.size(), a.size());
+        for(int i = 0; i < B.size(); i++){
+            B[i][i] = a[i];
+        }
+        return B;
+    }
+
     std::vector<double> LinAlg::full(int n, int k){
         std::vector<double> full;
         full.resize(n);
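
The new diag helper places a vector on the main diagonal of an otherwise zero square matrix (zeromat is taken to return an all-zero a.size() x a.size() matrix, as its name suggests). A short usage sketch with made-up values:

MLPP::LinAlg alg;
std::vector<double> a = {2.0, 5.0, 7.0};
std::vector<std::vector<double>> D = alg.diag(a);
// D == {{2, 0, 0},
//       {0, 5, 0},
//       {0, 0, 7}}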
@@ -80,7 +80,7 @@ namespace MLPP{

         // VECTOR FUNCTIONS

-        std::vector<std::vector<double>> vecmult(std::vector<double> a, std::vector<double> b); // This multiplies a, bT
+        std::vector<std::vector<double>> outerProduct(std::vector<double> a, std::vector<double> b); // This multiplies a, bT

         std::vector<double> hadamard_product(std::vector<double> a, std::vector<double> b);

@@ -112,6 +112,8 @@ namespace MLPP{

         std::vector<double> onevec(int n);

+        std::vector<std::vector<double>> diag(std::vector<double> a);
+
         std::vector<double> full(int n, int k);

         double max(std::vector<double> a);
@@ -65,7 +65,7 @@ namespace MLPP {
             std::vector<std::vector<double>> D1_1;
             D1_1.resize(n);

-            D1_1 = alg.vecmult(error, weights2);
+            D1_1 = alg.outerProduct(error, weights2);

             std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));

@@ -125,7 +125,7 @@ namespace MLPP {
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.scalarMultiply(error, weights2);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);

             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
             weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
@@ -204,7 +204,7 @@ namespace MLPP {

             //Calculating the weight/bias for layer 1

-            std::vector<std::vector<double>> D1_1 = alg.vecmult(error, weights2);
+            std::vector<std::vector<double>> D1_1 = alg.outerProduct(error, weights2);

             std::vector<std::vector<double>> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));

@@ -113,7 +113,7 @@ namespace MLPP{
             std::vector<double> error = alg.subtraction(y_hat, outputSet[outputIndex]);

             // Weight updation for layer 2
-            std::vector<std::vector<double>> D2_1 = alg.vecmult(error, a2);
+            std::vector<std::vector<double>> D2_1 = alg.outerProduct(error, a2);
             weights2 = alg.subtraction(weights2, alg.scalarMultiply(learning_rate, alg.transpose(D2_1)));
             weights2 = regularization.regWeights(weights2, lambda, alpha, reg);

@@ -123,7 +123,7 @@ namespace MLPP{
             // Weight updation for layer 1
             std::vector<double> D1_1 = alg.mat_vec_mult(weights2, error);
             std::vector<double> D1_2 = alg.hadamard_product(D1_1, avn.sigmoid(z2, 1));
-            std::vector<std::vector<double>> D1_3 = alg.vecmult(inputSet[outputIndex], D1_2);
+            std::vector<std::vector<double>> D1_3 = alg.outerProduct(inputSet[outputIndex], D1_2);

             weights1 = alg.subtraction(weights1, alg.scalarMultiply(learning_rate, D1_3));
             weights1 = regularization.regWeights(weights1, lambda, alpha, reg);
@@ -88,7 +88,7 @@ namespace MLPP{
             cost_prev = Cost({y_hat}, {outputSet[outputIndex]});

             // Calculating the weight gradients
-            std::vector<std::vector<double>> w_gradient = alg.vecmult(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));
+            std::vector<std::vector<double>> w_gradient = alg.outerProduct(inputSet[outputIndex], alg.subtraction(y_hat, outputSet[outputIndex]));

             // Weight Updation
             weights = alg.subtraction(weights, alg.scalarMultiply(learning_rate, w_gradient));
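
In this last hunk the rename lands in the per-sample gradient step: the weight gradient is the outer product of the sample's input features and the output error, which yields a features x outputs matrix matching the weights being updated. A small numeric sketch with hypothetical values:

MLPP::LinAlg alg;
std::vector<double> x   = {1.0, 2.0};          // stand-in for inputSet[outputIndex], 2 features
std::vector<double> err = {0.5, -1.0, 0.25};   // stand-in for y_hat - outputSet[outputIndex], 3 outputs
std::vector<std::vector<double>> w_gradient = alg.outerProduct(x, err);
// w_gradient == {{0.5, -1.0, 0.25},
//                {1.0, -2.0, 0.50}}  // 2 x 3, same shape as the weights it is subtracted from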
Binary file not shown.