diff --git a/mlpp/activation/activation.cpp b/mlpp/activation/activation.cpp
index 1957227..e0c6207 100644
--- a/mlpp/activation/activation.cpp
+++ b/mlpp/activation/activation.cpp
@@ -851,11 +851,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
 }
 Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_division(alg.onevecv(z->size()), alg.additionm(alg.onevecv(z->size()), alg.expv(alg.scalar_multiplynv(-1, z))));
+	return alg.element_wise_division(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expv(alg.scalar_multiplynv(-1, z))));
 }
 Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_division(alg.onematm(z->size().x, z->size().y), alg.additionm(alg.onematm(z->size().x, z->size().y), alg.expv(alg.scalar_multiplynv(-1, z))));
+	return alg.element_wise_divisionm(alg.onematm(z->size().x, z->size().y), alg.additionm(alg.onematm(z->size().x, z->size().y), alg.expm(alg.scalar_multiplym(-1, z))));
 }
 
 real_t MLPPActivation::sigmoid_derivr(real_t z) {
@@ -874,9 +874,9 @@ Ref<MLPPVector> MLPPActivation::sigmoid_derivv(const Ref<MLPPVector> &z) {
 Ref<MLPPMatrix> MLPPActivation::sigmoid_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	Ref<MLPPVector> sig_norm = sigmoid_normm(z);
+	Ref<MLPPMatrix> sig_norm = sigmoid_normm(z);
 
-	return alg.subtractionnv(sig_norm, alg.hadamard_productnv(sig_norm, sig_norm));
+	return alg.subtractionm(sig_norm, alg.hadamard_productm(sig_norm, sig_norm));
 }
 
 //SOFTMAX
diff --git a/mlpp/mlp/mlp.cpp b/mlpp/mlp/mlp.cpp
index 35e4767..7ce9961 100644
--- a/mlpp/mlp/mlp.cpp
+++ b/mlpp/mlp/mlp.cpp
@@ -106,12 +106,8 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 
 		// Calculating the weight/bias for layer 1
 
-		Ref<MLPPMatrix> D1_1;
-
-		D1_1 = alg.outer_product(error, weights2);
-
-		Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(D1_1, avn.sigmoid_derivm(z2));
-
+		Ref<MLPPMatrix> D1_1 = alg.outer_product(error, weights2);
+		Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(alg.transposem(D1_1), avn.sigmoid_derivm(z2));
 		Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(input_set), D1_2);
 
 		// weight an bias updation for layer 1
@@ -354,15 +350,15 @@ real_t MLPPMLP::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
 	class MLPPCost cost;
 
-	return cost.log_lossv(y_hat, y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termv(weights1, lambda, alpha, reg);
+	return cost.log_lossv(y_hat, y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termm(weights1, lambda, alpha, reg);
 }
 
 Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	Ref<MLPPVector> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
-	Ref<MLPPVector> pa2 = avn.sigmoid_normm(pz2);
+	Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
+	Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);
 
 	return avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(pa2, weights2)));
 }
@@ -397,10 +393,10 @@ void MLPPMLP::forward_pass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	z2 = alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1);
-	a2 = avn.sigmoid_normv(z2);
+	z2->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1));
+	a2->set_from_mlpp_matrix(avn.sigmoid_normm(z2));
 
-	y_hat = avn.sigmoid_normv(alg.scalar_addm(bias2, alg.mat_vec_multv(a2, weights2)));
+	y_hat = avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(a2, weights2)));
 }
 
 MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {