Mirror of https://github.com/Relintai/pmlpp.git, synced 2025-01-21 15:27:17 +01:00
Fixed a few crashes and issues.
parent 4281746cfc
commit e6afa5b715
@@ -851,11 +851,11 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
 }
 Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_division(alg.onevecv(z->size()), alg.additionm(alg.onevecv(z->size()), alg.expv(alg.scalar_multiplynv(-1, z))));
+	return alg.element_wise_division(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expv(alg.scalar_multiplynv(-1, z))));
 }
 Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
-	return alg.element_wise_division(alg.onematm(z->size().x, z->size().y), alg.additionm(alg.onematm(z->size().x, z->size().y), alg.expv(alg.scalar_multiplynv(-1, z))));
+	return alg.element_wise_divisionm(alg.onematm(z->size().x, z->size().y), alg.additionm(alg.onematm(z->size().x, z->size().y), alg.expm(alg.scalar_multiplym(-1, z))));
 }
 
 real_t MLPPActivation::sigmoid_derivr(real_t z) {
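Both fixes here are overload corrections: the vector path was adding with the matrix additionm and now uses the vector-vector additionnv, while the matrix path was dividing, exponentiating, and scaling through vector routines (element_wise_division, expv, scalar_multiplynv) and now goes through the matrix variants (element_wise_divisionm, expm, scalar_multiplym) end to end. Either way the computation is the same elementwise logistic; a minimal standalone sketch follows (plain C++, not the pmlpp API; the real_t alias is an assumption):

// Standalone sketch (not pmlpp code): the elementwise logistic that both
// sigmoid_normv and sigmoid_normm compute, written against std::vector.
#include <cmath>
#include <cstddef>
#include <vector>

// Assumption: pmlpp's real_t is a floating-point alias; double is used here.
using real_t = double;

// 1 / (1 + e^(-z)) per element: the onevec / (onevec + exp(-1 * z))
// composition that the fixed return statements express.
std::vector<real_t> sigmoid_norm(const std::vector<real_t> &z) {
	std::vector<real_t> out(z.size());
	for (std::size_t i = 0; i < z.size(); ++i) {
		out[i] = real_t(1) / (real_t(1) + std::exp(-z[i]));
	}
	return out;
}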
@@ -874,9 +874,9 @@ Ref<MLPPVector> MLPPActivation::sigmoid_derivv(const Ref<MLPPVector> &z) {
 Ref<MLPPMatrix> MLPPActivation::sigmoid_derivm(const Ref<MLPPMatrix> &z) {
 	MLPPLinAlg alg;
 
-	Ref<MLPPVector> sig_norm = sigmoid_normm(z);
+	Ref<MLPPMatrix> sig_norm = sigmoid_normm(z);
 
-	return alg.subtractionnv(sig_norm, alg.hadamard_productnv(sig_norm, sig_norm));
+	return alg.subtractionm(sig_norm, alg.hadamard_productm(sig_norm, sig_norm));
 }
 
 //SOFTMAX
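sigmoid_normm returns a matrix, so the fix gives sig_norm the matrix holder type and switches the follow-up calls to the matrix overloads subtractionm and hadamard_productm. The identity being computed is sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)); a standalone sketch under the same assumptions as above:

// Standalone sketch (not pmlpp code): sigmoid'(z) = s - s*s with s = sigmoid(z),
// i.e. subtraction(sig_norm, hadamard(sig_norm, sig_norm)) done elementwise.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> sigmoid_deriv(const std::vector<double> &z) {
	std::vector<double> out(z.size());
	for (std::size_t i = 0; i < z.size(); ++i) {
		double s = 1.0 / (1.0 + std::exp(-z[i]));
		out[i] = s - s * s; // equivalently s * (1 - s)
	}
	return out;
}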
@@ -106,12 +106,8 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
 
 		// Calculating the weight/bias for layer 1
 
-		Ref<MLPPMatrix> D1_1;
-
-		D1_1 = alg.outer_product(error, weights2);
-
-		Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(D1_1, avn.sigmoid_derivm(z2));
-
+		Ref<MLPPMatrix> D1_1 = alg.outer_product(error, weights2);
+		Ref<MLPPMatrix> D1_2 = alg.hadamard_productm(alg.transposem(D1_1), avn.sigmoid_derivm(z2));
 		Ref<MLPPMatrix> D1_3 = alg.matmultm(alg.transposem(input_set), D1_2);
 
 		// weight an bias updation for layer 1
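This hunk collapses the separate D1_1 declaration and assignment into one statement and, more significantly, transposes D1_1 before the Hadamard product so its shape lines up with sigmoid_derivm(z2). A standalone sketch of the layer-1 gradient the fixed code assembles, dW1 = X^T * ((error outer w2) elementwise-times sigmoid'(Z2)); the dimension conventions (N samples, d input features, h hidden units) are assumptions for illustration, not taken from the source:

// Standalone sketch (not pmlpp code) of the layer-1 weight gradient.
// Assumed shapes: X is N x d, Z2 is N x h, error has length N, w2 length h.
#include <cmath>
#include <cstddef>
#include <vector>

using Mat = std::vector<std::vector<double>>;
using Vec = std::vector<double>;

Mat layer1_gradient(const Mat &X, const Mat &Z2, const Vec &error, const Vec &w2) {
	const std::size_t N = X.size(), d = X[0].size(), h = w2.size();

	// D1_2[i][j] = error[i] * w2[j] * sigmoid'(Z2[i][j]): the outer product
	// and the (transposed) Hadamard product fused into one loop.
	Mat D1_2(N, Vec(h, 0.0));
	for (std::size_t i = 0; i < N; ++i) {
		for (std::size_t j = 0; j < h; ++j) {
			const double s = 1.0 / (1.0 + std::exp(-Z2[i][j]));
			D1_2[i][j] = error[i] * w2[j] * (s - s * s);
		}
	}

	// D1_3 = X^T * D1_2: a d x h gradient with the same shape as weights1.
	Mat D1_3(d, Vec(h, 0.0));
	for (std::size_t i = 0; i < N; ++i)
		for (std::size_t k = 0; k < d; ++k)
			for (std::size_t j = 0; j < h; ++j)
				D1_3[k][j] += X[i][k] * D1_2[i][j];

	return D1_3;
}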
@@ -354,15 +350,15 @@ real_t MLPPMLP::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
 	MLPPReg regularization;
 	class MLPPCost cost;
 
-	return cost.log_lossv(y_hat, y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termv(weights1, lambda, alpha, reg);
+	return cost.log_lossv(y_hat, y) + regularization.reg_termv(weights2, lambda, alpha, reg) + regularization.reg_termm(weights1, lambda, alpha, reg);
 }
 
 Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	Ref<MLPPVector> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
-	Ref<MLPPVector> pa2 = avn.sigmoid_normm(pz2);
+	Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultm(X, weights1), bias1);
+	Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);
 
 	return avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(pa2, weights2)));
 }
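Since weights1 is a matrix while weights2 is a vector, the layer-1 regularization term has to come from the matrix overload reg_termm; the evaluatem changes likewise give pz2 and pa2 their correct matrix types. For orientation, a standalone sketch of an L2 penalty of the kind reg_termm would return for a weight matrix (the 0.5 * lambda scaling is an assumption; in the real call the reg argument selects the regularization type):

// Standalone sketch (not pmlpp code): an L2 term over a whole weight matrix,
// summing squared entries row by row.
#include <vector>

double l2_reg_term(const std::vector<std::vector<double>> &W, double lambda) {
	double sum = 0.0;
	for (const auto &row : W)
		for (double w : row)
			sum += w * w;
	return 0.5 * lambda * sum;
}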
@@ -397,10 +393,10 @@ void MLPPMLP::forward_pass() {
 	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	z2 = alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1);
-	a2 = avn.sigmoid_normv(z2);
+	z2->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultm(input_set, weights1), bias1));
+	a2->set_from_mlpp_matrix(avn.sigmoid_normm(z2));
 
-	y_hat = avn.sigmoid_normv(alg.scalar_addm(bias2, alg.mat_vec_multv(a2, weights2)));
+	y_hat = avn.sigmoid_normv(alg.scalar_addnv(bias2, alg.mat_vec_multv(a2, weights2)));
 }
 
 MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
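forward_pass now writes into the existing z2 and a2 matrices via set_from_mlpp_matrix rather than rebinding their Refs, presumably so the objects keep their identity and their matrix types, applies the matrix sigmoid to produce a2, and pairs y_hat with the vector scalar-add overload scalar_addnv. A standalone sketch of the same pass (plain C++, assumed dimensions, not the pmlpp API):

// Standalone sketch (not pmlpp code): Z2 = X * W1 + b1 (b1 broadcast over
// rows), A2 = sigmoid(Z2), y_hat = sigmoid(A2 * w2 + b2).
#include <cmath>
#include <cstddef>
#include <vector>

using Mat = std::vector<std::vector<double>>;
using Vec = std::vector<double>;

static double sigmoid(double z) { return 1.0 / (1.0 + std::exp(-z)); }

// Z2 and A2 are written in place, mirroring set_from_mlpp_matrix's
// copy-into-the-existing-object behavior instead of rebinding to new matrices.
Vec forward_pass(const Mat &X, const Mat &W1, const Vec &b1,
		const Vec &w2, double b2, Mat &Z2, Mat &A2) {
	const std::size_t N = X.size(), d = X[0].size(), h = b1.size();

	Z2.assign(N, Vec(h, 0.0));
	A2.assign(N, Vec(h, 0.0));
	Vec y_hat(N, 0.0);

	for (std::size_t i = 0; i < N; ++i) {
		for (std::size_t j = 0; j < h; ++j) {
			double z = b1[j];
			for (std::size_t k = 0; k < d; ++k)
				z += X[i][k] * W1[k][j];
			Z2[i][j] = z;
			A2[i][j] = sigmoid(z);
		}
		double z_out = b2;
		for (std::size_t j = 0; j < h; ++j)
			z_out += A2[i][j] * w2[j];
		y_hat[i] = sigmoid(z_out);
	}
	return y_hat;
}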