More API standardization.

Relintai 2023-04-22 14:46:25 +02:00
parent eb9f3eaa34
commit da7659860a
22 changed files with 199 additions and 199 deletions
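
For orientation, the renames below all follow one pattern: the std::vector<real_t>-based helpers in MLPPLinAlg keep their old names, while the Ref<MLPPVector> / Ref<MLPPMatrix> overloads gain an nv or nm suffix. A minimal before/after sketch (names taken from the hunks below; the two live lines are illustrative, not part of the commit):

MLPPLinAlg alg;

// Old name                      New name (this commit)
// onevecv / zerovecv / fullv -> onevecnv / zerovecnv / fullnv
// dotv                       -> dotnv
// sinv / cosv                -> sinnv / cosnv
// diagm                      -> diagnm
// mat_vec_multv              -> mat_vec_multnv
// mat_vec_addv               -> mat_vec_addnm
// subtract_matrix_rows       -> subtract_matrix_rowsnv

Ref<MLPPVector> ones = alg.onevecnv(3); // was alg.onevecv(3)
real_t d = alg.dotnv(ones, ones);       // was alg.dotv(...); here d == 3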


@ -838,7 +838,7 @@ real_t MLPPActivation::linear_derivr(real_t z) {
}
Ref<MLPPVector> MLPPActivation::linear_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.onevecv(z->size());
return alg.onevecnv(z->size());
}
Ref<MLPPMatrix> MLPPActivation::linear_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -851,7 +851,7 @@ real_t MLPPActivation::sigmoid_normr(real_t z) {
}
Ref<MLPPVector> MLPPActivation::sigmoid_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.additionnv(alg.onevecv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.additionnv(alg.onevecnv(z->size()), alg.expnv(alg.scalar_multiplynv(-1, z))));
}
Ref<MLPPMatrix> MLPPActivation::sigmoid_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1219,7 +1219,7 @@ real_t MLPPActivation::softplus_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::softplus_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.lognv(alg.additionnv(alg.onevecv(z->size()), alg.expnv(z)));
return alg.lognv(alg.additionnv(alg.onevecnv(z->size()), alg.expnv(z)));
}
Ref<MLPPMatrix> MLPPActivation::softplus_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1245,7 +1245,7 @@ real_t MLPPActivation::softsign_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecv(z->size()), alg.absv(z)));
return alg.element_wise_divisionnv(z, alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)));
}
Ref<MLPPMatrix> MLPPActivation::softsign_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1259,7 +1259,7 @@ real_t MLPPActivation::softsign_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::softsign_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecv(z->size()), alg.absv(z)), 2));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.exponentiatenv(alg.additionnv(alg.onevecnv(z->size()), alg.absv(z)), 2));
}
Ref<MLPPMatrix> MLPPActivation::softsign_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1275,7 +1275,7 @@ real_t MLPPActivation::gaussian_cdf_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::gaussian_cdf_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.scalar_multiplynv(0.5, alg.additionnv(alg.onevecv(z->size()), alg.erfnv(alg.scalar_multiplynv(1 / sqrt(2), z))));
return alg.scalar_multiplynv(0.5, alg.additionnv(alg.onevecnv(z->size()), alg.erfnv(alg.scalar_multiplynv(1 / sqrt(2), z))));
}
Ref<MLPPMatrix> MLPPActivation::gaussian_cdf_normm(const Ref<MLPPMatrix> &z) {
@ -1339,7 +1339,7 @@ real_t MLPPActivation::logit_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::logit_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecv(z->size()), z)));
return alg.lognv(alg.element_wise_divisionnv(z, alg.subtractionnv(alg.onevecnv(z->size()), z)));
}
Ref<MLPPMatrix> MLPPActivation::logit_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1354,8 +1354,8 @@ Ref<MLPPVector> MLPPActivation::logit_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.subtractionnv(
alg.element_wise_divisionnv(alg.onevecv(z->size()), z),
alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(z, alg.onevecv(z->size()))));
alg.element_wise_divisionnv(alg.onevecnv(z->size()), z),
alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(z, alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::logit_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1507,7 +1507,7 @@ real_t MLPPActivation::sinc_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.sinv(z), z);
return alg.element_wise_divisionnv(alg.sinnv(z), z);
}
Ref<MLPPMatrix> MLPPActivation::sinc_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -1521,7 +1521,7 @@ real_t MLPPActivation::sinc_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::sinc_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosv(z)), alg.sinv(z)), alg.hadamard_productnv(z, z));
return alg.element_wise_divisionnv(alg.subtractionnv(alg.hadamard_productnv(z, alg.cosnv(z)), alg.sinnv(z)), alg.hadamard_productnv(z, z));
}
Ref<MLPPMatrix> MLPPActivation::sinc_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -2081,7 +2081,7 @@ real_t MLPPActivation::csch_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::csch_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), sinh_normv(z));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), sinh_normv(z));
}
Ref<MLPPMatrix> MLPPActivation::csch_normm(const Ref<MLPPMatrix> &z) {
@ -2114,7 +2114,7 @@ real_t MLPPActivation::sech_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::sech_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), cosh_normv(z));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), cosh_normv(z));
// return activation(z, deriv, static_cast<void (*)(real_t, bool)>(&sech));
}
@ -2149,7 +2149,7 @@ real_t MLPPActivation::coth_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::coth_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), tanh_normv(z));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), tanh_normv(z));
}
Ref<MLPPMatrix> MLPPActivation::coth_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -2180,7 +2180,7 @@ real_t MLPPActivation::arsinh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsinh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.lognv(alg.additionnv(z, alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))));
return alg.lognv(alg.additionnv(z, alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arsinh_normm(const Ref<MLPPMatrix> &z) {
@ -2196,7 +2196,7 @@ real_t MLPPActivation::arsinh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arsinh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size()))));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.additionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::arsinh_derivm(const Ref<MLPPMatrix> &z) {
@ -2213,7 +2213,7 @@ real_t MLPPActivation::arcosh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcosh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.lognv(alg.additionnv(z, alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size())))));
return alg.lognv(alg.additionnv(z, alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arcosh_normm(const Ref<MLPPMatrix> &z) {
@ -2228,7 +2228,7 @@ real_t MLPPActivation::arcosh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcosh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecv(z->size()))));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.sqrtnv(alg.subtractionnv(alg.hadamard_productnv(z, z), alg.onevecnv(z->size()))));
}
Ref<MLPPMatrix> MLPPActivation::arcosh_derivm(const Ref<MLPPMatrix> &z) {
@ -2245,7 +2245,7 @@ real_t MLPPActivation::artanh_normr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_normv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(alg.onevecv(z->size()), z))));
return alg.scalar_multiplynv(0.5, alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(alg.onevecnv(z->size()), z))));
}
Ref<MLPPMatrix> MLPPActivation::artanh_normm(const Ref<MLPPMatrix> &z) {
@ -2260,7 +2260,7 @@ real_t MLPPActivation::artanh_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::artanh_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
}
Ref<MLPPMatrix> MLPPActivation::artanh_derivm(const Ref<MLPPMatrix> &z) {
@ -2281,9 +2281,9 @@ Ref<MLPPVector> MLPPActivation::arcsch_normv(const Ref<MLPPVector> &z) {
alg.additionnv(
alg.sqrtnv(
alg.additionnv(
alg.onevecv(z->size()),
alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))),
alg.element_wise_divisionnv(alg.onevecv(z->size()), z)));
alg.onevecnv(z->size()),
alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))),
alg.element_wise_divisionnv(alg.onevecnv(z->size()), z)));
}
Ref<MLPPMatrix> MLPPActivation::arcsch_normm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -2303,10 +2303,10 @@ Ref<MLPPVector> MLPPActivation::arcsch_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(
alg.fullv(z->size(), -1),
alg.fullnv(z->size(), -1),
alg.hadamard_productnm(
alg.hadamard_productnv(z, z),
alg.sqrtnv(alg.additionnv(alg.onevecv(z->size()), alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z))))));
alg.sqrtnv(alg.additionnv(alg.onevecnv(z->size()), alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z))))));
}
Ref<MLPPMatrix> MLPPActivation::arcsch_derivm(const Ref<MLPPMatrix> &z) {
MLPPLinAlg alg;
@ -2330,10 +2330,10 @@ Ref<MLPPVector> MLPPActivation::arsech_normv(const Ref<MLPPVector> &z) {
return alg.lognv(
alg.additionnv(
alg.element_wise_divisionnv(
alg.onevecv(z->size()), z),
alg.onevecnv(z->size()), z),
alg.hadamard_productnv(
alg.additionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size())),
alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecv(z->size()), z), alg.onevecv(z->size())))));
alg.additionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())),
alg.subtractionnv(alg.element_wise_divisionnv(alg.onevecnv(z->size()), z), alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arsech_normm(const Ref<MLPPMatrix> &z) {
@ -2362,11 +2362,11 @@ Ref<MLPPVector> MLPPActivation::arsech_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(
alg.fullv(z->size(), -1),
alg.fullnv(z->size(), -1),
alg.hadamard_productnv(
z,
alg.sqrtnv(
alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)))));
alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)))));
}
Ref<MLPPMatrix> MLPPActivation::arsech_derivm(const Ref<MLPPMatrix> &z) {
@ -2389,7 +2389,7 @@ Ref<MLPPVector> MLPPActivation::arcoth_normv(const Ref<MLPPVector> &z) {
return alg.scalar_multiplynv(
0.5,
alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecv(z->size()), z), alg.subtractionnv(z, alg.onevecv(z->size())))));
alg.lognv(alg.element_wise_divisionnv(alg.additionnv(alg.onevecnv(z->size()), z), alg.subtractionnv(z, alg.onevecnv(z->size())))));
}
Ref<MLPPMatrix> MLPPActivation::arcoth_normm(const Ref<MLPPMatrix> &z) {
@ -2406,7 +2406,7 @@ real_t MLPPActivation::arcoth_derivr(real_t z) {
Ref<MLPPVector> MLPPActivation::arcoth_derivv(const Ref<MLPPVector> &z) {
MLPPLinAlg alg;
return alg.element_wise_divisionnv(alg.onevecv(z->size()), alg.subtractionnv(alg.onevecv(z->size()), alg.hadamard_productnv(z, z)));
return alg.element_wise_divisionnv(alg.onevecnv(z->size()), alg.subtractionnv(alg.onevecnv(z->size()), alg.hadamard_productnv(z, z)));
}
Ref<MLPPMatrix> MLPPActivation::arcoth_derivm(const Ref<MLPPMatrix> &z) {

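For reference, a few of the elementwise formulas the vector helpers above implement (read directly off the calls; the renames do not change the math):

\sigma(z) = \frac{1}{1 + e^{-z}} \qquad \text{(sigmoid\_normv)}
\operatorname{softsign}'(z) = \frac{1}{(1 + |z|)^{2}} \qquad \text{(softsign\_derivv)}
\operatorname{logit}(z) = \ln\frac{z}{1 - z} \qquad \text{(logit\_normv)}
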

@ -845,13 +845,13 @@ void MLPPANN::update_parameters(const Vector<Ref<MLPPMatrix>> &hidden_layer_upda
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[0]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
for (int i = _network.size() - 2; i >= 0; i--) {
layer = _network[i];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}
}
@ -867,7 +867,7 @@ MLPPANN::ComputeGradientsResult MLPPANN::compute_gradients(const Ref<MLPPVector>
_output_layer->set_delta(alg.hadamard_productnv(mlpp_cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, _output_set), avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z())));
res.output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
res.output_w_grad = alg.mat_vec_multnv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
res.output_w_grad = alg.additionnv(res.output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!_network.empty()) {


@ -69,7 +69,7 @@ void MLPPAutoEncoder::gradient_descent(real_t learning_rate, int max_epoch, bool
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate / _n, D2_1));
// Calculating the bias gradients for layer 2
_bias2 = alg.subtract_matrix_rows(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
//Calculating the weight/bias for layer 1
@ -80,7 +80,7 @@ void MLPPAutoEncoder::gradient_descent(real_t learning_rate, int max_epoch, bool
// weight an bias updation for layer 1
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate / _n, D1_3));
_bias1 = alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(learning_rate / _n, D1_2));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate / _n, D1_2));
forward_pass();
@ -147,7 +147,7 @@ void MLPPAutoEncoder::sgd(real_t learning_rate, int max_epoch, bool ui) {
_bias2 = alg.subtractionnv(_bias2, alg.scalar_multiplynv(learning_rate, error));
// Weight updation for layer 1
Ref<MLPPVector> D1_1 = alg.mat_vec_multv(_weights2, error);
Ref<MLPPVector> D1_1 = alg.mat_vec_multnv(_weights2, error);
Ref<MLPPVector> D1_2 = alg.hadamard_productnv(D1_1, avn.sigmoid_derivv(prop_res.z2));
Ref<MLPPMatrix> D1_3 = alg.outer_product(input_set_row_tmp, D1_2);
@ -210,7 +210,7 @@ void MLPPAutoEncoder::mbgd(real_t learning_rate, int max_epoch, int mini_batch_s
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate / current_batch->size().y, D2_1));
// Bias Updation for layer 2
_bias2 = alg.subtract_matrix_rows(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
//Calculating the weight/bias for layer 1
@ -220,7 +220,7 @@ void MLPPAutoEncoder::mbgd(real_t learning_rate, int max_epoch, int mini_batch_s
// weight an bias updation for layer 1
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate / current_batch->size().x, D1_3));
_bias1 = alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(learning_rate / current_batch->size().x, D1_2));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate / current_batch->size().x, D1_2));
y_hat = evaluatem(current_batch);
@ -304,10 +304,10 @@ Ref<MLPPVector> MLPPAutoEncoder::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPVector> z2 = alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> a2 = avn.sigmoid_normv(z2);
return alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights2), a2), _bias2);
return alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights2), a2), _bias2);
}
MLPPAutoEncoder::PropagateVResult MLPPAutoEncoder::propagatev(const Ref<MLPPVector> &x) {
@ -316,7 +316,7 @@ MLPPAutoEncoder::PropagateVResult MLPPAutoEncoder::propagatev(const Ref<MLPPVect
PropagateVResult res;
res.z2 = alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1);
res.z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
res.a2 = avn.sigmoid_normv(res.z2);
return res;
@ -326,10 +326,10 @@ Ref<MLPPMatrix> MLPPAutoEncoder::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPMatrix> z2 = alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> a2 = avn.sigmoid_normm(z2);
return alg.mat_vec_addv(alg.matmultnm(a2, _weights2), _bias2);
return alg.mat_vec_addnm(alg.matmultnm(a2, _weights2), _bias2);
}
MLPPAutoEncoder::PropagateMResult MLPPAutoEncoder::propagatem(const Ref<MLPPMatrix> &X) {
@ -338,7 +338,7 @@ MLPPAutoEncoder::PropagateMResult MLPPAutoEncoder::propagatem(const Ref<MLPPMatr
PropagateMResult res;
res.z2 = alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1);
res.z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
res.a2 = avn.sigmoid_normm(res.z2);
return res;
@ -348,9 +348,9 @@ void MLPPAutoEncoder::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z2 = alg.mat_vec_addv(alg.matmultnm(_input_set, _weights1), _bias1);
_z2 = alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights1), _bias1);
_a2 = avn.sigmoid_normm(_z2);
_y_hat = alg.mat_vec_addv(alg.matmultnm(_a2, _weights2), _bias2);
_y_hat = alg.mat_vec_addnm(alg.matmultnm(_a2, _weights2), _bias2);
}
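
In matrix form, the forward pass above computes (bias vectors added row-wise, sigmoid applied elementwise):

Z_2 = X W_1 + b_1, \qquad A_2 = \sigma(Z_2), \qquad \hat{Y} = A_2 W_2 + b_2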
void MLPPAutoEncoder::_bind_methods() {


@ -37,7 +37,7 @@ void MLPPCLogLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -73,7 +73,7 @@ void MLPPCLogLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.cloglog_derivv(_z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -182,7 +182,7 @@ void MLPPCLogLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(current_input_batch), alg.hadamard_productnv(error, avn.cloglog_derivv(z)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(current_input_batch), alg.hadamard_productnv(error, avn.cloglog_derivv(z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -251,26 +251,26 @@ real_t MLPPCLogLogReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog_normr(alg.dotv(_weights, x) + bias);
return avn.cloglog_normr(alg.dotnv(_weights, x) + bias);
}
real_t MLPPCLogLogReg::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
return alg.dotv(_weights, x) + bias;
return alg.dotnv(_weights, x) + bias;
}
Ref<MLPPVector> MLPPCLogLogReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.cloglog_normv(alg.scalar_addnv(bias, alg.mat_vec_multv(X, _weights)));
return avn.cloglog_normv(alg.scalar_addnv(bias, alg.mat_vec_multnv(X, _weights)));
}
Ref<MLPPVector> MLPPCLogLogReg::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
return alg.scalar_addnv(bias, alg.mat_vec_multv(X, _weights));
return alg.scalar_addnv(bias, alg.mat_vec_multnv(X, _weights));
}
// cloglog ( wTx + b )
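
Written out, the weight update in gradient_descent above is (learning rate \eta, sample count n, elementwise product \odot):

w \leftarrow w - \frac{\eta}{n}\, X^{\top}\big((\hat{y} - y) \odot \operatorname{cloglog}'(z)\big), \qquad \hat{y} = \operatorname{cloglog}(Xw + b)

with the regularization term applied afterwards via reg_weightsv, as in the hunks above.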


@ -205,7 +205,7 @@ real_t MLPPCost::mbem(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
Ref<MLPPVector> MLPPCost::mbe_derivv(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y) {
MLPPLinAlg alg;
return alg.onevecv(y_hat->size());
return alg.onevecnv(y_hat->size());
}
Ref<MLPPMatrix> MLPPCost::mbe_derivm(const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y) {
MLPPLinAlg alg;
@ -554,7 +554,7 @@ Ref<MLPPMatrix> MLPPCost::wasserstein_loss_derivm(const Ref<MLPPMatrix> &y_hat,
real_t MLPPCost::dual_form_svm(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y) {
MLPPLinAlg alg;
Ref<MLPPMatrix> Y = alg.diagm(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
Ref<MLPPMatrix> Y = alg.diagnm(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
Ref<MLPPMatrix> K = alg.matmultnm(X, alg.transposenm(X)); // TO DO: DON'T forget to add non-linear kernelizations.
Ref<MLPPMatrix> Q = alg.matmultnm(alg.matmultnm(alg.transposenm(Y), K), Y);
@ -566,19 +566,19 @@ real_t MLPPCost::dual_form_svm(const Ref<MLPPVector> &alpha, const Ref<MLPPMatri
Ref<MLPPMatrix> alpha_m_res = alg.matmultnm(alg.matmultnm(alpha_m, Q), alg.transposenm(alpha_m));
real_t alphaQ = alpha_m_res->get_element(0, 0);
Ref<MLPPVector> one = alg.onevecv(alpha->size());
Ref<MLPPVector> one = alg.onevecnv(alpha->size());
return -alg.dotv(one, alpha) + 0.5 * alphaQ;
return -alg.dotnv(one, alpha) + 0.5 * alphaQ;
}
Ref<MLPPVector> MLPPCost::dual_form_svm_deriv(const Ref<MLPPVector> &alpha, const Ref<MLPPMatrix> &X, const Ref<MLPPVector> &y) {
MLPPLinAlg alg;
Ref<MLPPMatrix> Y = alg.diagm(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
Ref<MLPPMatrix> Y = alg.diagnm(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
Ref<MLPPMatrix> K = alg.matmultnm(X, alg.transposenm(X)); // TO DO: DON'T forget to add non-linear kernelizations.
Ref<MLPPMatrix> Q = alg.matmultnm(alg.matmultnm(alg.transposenm(Y), K), Y);
Ref<MLPPVector> alphaQDeriv = alg.mat_vec_multv(Q, alpha);
Ref<MLPPVector> one = alg.onevecv(alpha->size());
Ref<MLPPVector> alphaQDeriv = alg.mat_vec_multnv(Q, alpha);
Ref<MLPPVector> one = alg.onevecnv(alpha->size());
return alg.subtractionnm(alphaQDeriv, one);
}
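
In standard notation, dual_form_svm and dual_form_svm_deriv above evaluate the (linear-kernel) SVM dual objective and its gradient:

f(\alpha) = -\mathbf{1}^{\top}\alpha + \tfrac{1}{2}\,\alpha^{\top} Q\,\alpha, \qquad Q = Y^{\top} K Y, \quad Y = \operatorname{diag}(y), \quad K = X X^{\top}, \qquad \nabla_{\alpha} f = Q\alpha - \mathbf{1}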


@ -56,7 +56,7 @@ void MLPPDualSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
_input_set->get_row_into_mlpp_vector(i, input_set_i_row_tmp);
_input_set->get_row_into_mlpp_vector(j, input_set_j_row_tmp);
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_j_row_tmp, input_set_i_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
}
@ -217,7 +217,7 @@ real_t MLPPDualSVC::propagatev(const Ref<MLPPVector> &x) {
for (int j = 0; j < _alpha->size(); j++) {
if (_alpha->get_element(j) != 0) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
z += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
z += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
z += _bias;
@ -252,7 +252,7 @@ Ref<MLPPVector> MLPPDualSVC::propagatem(const Ref<MLPPMatrix> &X) {
_input_set->get_row_into_mlpp_vector(j, input_set_row_tmp);
X->get_row_into_mlpp_vector(i, x_row_tmp);
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
sum += _alpha->get_element(j) * _output_set->get_element(j) * alg.dotnv(input_set_row_tmp, x_row_tmp); // TO DO: DON'T forget to add non-linear kernelizations.
}
}
@ -284,7 +284,7 @@ real_t MLPPDualSVC::kernel_functionv(const Ref<MLPPVector> &v, const Ref<MLPPVec
MLPPLinAlg alg;
if (kernel == KERNEL_METHOD_LINEAR) {
return alg.dotv(u, v);
return alg.dotnv(u, v);
}
return 0;


@ -54,7 +54,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
forward_pass();
while (true) {
cost_prev = cost(_y_hat, alg.onevecv(_n));
cost_prev = cost(_y_hat, alg.onevecnv(_n));
// Training of the discriminator.
@ -63,8 +63,8 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
Ref<MLPPVector> y_hat = model_set_test_discriminator(discriminator_input_set);
Ref<MLPPVector> output_set = alg.zerovecv(_n);
Ref<MLPPVector> output_set_real = alg.onevecv(_n);
Ref<MLPPVector> output_set = alg.zerovecnv(_n);
Ref<MLPPVector> output_set_real = alg.onevecnv(_n);
output_set->add_mlpp_vector(output_set_real); // Fake + real output scores.
ComputeDiscriminatorGradientsResult dgrads = compute_discriminator_gradients(y_hat, _output_set);
@ -77,7 +77,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
generator_input_set = alg.gaussian_noise(_n, _k);
discriminator_input_set = model_set_test_generator(generator_input_set);
y_hat = model_set_test_discriminator(discriminator_input_set);
_output_set = alg.onevecv(_n);
_output_set = alg.onevecnv(_n);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(y_hat, _output_set);
cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
@ -86,7 +86,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
forward_pass();
if (ui) {
print_ui(epoch, cost_prev, _y_hat, alg.onevecv(_n));
print_ui(epoch, cost_prev, _y_hat, alg.onevecnv(_n));
}
epoch++;
@ -103,7 +103,7 @@ real_t MLPPGAN::score() {
forward_pass();
return util.performance_vec(_y_hat, alg.onevecv(_n));
return util.performance_vec(_y_hat, alg.onevecnv(_n));
}
void MLPPGAN::save(const String &file_name) {
@ -240,13 +240,13 @@ void MLPPGAN::update_discriminator_parameters(const Vector<Ref<MLPPMatrix>> &hid
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[0]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
layer = _network[i];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}
}
@ -261,7 +261,7 @@ void MLPPGAN::update_generator_parameters(const Vector<Ref<MLPPMatrix>> &hidden_
//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
//std::cout << hidden_layer_updations[(network.size() - 2) - i + 1].size() << "x" << hidden_layer_updations[(network.size() - 2) - i + 1][0].size() << std::endl;
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}
}
@ -279,7 +279,7 @@ MLPPGAN::ComputeDiscriminatorGradientsResult MLPPGAN::compute_discriminator_grad
_output_layer->set_delta(alg.hadamard_productnv(cost_deriv, activ_deriv));
res.output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
res.output_w_grad = alg.mat_vec_multnv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
res.output_w_grad = alg.additionnv(res.output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!_network.empty()) {
@ -321,7 +321,7 @@ Vector<Ref<MLPPMatrix>> MLPPGAN::compute_generator_gradients(const Ref<MLPPVecto
_output_layer->set_delta(alg.hadamard_productnv(cost_deriv, activ_deriv));
Ref<MLPPVector> output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
Ref<MLPPVector> output_w_grad = alg.mat_vec_multnv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
output_w_grad = alg.additionnv(output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!_network.empty()) {


@ -150,7 +150,7 @@ void MLPPHiddenLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z = alg.mat_vec_addv(alg.matmultnm(_input, _weights), _bias);
_z = alg.mat_vec_addnm(alg.matmultnm(_input, _weights), _bias);
_a = avn.run_activation_norm_matrix(_activation, _z);
}
@ -162,7 +162,7 @@ void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
_z_test = alg.additionnm(alg.mat_vec_multv(alg.transposenm(_weights), x), _bias);
_z_test = alg.additionnm(alg.mat_vec_multnv(alg.transposenm(_weights), x), _bias);
_a_test = avn.run_activation_norm_matrix(_activation, _z_test);
}


@ -2099,7 +2099,7 @@ std::vector<real_t> MLPPLinAlg::subtractMatrixRows(std::vector<real_t> a, std::v
return a;
}
Ref<MLPPVector> MLPPLinAlg::subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B) {
Ref<MLPPVector> MLPPLinAlg::subtract_matrix_rowsnv(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B) {
Ref<MLPPVector> c = a->duplicate();
Size2i b_size = B->size();
@ -2238,7 +2238,7 @@ real_t MLPPLinAlg::dot(std::vector<real_t> a, std::vector<real_t> b) {
return c;
}
real_t MLPPLinAlg::dotv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
real_t MLPPLinAlg::dotnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b) {
int a_size = a->size();
ERR_FAIL_COND_V(a_size != b->size(), 0);
@ -2291,7 +2291,7 @@ std::vector<std::vector<real_t>> MLPPLinAlg::diag(std::vector<real_t> a) {
return B;
}
Ref<MLPPVector> MLPPLinAlg::diagm(const Ref<MLPPVector> &a) {
Ref<MLPPMatrix> MLPPLinAlg::diagnm(const Ref<MLPPVector> &a) {
int a_size = a->size();
Ref<MLPPMatrix> B;
@ -2338,7 +2338,7 @@ Ref<MLPPVector> MLPPLinAlg::absv(const Ref<MLPPVector> &a) {
return out;
}
Ref<MLPPVector> MLPPLinAlg::zerovecv(int n) {
Ref<MLPPVector> MLPPLinAlg::zerovecnv(int n) {
Ref<MLPPVector> vec;
vec.instance();
@ -2347,7 +2347,7 @@ Ref<MLPPVector> MLPPLinAlg::zerovecv(int n) {
return vec;
}
Ref<MLPPVector> MLPPLinAlg::onevecv(int n) {
Ref<MLPPVector> MLPPLinAlg::onevecnv(int n) {
Ref<MLPPVector> vec;
vec.instance();
@ -2356,7 +2356,7 @@ Ref<MLPPVector> MLPPLinAlg::onevecv(int n) {
return vec;
}
Ref<MLPPVector> MLPPLinAlg::fullv(int n, int k) {
Ref<MLPPVector> MLPPLinAlg::fullnv(int n, int k) {
Ref<MLPPVector> vec;
vec.instance();
@ -2384,7 +2384,7 @@ std::vector<real_t> MLPPLinAlg::cos(std::vector<real_t> a) {
return b;
}
Ref<MLPPVector> MLPPLinAlg::sinv(const Ref<MLPPVector> &a) {
Ref<MLPPVector> MLPPLinAlg::sinnv(const Ref<MLPPVector> &a) {
ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPVector>());
Ref<MLPPVector> out;
@ -2402,7 +2402,7 @@ Ref<MLPPVector> MLPPLinAlg::sinv(const Ref<MLPPVector> &a) {
return out;
}
Ref<MLPPVector> MLPPLinAlg::cosv(const Ref<MLPPVector> &a) {
Ref<MLPPVector> MLPPLinAlg::cosnv(const Ref<MLPPVector> &a) {
ERR_FAIL_COND_V(!a.is_valid(), Ref<MLPPVector>());
Ref<MLPPVector> out;
@ -2654,7 +2654,7 @@ std::vector<real_t> MLPPLinAlg::mat_vec_mult(std::vector<std::vector<real_t>> A,
return c;
}
Ref<MLPPMatrix> MLPPLinAlg::mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
Ref<MLPPMatrix> MLPPLinAlg::mat_vec_addnm(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
ERR_FAIL_COND_V(!A.is_valid() || !b.is_valid(), Ref<MLPPMatrix>());
Size2i a_size = A->size();
@ -2679,7 +2679,7 @@ Ref<MLPPMatrix> MLPPLinAlg::mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLP
return ret;
}
Ref<MLPPVector> MLPPLinAlg::mat_vec_multv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
Ref<MLPPVector> MLPPLinAlg::mat_vec_multnv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b) {
ERR_FAIL_COND_V(!A.is_valid() || !b.is_valid(), Ref<MLPPMatrix>());
Size2i a_size = A->size();


@ -225,7 +225,7 @@ public:
void subtractionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
Ref<MLPPVector> subtract_matrix_rows(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
Ref<MLPPVector> subtract_matrix_rowsnv(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
Ref<MLPPVector> lognv(const Ref<MLPPVector> &a);
Ref<MLPPVector> log10nv(const Ref<MLPPVector> &a);
@ -236,7 +236,7 @@ public:
Ref<MLPPVector> cbrtnv(const Ref<MLPPVector> &a);
real_t dot(std::vector<real_t> a, std::vector<real_t> b);
real_t dotv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
real_t dotnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
std::vector<real_t> cross(std::vector<real_t> a, std::vector<real_t> b);
@ -248,18 +248,18 @@ public:
Ref<MLPPVector> absv(const Ref<MLPPVector> &a);
Ref<MLPPVector> zerovecv(int n);
Ref<MLPPVector> onevecv(int n);
Ref<MLPPVector> fullv(int n, int k);
Ref<MLPPVector> zerovecnv(int n);
Ref<MLPPVector> onevecnv(int n);
Ref<MLPPVector> fullnv(int n, int k);
std::vector<std::vector<real_t>> diag(std::vector<real_t> a);
Ref<MLPPVector> diagm(const Ref<MLPPVector> &a);
Ref<MLPPMatrix> diagnm(const Ref<MLPPVector> &a);
std::vector<real_t> sin(std::vector<real_t> a);
std::vector<real_t> cos(std::vector<real_t> a);
Ref<MLPPVector> sinv(const Ref<MLPPVector> &a);
Ref<MLPPVector> cosv(const Ref<MLPPVector> &a);
Ref<MLPPVector> sinnv(const Ref<MLPPVector> &a);
Ref<MLPPVector> cosnv(const Ref<MLPPVector> &a);
std::vector<real_t> max(std::vector<real_t> a, std::vector<real_t> b);
@ -293,8 +293,8 @@ public:
std::vector<std::vector<real_t>> mat_vec_add(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
std::vector<real_t> mat_vec_mult(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
Ref<MLPPMatrix> mat_vec_addv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
Ref<MLPPVector> mat_vec_multv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
Ref<MLPPMatrix> mat_vec_addnm(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
Ref<MLPPVector> mat_vec_multnv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
// TENSOR FUNCTIONS
std::vector<std::vector<std::vector<real_t>>> addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
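
A small usage sketch of the renamed Ref-based overloads declared above (hypothetical values; assumes the module's MLPPVector/MLPPMatrix types are available):

MLPPLinAlg alg;

Ref<MLPPVector> ones = alg.onevecnv(4);                   // [1, 1, 1, 1]          (was onevecv)
Ref<MLPPVector> twos = alg.fullnv(4, 2);                  // [2, 2, 2, 2]          (was fullv)
real_t d = alg.dotnv(ones, twos);                         // 8                     (was dotv)

Ref<MLPPMatrix> D = alg.diagnm(twos);                     // 4x4 diag(2, 2, 2, 2)  (was diagm)
Ref<MLPPVector> Dv = alg.mat_vec_multnv(D, ones);         // [2, 2, 2, 2]          (was mat_vec_multv)
Ref<MLPPMatrix> Dp = alg.mat_vec_addnm(D, ones);          // adds ones to each row (was mat_vec_addv)
Ref<MLPPVector> s = alg.subtract_matrix_rowsnv(twos, D);  // subtracts D's rows from twos (was subtract_matrix_rows)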


@ -92,9 +92,9 @@ void MLPPLinReg::newton_raphson(real_t learning_rate, int max_epoch, bool ui) {
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight gradients (2nd derivative)
Ref<MLPPVector> first_derivative = alg.mat_vec_multv(alg.transposenm(_input_set), error);
Ref<MLPPVector> first_derivative = alg.mat_vec_multnv(alg.transposenm(_input_set), error);
Ref<MLPPMatrix> second_derivative = alg.matmultnm(alg.transposenm(_input_set), _input_set);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(alg.inversenm(second_derivative)), first_derivative)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(alg.inversenm(second_derivative)), first_derivative)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients (2nd derivative)
@ -132,7 +132,7 @@ void MLPPLinReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), error)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), error)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -240,7 +240,7 @@ void MLPPLinReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -276,7 +276,7 @@ void MLPPLinReg::momentum(real_t learning_rate, int max_epoch, int mini_batch_si
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Momentum.
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -289,7 +289,7 @@ void MLPPLinReg::momentum(real_t learning_rate, int max_epoch, int mini_batch_si
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -330,7 +330,7 @@ void MLPPLinReg::nag(real_t learning_rate, int max_epoch, int mini_batch_size, r
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Momentum.
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -345,7 +345,7 @@ void MLPPLinReg::nag(real_t learning_rate, int max_epoch, int mini_batch_size, r
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -386,7 +386,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Adagrad.
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -399,7 +399,7 @@ void MLPPLinReg::adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -441,7 +441,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Adagrad.
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -454,7 +454,7 @@ void MLPPLinReg::adadelta(real_t learning_rate, int max_epoch, int mini_batch_si
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -495,8 +495,8 @@ void MLPPLinReg::adam(real_t learning_rate, int max_epoch, int mini_batch_size,
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Adam.
Ref<MLPPVector> m = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> m = alg.zerovecnv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -509,7 +509,7 @@ void MLPPLinReg::adam(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -553,8 +553,8 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size
int n_mini_batch = _n / mini_batch_size;
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
Ref<MLPPVector> m = alg.zerovecv(_weights->size());
Ref<MLPPVector> u = alg.zerovecv(_weights->size());
Ref<MLPPVector> m = alg.zerovecnv(_weights->size());
Ref<MLPPVector> u = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -567,7 +567,7 @@ void MLPPLinReg::adamax(real_t learning_rate, int max_epoch, int mini_batch_size
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -611,9 +611,9 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
MLPPUtilities::CreateMiniBatchMVBatch batches = MLPPUtilities::create_mini_batchesmv(_input_set, _output_set, n_mini_batch);
// Initializing necessary components for Adam.
Ref<MLPPVector> m = alg.zerovecv(_weights->size());
Ref<MLPPVector> v = alg.zerovecv(_weights->size());
Ref<MLPPVector> m_final = alg.zerovecv(_weights->size());
Ref<MLPPVector> m = alg.zerovecnv(_weights->size());
Ref<MLPPVector> v = alg.zerovecnv(_weights->size());
Ref<MLPPVector> m_final = alg.zerovecnv(_weights->size());
while (true) {
for (int i = 0; i < n_mini_batch; i++) {
@ -626,7 +626,7 @@ void MLPPLinReg::nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_mini_batch);
// Calculating the weight gradients
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> gradient = alg.scalar_multiplynv(1 / current_output_mini_batch->size(), alg.mat_vec_multnv(alg.transposenm(current_input_mini_batch), error));
Ref<MLPPVector> reg_deriv_term = regularization.reg_deriv_termv(_weights, _lambda, _alpha, _reg);
Ref<MLPPVector> weight_grad = alg.additionnv(gradient, reg_deriv_term); // Weight_grad_final
@ -683,17 +683,17 @@ void MLPPLinReg::normal_equation() {
Ref<MLPPVector> temp;
//temp.resize(_k);
temp = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
temp = alg.mat_vec_multnv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multnv(alg.transposenm(_input_set), _output_set));
ERR_FAIL_COND_MSG(Math::is_nan(temp->get_element(0)), "ERR: Resulting matrix was noninvertible/degenerate, and so the normal equation could not be performed. Try utilizing gradient descent.");
if (_reg == MLPPReg::REGULARIZATION_TYPE_RIDGE) {
_weights = alg.mat_vec_multv(alg.inversenm(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
_weights = alg.mat_vec_multnv(alg.inversenm(alg.additionnm(alg.matmultnm(alg.transposenm(_input_set), _input_set), alg.scalar_multiplynm(_lambda, alg.identitym(_k)))), alg.mat_vec_multnv(alg.transposenm(_input_set), _output_set));
} else {
_weights = alg.mat_vec_multv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multv(alg.transposenm(_input_set), _output_set));
_weights = alg.mat_vec_multnv(alg.inversenm(alg.matmultnm(alg.transposenm(_input_set), _input_set)), alg.mat_vec_multnv(alg.transposenm(_input_set), _output_set));
}
_bias = stat.meanv(_output_set) - alg.dotv(_weights, x_means);
_bias = stat.meanv(_output_set) - alg.dotnv(_weights, x_means);
forward_pass();
}
@ -766,13 +766,13 @@ real_t MLPPLinReg::cost(const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y)
real_t MLPPLinReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
return alg.dotv(_weights, x) + _bias;
return alg.dotnv(_weights, x) + _bias;
}
Ref<MLPPVector> MLPPLinReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
return alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights));
return alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights));
}
// wTx + b
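
The closed-form fit in normal_equation above, with X the n \times k input matrix and \lambda the ridge penalty:

w = (X^{\top}X)^{-1} X^{\top} y, \qquad w_{\text{ridge}} = (X^{\top}X + \lambda I_k)^{-1} X^{\top} y, \qquad b = \bar{y} - w^{\top}\bar{x}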


@ -91,7 +91,7 @@ void MLPPLogReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), error)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), error)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -129,7 +129,7 @@ void MLPPLogReg::mle(real_t learning_rate, int max_epoch, bool ui) {
Ref<MLPPVector> error = alg.subtractionnv(_output_set, _y_hat);
// Calculating the weight gradients
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), error)));
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), error)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -235,7 +235,7 @@ void MLPPLogReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_mini_batch_output_entry);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / current_mini_batch_output_entry->size(), alg.mat_vec_multv(alg.transposenm(current_mini_batch_input_entry), error)));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / current_mini_batch_output_entry->size(), alg.mat_vec_multnv(alg.transposenm(current_mini_batch_input_entry), error)));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@ -325,14 +325,14 @@ real_t MLPPLogReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sigmoid_normr(alg.dotv(_weights, x) + _bias);
return avn.sigmoid_normr(alg.dotnv(_weights, x) + _bias);
}
Ref<MLPPVector> MLPPLogReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sigmoid_normv(alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights)));
return avn.sigmoid_normv(alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights)));
}
// sigmoid ( wTx + b )
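
The vectorized gradient-descent update above, with \hat{y} = \sigma(Xw + b), learning rate \eta, and n samples:

w \leftarrow w - \frac{\eta}{n}\, X^{\top}(\hat{y} - y)

followed by the reg_weightsv regularization term, as in the hunk above.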


@ -110,7 +110,7 @@ void MLPPMANN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
_output_layer->set_weights(alg.subtractionnm(_output_layer->get_weights(), alg.scalar_multiplynm(learning_rate / _n, output_w_grad)));
_output_layer->set_weights(regularization.reg_weightsm(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
_output_layer->set_bias(alg.subtract_matrix_rows(_output_layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, _output_layer->get_delta())));
_output_layer->set_bias(alg.subtract_matrix_rowsnv(_output_layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, _output_layer->get_delta())));
if (!_network.empty()) {
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
@ -122,7 +122,7 @@ void MLPPMANN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
layer->set_weights(alg.subtractionnm(layer->get_weights(), alg.scalar_multiplynm(learning_rate / _n, hidden_layer_w_grad)));
layer->set_weights(regularization.reg_weightsm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
for (int i = _network.size() - 2; i >= 0; i--) {
layer = _network[i];
@ -133,7 +133,7 @@ void MLPPMANN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());
layer->set_weights(alg.subtractionnm(layer->get_weights(), alg.scalar_multiplynm(learning_rate / _n, hidden_layer_w_grad)));
layer->set_weights(regularization.reg_weightsm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}


@ -100,7 +100,7 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
// Calculating the weight/bias gradients for layer 2
Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposenm(_a2), error);
Ref<MLPPVector> D2_1 = alg.mat_vec_multnv(alg.transposenm(_a2), error);
// weights and bias updation for layer 2
_weights2->set_from_mlpp_vector(alg.subtractionnv(_weights2, alg.scalar_multiplynv(learning_rate / static_cast<real_t>(_n), D2_1)));
@ -118,7 +118,7 @@ void MLPPMLP::gradient_descent(real_t learning_rate, int max_epoch, bool UI) {
_weights1->set_from_mlpp_matrix(alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate / _n, D1_3)));
_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(learning_rate / _n, D1_2)));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate / _n, D1_2)));
forward_pass();
@ -254,7 +254,7 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
Ref<MLPPVector> error = alg.subtractionnv(ly_hat, current_output);
// Calculating the weight/bias gradients for layer 2
Ref<MLPPVector> D2_1 = alg.mat_vec_multv(alg.transposenm(la2), error);
Ref<MLPPVector> D2_1 = alg.mat_vec_multnv(alg.transposenm(la2), error);
real_t lr_d_cos = learning_rate / static_cast<real_t>(current_output->size());
@ -277,7 +277,7 @@ void MLPPMLP::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
_weights1->set_from_mlpp_matrix(alg.subtractionnm(_weights1, alg.scalar_multiplynm(lr_d_cos, D1_3)));
_weights1->set_from_mlpp_matrix(regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(lr_d_cos, D1_2)));
_bias1->set_from_mlpp_vector(alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(lr_d_cos, D1_2)));
_y_hat = evaluatem(current_input);
@ -359,17 +359,17 @@ Ref<MLPPVector> MLPPMLP::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPMatrix> pz2 = alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> pz2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> pa2 = avn.sigmoid_normm(pz2);
return avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multv(pa2, _weights2)));
return avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multnv(pa2, _weights2)));
}
void MLPPMLP::propagatem(const Ref<MLPPMatrix> &X, Ref<MLPPMatrix> z2_out, Ref<MLPPMatrix> a2_out) {
MLPPLinAlg alg;
MLPPActivation avn;
z2_out->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1));
z2_out->set_from_mlpp_matrix(alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1));
a2_out->set_from_mlpp_matrix(avn.sigmoid_normm(z2_out));
}
@@ -377,17 +377,17 @@ real_t MLPPMLP::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> pz2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> pa2 = avn.sigmoid_normv(pz2);
return avn.sigmoid_normr(alg.dotv(_weights2, pa2) + _bias2);
return avn.sigmoid_normr(alg.dotnv(_weights2, pa2) + _bias2);
}
void MLPPMLP::propagatev(const Ref<MLPPVector> &x, Ref<MLPPVector> z2_out, Ref<MLPPVector> a2_out) {
MLPPLinAlg alg;
MLPPActivation avn;
z2_out->set_from_mlpp_vector(alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1));
z2_out->set_from_mlpp_vector(alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1));
a2_out->set_from_mlpp_vector(avn.sigmoid_normv(z2_out));
}
@@ -395,10 +395,10 @@ void MLPPMLP::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z2->set_from_mlpp_matrix(alg.mat_vec_addv(alg.matmultnm(_input_set, _weights1), _bias1));
_z2->set_from_mlpp_matrix(alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights1), _bias1));
_a2->set_from_mlpp_matrix(avn.sigmoid_normm(_z2));
_y_hat->set_from_mlpp_vector(avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multv(_a2, _weights2))));
_y_hat->set_from_mlpp_vector(avn.sigmoid_normv(alg.scalar_addnv(_bias2, alg.mat_vec_multnv(_a2, _weights2))));
}
MLPPMLP::MLPPMLP(const Ref<MLPPMatrix> &p_input_set, const Ref<MLPPVector> &p_output_set, int p_n_hidden, MLPPReg::RegularizationType p_reg, real_t p_lambda, real_t p_alpha) {
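The evaluate/forward_pass hunks above pair two of the renamed helpers: mat_vec_addnm broadcasts the layer-1 bias row over every row of X * W1, and mat_vec_multnv collapses the activated matrix against the layer-2 weight vector into one score per sample. A self-contained plain-C++ sketch of that batch path (shapes and names are illustrative assumptions, not the library's):

#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

static double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }

// X is n x d, W1 is d x h, b1 is h, w2 is h.
// Z2 = X * W1 + b1 (bias broadcast over rows, as mat_vec_addnm does),
// A2 = sigmoid(Z2), y_hat = sigmoid(A2 * w2 + b2) (mat_vec_multnv + scalar_addnv).
Vec mlp_forward_sketch(const Mat &X, const Mat &W1, const Vec &b1,
		const Vec &w2, double b2) {
	Vec y_hat(X.size(), 0.0);
	for (size_t i = 0; i < X.size(); ++i) {
		Vec a2(b1.size(), 0.0);
		for (size_t j = 0; j < b1.size(); ++j) {
			double z = b1[j]; // broadcast bias
			for (size_t k = 0; k < W1.size(); ++k) {
				z += X[i][k] * W1[k][j];
			}
			a2[j] = sigmoid(z);
		}
		double z_out = b2;
		for (size_t j = 0; j < a2.size(); ++j) {
			z_out += a2[j] * w2[j]; // matrix-vector product, one row at a time
		}
		y_hat[i] = sigmoid(z_out);
	}
	return y_hat;
}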

View File

@@ -124,7 +124,7 @@ void MLPPMultiOutputLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z = alg.mat_vec_addv(alg.matmultnm(_input, _weights), _bias);
_z = alg.mat_vec_addnm(alg.matmultnm(_input, _weights), _bias);
_a = avn.run_activation_norm_matrix(_activation, _z);
}
@@ -132,7 +132,7 @@ void MLPPMultiOutputLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
_z_test = alg.additionnm(alg.mat_vec_multv(alg.transposenm(_weights), x), _bias);
_z_test = alg.additionnm(alg.mat_vec_multnv(alg.transposenm(_weights), x), _bias);
_a_test = avn.run_activation_norm_vector(_activation, _z_test);
}
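Across the hunks in this commit the renames appear to follow one pattern: a trailing n plus a letter for the result type, nv for a vector result and nm for a matrix result. The summary below is inferred purely from the call sites visible in this diff and should be treated as an assumption, not as the library's documented API:

// Inferred from the call sites in this commit (assumptions, not header declarations):
//   mat_vec_multnv(Matrix, Vector)          -> Vector   (was mat_vec_multv)
//   mat_vec_addnm(Matrix, Vector)           -> Matrix   (was mat_vec_addv)
//   subtract_matrix_rowsnv(Vector, Matrix)  -> Vector   (was subtract_matrix_rows)
//   dotnv(Vector, Vector)                   -> real_t   (was dotv)
//   onevecnv(int size)                      -> Vector   (was onevecv)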

View File

@@ -153,7 +153,7 @@ void MLPPOutputLayer::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z = alg.scalar_addnv(_bias, alg.mat_vec_multv(_input, _weights));
_z = alg.scalar_addnv(_bias, alg.mat_vec_multnv(_input, _weights));
_a = avn.run_activation_norm_vector(_activation, _z);
}
@@ -165,7 +165,7 @@ void MLPPOutputLayer::test(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
_z_test = alg.dotv(_weights, x) + _bias;
_z_test = alg.dotnv(_weights, x) + _bias;
_a_test = avn.run_activation_norm_real(_activation, _z_test);
}
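The output-layer hunks contrast the two renamed score paths: a whole batch via mat_vec_multnv(input, weights) plus a scalar bias (scalar_addnv), and a single sample via dotnv(weights, x) + bias. A small plain-C++ sketch of both, so the shapes stay explicit (illustrative only):

#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

// z_i = dot(X_i, w) + b for every row of X -- the mat_vec_multnv + scalar_addnv path.
Vec scores_batch(const Mat &X, const Vec &w, double b) {
	Vec z(X.size(), b);
	for (size_t i = 0; i < X.size(); ++i) {
		for (size_t j = 0; j < w.size(); ++j) {
			z[i] += X[i][j] * w[j];
		}
	}
	return z;
}

// z = dot(w, x) + b -- the dotnv path used in test().
double score_single(const Vec &w, const Vec &x, double b) {
	double z = b;
	for (size_t j = 0; j < w.size(); ++j) {
		z += w[j] * x[j];
	}
	return z;
}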

View File

@@ -84,7 +84,7 @@ void MLPPProbitReg::gradient_descent(real_t learning_rate, int max_epoch, bool u
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(_z)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(_z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@@ -122,7 +122,7 @@ void MLPPProbitReg::mle(real_t learning_rate, int max_epoch, bool ui) {
Ref<MLPPVector> error = alg.subtractionnv(_output_set, _y_hat);
// Calculating the weight gradients
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(_z)))));
_weights = alg.additionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(_z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@@ -242,7 +242,7 @@ void MLPPProbitReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_siz
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / batches.input_sets.size(), alg.mat_vec_multv(alg.transposenm(current_input), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(z_tmp)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / batches.input_sets.size(), alg.mat_vec_multnv(alg.transposenm(current_input), alg.hadamard_productnv(error, avn.gaussian_cdf_derivv(z_tmp)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@@ -364,26 +364,26 @@ Ref<MLPPVector> MLPPProbitReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.gaussian_cdf_normv(alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights)));
return avn.gaussian_cdf_normv(alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights)));
}
Ref<MLPPVector> MLPPProbitReg::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
return alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights));
return alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights));
}
real_t MLPPProbitReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.gaussian_cdf_normr(alg.dotv(_weights, x) + _bias);
return avn.gaussian_cdf_normr(alg.dotnv(_weights, x) + _bias);
}
real_t MLPPProbitReg::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
return alg.dotv(_weights, x) + _bias;
return alg.dotnv(_weights, x) + _bias;
}
// gaussianCDF ( wTx + b )
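Every probit update above has the same shape: weights <- weights - (lr / n) * X^T (error * gaussian_cdf_derivv(z)), with the product taken elementwise and the X^T-times-vector step now spelled mat_vec_multnv. A plain-C++ sketch of one such step, assuming gaussian_cdf_derivv is the elementwise standard-normal pdf (consistent with the CDF definition earlier in this commit, but not verified against the header):

#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

// grad_j = sum_i X[i][j] * error[i] * pdf(z[i]); weights[j] -= (lr / n) * grad_j.
void probit_gradient_step_sketch(Vec &weights, const Mat &X, const Vec &error,
		const Vec &z, double lr) {
	const double n = static_cast<double>(X.size());
	const double inv_sqrt_2pi = 0.3989422804014327; // 1 / sqrt(2 * pi)
	for (size_t j = 0; j < weights.size(); ++j) {
		double grad = 0.0;
		for (size_t i = 0; i < X.size(); ++i) {
			double pdf = inv_sqrt_2pi * std::exp(-0.5 * z[i] * z[i]); // assumed gaussian_cdf_derivv
			grad += X[i][j] * error[i] * pdf; // X^T (error * pdf(z)), i.e. mat_vec_multnv(transposenm(X), ...)
		}
		weights[j] -= (lr / n) * grad;
	}
}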

View File

@@ -96,7 +96,7 @@ void MLPPSoftmaxNet::gradient_descent(real_t learning_rate, int max_epoch, bool
_weights2 = alg.subtractionnm(_weights2, alg.scalar_multiplynm(learning_rate, D2_1));
_weights2 = regularization.reg_weightsm(_weights2, _lambda, _alpha, _reg);
_bias2 = alg.subtract_matrix_rows(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
//Calculating the weight/bias for layer 1
@@ -110,7 +110,7 @@ void MLPPSoftmaxNet::gradient_descent(real_t learning_rate, int max_epoch, bool
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate, D1_3));
_weights1 = regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg);
_bias1 = alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(learning_rate, D1_2));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate, D1_2));
forward_pass();
@@ -183,7 +183,7 @@ void MLPPSoftmaxNet::sgd(real_t learning_rate, int max_epoch, bool ui) {
_bias2 = alg.subtractionnv(_bias2, alg.scalar_multiplynv(learning_rate, error));
// Weight updation for layer 1
Ref<MLPPVector> D1_1 = alg.mat_vec_multv(_weights2, error);
Ref<MLPPVector> D1_1 = alg.mat_vec_multnv(_weights2, error);
Ref<MLPPVector> D1_2 = alg.hadamard_productnm(D1_1, avn.sigmoid_derivv(prop_res.z2));
Ref<MLPPMatrix> D1_3 = alg.outer_product(input_set_row_tmp, D1_2);
@@ -248,7 +248,7 @@ void MLPPSoftmaxNet::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
_weights2 = regularization.reg_weightsm(_weights2, _lambda, _alpha, _reg);
// Bias Updation for layer 2
_bias2 = alg.subtract_matrix_rows(_bias2, alg.scalar_multiplynm(learning_rate, error));
_bias2 = alg.subtract_matrix_rowsnv(_bias2, alg.scalar_multiplynm(learning_rate, error));
//Calculating the weight/bias for layer 1
@@ -260,7 +260,7 @@ void MLPPSoftmaxNet::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
_weights1 = alg.subtractionnm(_weights1, alg.scalar_multiplynm(learning_rate, D1_3));
_weights1 = regularization.reg_weightsm(_weights1, _lambda, _alpha, _reg);
_bias1 = alg.subtract_matrix_rows(_bias1, alg.scalar_multiplynm(learning_rate, D1_2));
_bias1 = alg.subtract_matrix_rowsnv(_bias1, alg.scalar_multiplynm(learning_rate, D1_2));
y_hat = evaluatem(current_input_mini_batch);
@@ -366,10 +366,10 @@ Ref<MLPPVector> MLPPSoftmaxNet::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPVector> z2 = alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
Ref<MLPPVector> a2 = avn.sigmoid_normv(z2);
return avn.adj_softmax_normv(alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights2), a2), _bias2));
return avn.adj_softmax_normv(alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights2), a2), _bias2));
}
MLPPSoftmaxNet::PropagateVResult MLPPSoftmaxNet::propagatev(const Ref<MLPPVector> &x) {
@@ -378,7 +378,7 @@ MLPPSoftmaxNet::PropagateVResult MLPPSoftmaxNet::propagatev(const Ref<MLPPVector
PropagateVResult res;
res.z2 = alg.additionnv(alg.mat_vec_multv(alg.transposenm(_weights1), x), _bias1);
res.z2 = alg.additionnv(alg.mat_vec_multnv(alg.transposenm(_weights1), x), _bias1);
res.a2 = avn.sigmoid_normv(res.z2);
return res;
@@ -388,10 +388,10 @@ Ref<MLPPMatrix> MLPPSoftmaxNet::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
Ref<MLPPMatrix> z2 = alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
Ref<MLPPMatrix> a2 = avn.sigmoid_normm(z2);
return avn.adj_softmax_normm(alg.mat_vec_addv(alg.matmultnm(a2, _weights2), _bias2));
return avn.adj_softmax_normm(alg.mat_vec_addnm(alg.matmultnm(a2, _weights2), _bias2));
}
MLPPSoftmaxNet::PropagateMResult MLPPSoftmaxNet::propagatem(const Ref<MLPPMatrix> &X) {
@@ -400,7 +400,7 @@ MLPPSoftmaxNet::PropagateMResult MLPPSoftmaxNet::propagatem(const Ref<MLPPMatrix
MLPPSoftmaxNet::PropagateMResult res;
res.z2 = alg.mat_vec_addv(alg.matmultnm(X, _weights1), _bias1);
res.z2 = alg.mat_vec_addnm(alg.matmultnm(X, _weights1), _bias1);
res.a2 = avn.sigmoid_normm(res.z2);
return res;
@@ -410,9 +410,9 @@ void MLPPSoftmaxNet::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_z2 = alg.mat_vec_addv(alg.matmultnm(_input_set, _weights1), _bias1);
_z2 = alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights1), _bias1);
_a2 = avn.sigmoid_normm(_z2);
_y_hat = avn.adj_softmax_normm(alg.mat_vec_addv(alg.matmultnm(_a2, _weights2), _bias2));
_y_hat = avn.adj_softmax_normm(alg.mat_vec_addnm(alg.matmultnm(_a2, _weights2), _bias2));
}
void MLPPSoftmaxNet::_bind_methods() {
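In the sgd hunk above, the per-sample backprop into layer 1 chains three helpers touched by this commit: D1_1 = mat_vec_multnv(W2, error), D1_2 = D1_1 * sigmoid_derivv(z2) elementwise, and D1_3 = outer_product(x, D1_2). A plain-C++ sketch of that chain, writing sigmoid'(z) as sigmoid(z) * (1 - sigmoid(z)) and assuming W2 is hidden x classes:

#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

static double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }

// Returns D1_3 = outer(x, (W2 * error) ⊙ sigmoid'(z2)), the layer-1 weight gradient for one sample.
Mat layer1_gradient_sketch(const Mat &W2, const Vec &error, const Vec &z2, const Vec &x) {
	Vec d1(z2.size(), 0.0);
	for (size_t j = 0; j < z2.size(); ++j) {
		for (size_t c = 0; c < error.size(); ++c) {
			d1[j] += W2[j][c] * error[c]; // D1_1 = mat_vec_multnv(W2, error)
		}
		double s = sigmoid(z2[j]);
		d1[j] *= s * (1.0 - s); // D1_2 = D1_1 hadamard sigmoid_derivv(z2)
	}
	Mat D1_3(x.size(), Vec(d1.size(), 0.0));
	for (size_t i = 0; i < x.size(); ++i) {
		for (size_t j = 0; j < d1.size(); ++j) {
			D1_3[i][j] = x[i] * d1[j]; // outer_product(x, D1_2)
		}
	}
	return D1_3;
}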

View File

@@ -97,7 +97,7 @@ void MLPPSoftmaxReg::gradient_descent(real_t learning_rate, int max_epoch, bool
//real_t b_gradient = alg.sum_elements(error);
// Bias Updation
_bias = alg.subtract_matrix_rows(_bias, alg.scalar_multiplynm(learning_rate, error));
_bias = alg.subtract_matrix_rowsnv(_bias, alg.scalar_multiplynm(learning_rate, error));
forward_pass();
@@ -218,7 +218,7 @@ void MLPPSoftmaxReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_si
_weights = regularization.reg_weightsm(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
_bias = alg.subtract_matrix_rows(_bias, alg.scalar_multiplynm(learning_rate, error));
_bias = alg.subtract_matrix_rowsnv(_bias, alg.scalar_multiplynm(learning_rate, error));
y_hat = evaluatem(current_inputs);
if (ui) {
@@ -345,14 +345,14 @@ Ref<MLPPVector> MLPPSoftmaxReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.softmax_normv(alg.additionnv(_bias, alg.mat_vec_multv(alg.transposenm(_weights), x)));
return avn.softmax_normv(alg.additionnv(_bias, alg.mat_vec_multnv(alg.transposenm(_weights), x)));
}
Ref<MLPPMatrix> MLPPSoftmaxReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.softmax_normm(alg.mat_vec_addv(alg.matmultnm(X, _weights), _bias));
return avn.softmax_normm(alg.mat_vec_addnm(alg.matmultnm(X, _weights), _bias));
}
// softmax ( wTx + b )
@@ -360,7 +360,7 @@ void MLPPSoftmaxReg::forward_pass() {
MLPPLinAlg alg;
MLPPActivation avn;
_y_hat = avn.softmax_normm(alg.mat_vec_addv(alg.matmultnm(_input_set, _weights), _bias));
_y_hat = avn.softmax_normm(alg.mat_vec_addnm(alg.matmultnm(_input_set, _weights), _bias));
}
void MLPPSoftmaxReg::_bind_methods() {
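The softmax-regression hunks reduce to softmax(X * W + b), with the bias row broadcast by mat_vec_addnm. The sketch below writes the row-wise softmax out in plain C++; the max shift is a stability choice added here for illustration, not a claim about softmax_normm itself:

#include <algorithm>
#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

// y[i] = softmax(X[i] * W + b), computed row by row; W is d x classes, b has one entry per class.
Mat softmax_reg_forward_sketch(const Mat &X, const Mat &W, const Vec &b) {
	Mat y(X.size(), Vec(b.size(), 0.0));
	for (size_t i = 0; i < X.size(); ++i) {
		Vec z = b; // mat_vec_addnm: broadcast the bias row
		for (size_t c = 0; c < b.size(); ++c) {
			for (size_t j = 0; j < X[i].size(); ++j) {
				z[c] += X[i][j] * W[j][c];
			}
		}
		double m = *std::max_element(z.begin(), z.end()); // shift for numerical stability
		double sum = 0.0;
		for (size_t c = 0; c < z.size(); ++c) {
			y[i][c] = std::exp(z[c] - m);
			sum += y[i][c];
		}
		for (size_t c = 0; c < z.size(); ++c) {
			y[i][c] /= sum;
		}
	}
	return y;
}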

View File

@@ -69,7 +69,7 @@ void MLPPSVC::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
while (true) {
cost_prev = cost(_y_hat, _output_set, _weights, _c);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), mlpp_cost.hinge_loss_derivwv(_z, _output_set, _c))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), mlpp_cost.hinge_loss_derivwv(_z, _output_set, _c))));
_weights = regularization.reg_weightsv(_weights, learning_rate / _n, 0, MLPPReg::REGULARIZATION_TYPE_RIDGE);
// Calculating the bias gradients
@@ -190,7 +190,7 @@ void MLPPSVC::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
cost_prev = cost(z, current_output_batch_entry, _weights, _c);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(current_input_batch_entry), mlpp_cost.hinge_loss_derivwv(z, current_output_batch_entry, _c))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(current_input_batch_entry), mlpp_cost.hinge_loss_derivwv(z, current_output_batch_entry, _c))));
_weights = regularization.reg_weightsv(_weights, learning_rate / _n, 0, MLPPReg::REGULARIZATION_TYPE_RIDGE);
// Calculating the bias gradients
@@ -307,25 +307,25 @@ real_t MLPPSVC::cost(const Ref<MLPPVector> &z, const Ref<MLPPVector> &y, const R
Ref<MLPPVector> MLPPSVC::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign_normv(alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights)));
return avn.sign_normv(alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights)));
}
Ref<MLPPVector> MLPPSVC::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights));
return alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights));
}
real_t MLPPSVC::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.sign_normr(alg.dotv(_weights, x) + _bias);
return avn.sign_normr(alg.dotnv(_weights, x) + _bias);
}
real_t MLPPSVC::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return alg.dotv(_weights, x) + _bias;
return alg.dotnv(_weights, x) + _bias;
}
// sign ( wTx + b )
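The SVC hunks feed a hinge-loss derivative through the same renamed X^T-times-vector step (mat_vec_multnv). As a rough illustration only, the sketch below uses a standard hinge subgradient (-y_i when y_i * z_i < 1, else 0) as a stand-in for hinge_loss_derivwv, whose exact form is not shown in this diff:

#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

// One SVC weight step: w <- w - (lr / n) * X^T g, where g is a standard hinge subgradient.
// The ridge term applied by reg_weightsv above is omitted here.
void svc_gradient_step_sketch(Vec &w, const Mat &X, const Vec &z, const Vec &y, double lr) {
	const double n = static_cast<double>(X.size());
	Vec g(z.size(), 0.0);
	for (size_t i = 0; i < z.size(); ++i) {
		g[i] = (y[i] * z[i] < 1.0) ? -y[i] : 0.0; // stand-in for hinge_loss_derivwv
	}
	for (size_t j = 0; j < w.size(); ++j) {
		double grad = 0.0;
		for (size_t i = 0; i < X.size(); ++i) {
			grad += X[i][j] * g[i]; // mat_vec_multnv(transposenm(X), g)
		}
		w[j] -= (lr / n) * grad;
	}
}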

View File

@@ -87,7 +87,7 @@ void MLPPTanhReg::gradient_descent(real_t learning_rate, int max_epoch, bool ui)
Ref<MLPPVector> error = alg.subtractionnv(_y_hat, _output_set);
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.tanh_derivv(_z)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(_input_set), alg.hadamard_productnv(error, avn.tanh_derivv(_z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@@ -194,7 +194,7 @@ void MLPPTanhReg::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size,
Ref<MLPPVector> error = alg.subtractionnv(y_hat, current_output_batch_entry);
// Calculating the weight gradients
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multv(alg.transposenm(current_input_batch_entry), alg.hadamard_productnv(error, avn.tanh_derivv(z)))));
_weights = alg.subtractionnv(_weights, alg.scalar_multiplynv(learning_rate / _n, alg.mat_vec_multnv(alg.transposenm(current_input_batch_entry), alg.hadamard_productnv(error, avn.tanh_derivv(z)))));
_weights = regularization.reg_weightsv(_weights, _lambda, _alpha, _reg);
// Calculating the bias gradients
@@ -286,26 +286,26 @@ real_t MLPPTanhReg::evaluatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh_normr(alg.dotv(_weights, x) + _bias);
return avn.tanh_normr(alg.dotnv(_weights, x) + _bias);
}
real_t MLPPTanhReg::propagatev(const Ref<MLPPVector> &x) {
MLPPLinAlg alg;
return alg.dotv(_weights, x) + _bias;
return alg.dotnv(_weights, x) + _bias;
}
Ref<MLPPVector> MLPPTanhReg::evaluatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
MLPPActivation avn;
return avn.tanh_normv(alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights)));
return avn.tanh_normv(alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights)));
}
Ref<MLPPVector> MLPPTanhReg::propagatem(const Ref<MLPPMatrix> &X) {
MLPPLinAlg alg;
return alg.scalar_addnv(_bias, alg.mat_vec_multv(X, _weights));
return alg.scalar_addnv(_bias, alg.mat_vec_multnv(X, _weights));
}
// Tanh ( wTx + b )
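The tanh-regression updates above multiply the error elementwise by tanh_derivv(z), i.e. by tanh'(z) = 1 - tanh(z)^2. The elementwise vector version as a short plain-C++ sketch:

#include <cmath>
#include <vector>

// Elementwise tanh'(z) = 1 - tanh(z)^2, the factor hadamard-multiplied into the error above.
std::vector<double> tanh_deriv_sketch(const std::vector<double> &z) {
	std::vector<double> d(z.size());
	for (size_t i = 0; i < z.size(); ++i) {
		double t = std::tanh(z[i]);
		d[i] = 1.0 - t * t;
	}
	return d;
}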

View File

@@ -53,7 +53,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
const int CRITIC_INTERATIONS = 5; // Wasserstein GAN specific parameter.
while (true) {
cost_prev = cost(_y_hat, alg.onevecv(_n));
cost_prev = cost(_y_hat, alg.onevecnv(_n));
Ref<MLPPMatrix> generator_input_set;
Ref<MLPPMatrix> discriminator_input_set;
@@ -69,8 +69,8 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
discriminator_input_set->add_rows_mlpp_matrix(_output_set); // Fake + real inputs.
ly_hat = model_set_test_discriminator(discriminator_input_set);
loutput_set = alg.scalar_multiplynv(-1, alg.onevecv(_n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
Ref<MLPPVector> output_set_real = alg.onevecv(_n);
loutput_set = alg.scalar_multiplynv(-1, alg.onevecnv(_n)); // WGAN changes y_i = 1 and y_i = 0 to y_i = 1 and y_i = -1
Ref<MLPPVector> output_set_real = alg.onevecnv(_n);
loutput_set->add_mlpp_vector(output_set_real); // Fake + real output scores.
DiscriminatorGradientResult discriminator_gradient_results = compute_discriminator_gradients(ly_hat, loutput_set);
@@ -86,7 +86,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
generator_input_set = alg.gaussian_noise(_n, _k);
discriminator_input_set->set_from_mlpp_matrix(model_set_test_generator(generator_input_set));
ly_hat = model_set_test_discriminator(discriminator_input_set);
loutput_set = alg.onevecv(_n);
loutput_set = alg.onevecnv(_n);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(_y_hat, loutput_set);
cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
@@ -95,7 +95,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
forward_pass();
if (ui) {
handle_ui(epoch, cost_prev, _y_hat, alg.onevecv(_n));
handle_ui(epoch, cost_prev, _y_hat, alg.onevecnv(_n));
}
epoch++;
@@ -109,7 +109,7 @@ real_t MLPPWGAN::score() {
MLPPLinAlg alg;
MLPPUtilities util;
forward_pass();
return util.performance_vec(_y_hat, alg.onevecv(_n));
return util.performance_vec(_y_hat, alg.onevecnv(_n));
}
void MLPPWGAN::save(const String &file_name) {
@@ -271,13 +271,13 @@ void MLPPWGAN::update_discriminator_parameters(Vector<Ref<MLPPMatrix>> hidden_la
Ref<MLPPHiddenLayer> layer = _network[_network.size() - 1];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[0]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
for (int i = _network.size() - 2; i > _network.size() / 2; i--) {
layer = _network[i];
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}
}
@@ -292,7 +292,7 @@ void MLPPWGAN::update_generator_parameters(Vector<Ref<MLPPMatrix>> hidden_layer_
//std::cout << network[i].weights.size() << "x" << network[i].weights[0].size() << std::endl;
//std::cout << hiddenLayerUpdations[(network.size() - 2) - i + 1].size() << "x" << hiddenLayerUpdations[(network.size() - 2) - i + 1][0].size() << std::endl;
layer->set_weights(alg.subtractionnm(layer->get_weights(), hidden_layer_updations[(_network.size() - 2) - i + 1]));
layer->set_bias(alg.subtract_matrix_rows(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
layer->set_bias(alg.subtract_matrix_rowsnv(layer->get_bias(), alg.scalar_multiplynm(learning_rate / _n, layer->get_delta())));
}
}
}
@@ -307,7 +307,7 @@ MLPPWGAN::DiscriminatorGradientResult MLPPWGAN::compute_discriminator_gradients(
_output_layer->set_delta(alg.hadamard_productnv(mlpp_cost.run_cost_deriv_vector(_output_layer->get_cost(), y_hat, output_set), avn.run_activation_deriv_vector(_output_layer->get_activation(), _output_layer->get_z())));
data.output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
data.output_w_grad = alg.mat_vec_multnv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
data.output_w_grad = alg.additionnv(data.output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!_network.empty()) {
@@ -350,7 +350,7 @@ Vector<Ref<MLPPMatrix>> MLPPWGAN::compute_generator_gradients(const Ref<MLPPVect
_output_layer->set_delta(alg.hadamard_productnv(cost_deriv_vector, activation_deriv_vector));
Ref<MLPPVector> output_w_grad = alg.mat_vec_multv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
Ref<MLPPVector> output_w_grad = alg.mat_vec_multnv(alg.transposenm(_output_layer->get_input()), _output_layer->get_delta());
output_w_grad = alg.additionnv(output_w_grad, regularization.reg_deriv_termv(_output_layer->get_weights(), _output_layer->get_lambda(), _output_layer->get_alpha(), _output_layer->get_reg()));
if (!_network.empty()) {
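The WGAN hunks above use the renamed onevecnv(n) to build the critic targets: fake samples get scalar_multiplynv(-1, onevecnv(n)), a vector of -1s, and real samples get onevecnv(n), a vector of +1s, matching the comment that WGAN swaps the 0/1 labels for -1/+1. A tiny plain-C++ sketch of assembling that fake-then-real target vector (ordering taken from the add_mlpp_vector call above):

#include <vector>

// Build the WGAN critic targets: n fake scores labelled -1 followed by n real scores labelled +1.
std::vector<double> wgan_targets_sketch(int n) {
	std::vector<double> targets;
	targets.reserve(2 * n);
	targets.insert(targets.end(), n, -1.0); // scalar_multiplynv(-1, onevecnv(n)) for the fake half
	targets.insert(targets.end(), n, 1.0);  // onevecnv(n) appended for the real half
	return targets;
}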