From 1497a2c1b0501261fec6297de00c64ddefc69849 Mon Sep 17 00:00:00 2001
From: Relintai
Date: Wed, 26 Apr 2023 10:07:40 +0200
Subject: [PATCH] Small cleanups.

---
 mlpp/ann/ann.cpp                   |  4 ++--
 mlpp/hidden_layer/hidden_layer.cpp | 10 +++++----
 mlpp/lin_alg/mlpp_matrix.cpp       | 36 +++++++++++++++++-------------
 3 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/mlpp/ann/ann.cpp b/mlpp/ann/ann.cpp
index 2ca5304..1f751f1 100644
--- a/mlpp/ann/ann.cpp
+++ b/mlpp/ann/ann.cpp
@@ -877,7 +877,7 @@ MLPPANN::ComputeGradientsResult MLPPANN::compute_gradients(const Ref
 
 	Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());
 
-	res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+	res.cumulative_hidden_layer_w_grad.push_back(hidden_layer_w_grad->addn(regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 
 	for (int i = _network.size() - 2; i >= 0; i--) {
 		layer = _network[i];
@@ -885,7 +885,7 @@ MLPPANN::ComputeGradientsResult MLPPANN::compute_gradients(const Ref
 		layer->set_delta(alg.hadamard_productnm(alg.matmultnm(next_layer->get_delta(), alg.transposenm(next_layer->get_weights())), avn.run_activation_deriv_vector(layer->get_activation(), layer->get_z())));
 
 		hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());
-		res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+		res.cumulative_hidden_layer_w_grad.push_back(hidden_layer_w_grad->addn(regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
 	}
 }
 
diff --git a/mlpp/hidden_layer/hidden_layer.cpp b/mlpp/hidden_layer/hidden_layer.cpp
index 6b87bf0..5a85410 100644
--- a/mlpp/hidden_layer/hidden_layer.cpp
+++ b/mlpp/hidden_layer/hidden_layer.cpp
@@ -147,10 +147,11 @@ void MLPPHiddenLayer::forward_pass() {
 		initialize();
 	}
 
-	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	_z = alg.mat_vec_addnm(alg.matmultnm(_input, _weights), _bias);
+	_z->multb(_input, _weights);
+	_z->add_vec(_bias);
+
 	_a = avn.run_activation_norm_matrix(_activation, _z);
 }
 
@@ -159,10 +160,11 @@ void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
 		initialize();
 	}
 
-	MLPPLinAlg alg;
 	MLPPActivation avn;
 
-	_z_test = alg.additionnm(alg.mat_vec_multnv(alg.transposenm(_weights), x), _bias);
+	_z_test = _weights->transposen()->mult_vec(x);
+	_z_test->add(_bias);
+
 	_a_test = avn.run_activation_norm_matrix(_activation, _z_test);
 }
 
diff --git a/mlpp/lin_alg/mlpp_matrix.cpp b/mlpp/lin_alg/mlpp_matrix.cpp
index d1d31ad..8713d98 100644
--- a/mlpp/lin_alg/mlpp_matrix.cpp
+++ b/mlpp/lin_alg/mlpp_matrix.cpp
@@ -665,26 +665,28 @@ void MLPPMatrix::subb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 }
 
 void MLPPMatrix::mult(const Ref<MLPPMatrix> &B) {
+	ERR_FAIL_MSG("TODO");
+
 	ERR_FAIL_COND(!B.is_valid());
 
 	Size2i b_size = B->size();
 
-	ERR_FAIL_COND(_size != b_size);
+	ERR_FAIL_COND(_size.x != b_size.y || _size.y != b_size.x);
+
+	//TODO need to make a copy of this, resize, and use that to get results into this
 
 	const real_t *b_ptr = B->ptr();
 	real_t *c_ptr = ptrw();
 
-	for (int i = 0; i < _size.y; i++) {
-		for (int k = 0; k < b_size.y; k++) {
-			int ind_i_k = calculate_index(i, k);
+	for (int ay = 0; ay < _size.y; ay++) {
+		for (int by = 0; by < b_size.y; by++) {
+			int ind_ay_by = calculate_index(ay, by);
 
-			for (int j = 0; j < b_size.x; j++) {
-				int ind_i_j = calculate_index(i, j);
-				int ind_k_j = B->calculate_index(k, j);
+			for (int bx = 0; bx < b_size.x; bx++) {
+				int ind_ay_bx = calculate_index(ay, bx);
+				int ind_by_bx = B->calculate_index(by, bx);
 
-				c_ptr[ind_i_j] += c_ptr[ind_i_k] * b_ptr[ind_k_j];
-
-				//C->set_element(i, j, get_element(i, j) + get_element(i, k) * B->get_element(k, j
+				c_ptr[ind_ay_bx] += c_ptr[ind_ay_by] * b_ptr[ind_by_bx];
 			}
 		}
 	}
@@ -694,11 +696,13 @@ Ref<MLPPMatrix> MLPPMatrix::multn(const Ref<MLPPMatrix> &B) const {
 
 	Size2i b_size = B->size();
 
-	ERR_FAIL_COND_V(_size != b_size, Ref<MLPPMatrix>());
+	ERR_FAIL_COND_V_MSG(_size.x != b_size.y, Ref<MLPPMatrix>(), "_size.x != b_size.y _size: " + _size.operator String() + " b_size: " + b_size.operator String());
+
+	Size2i rs = Size2i(b_size.x, _size.y);
 
 	Ref<MLPPMatrix> C;
 	C.instance();
-	C->resize(_size);
+	C->resize(rs);
 
 	const real_t *a_ptr = ptr();
 	const real_t *b_ptr = B->ptr();
@@ -727,10 +731,12 @@ void MLPPMatrix::multb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 	Size2i a_size = A->size();
 	Size2i b_size = B->size();
 
-	ERR_FAIL_COND(a_size != b_size);
+	ERR_FAIL_COND_MSG(a_size.x != b_size.y, "a_size.x != b_size.y: a_size: " + a_size.operator String() + " b_size: " + b_size.operator String());
 
-	if (_size != a_size) {
-		resize(a_size);
+	Size2i rs = Size2i(b_size.x, a_size.y);
+
+	if (unlikely(_size != rs)) {
+		resize(rs);
 	}
 
 	const real_t *a_ptr = A->ptr();
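
Note on the shape logic in multn()/multb(): Size2i stores the column count
in x and the row count in y, so for C = A * B only the inner dimensions have
to agree (A's x must equal B's y), and the product has shape
Size2i(b_size.x, a_size.y), i.e. B's columns by A's rows. Requiring
a_size.y == b_size.x as well would reject valid products such as a (2x3)
times (3x4) multiply. Below is a minimal standalone sketch of the same
row-major multiply and of the flat indexing that calculate_index() performs;
Mat, mat_mul and index are hypothetical names used only for illustration,
not MLPPMatrix API:

	#include <cassert>
	#include <cstdio>
	#include <vector>

	typedef double real_t;

	struct Mat {
		int rows; // Size2i.y in the patch
		int cols; // Size2i.x in the patch
		std::vector<real_t> data; // row-major, rows * cols elements

		// Same flat index that MLPPMatrix::calculate_index(y, x) computes.
		int index(int y, int x) const { return y * cols + x; }
	};

	Mat mat_mul(const Mat &a, const Mat &b) {
		assert(a.cols == b.rows); // only the inner dimensions must match

		// Result shape: B's columns by A's rows, zero-initialized.
		Mat c{ a.rows, b.cols, std::vector<real_t>(a.rows * b.cols, 0) };

		for (int ay = 0; ay < a.rows; ay++) {
			for (int by = 0; by < b.rows; by++) {
				real_t a_ay_by = a.data[a.index(ay, by)];

				for (int bx = 0; bx < b.cols; bx++) {
					// c[ay][bx] += a[ay][by] * b[by][bx], as in multn().
					c.data[c.index(ay, bx)] += a_ay_by * b.data[b.index(by, bx)];
				}
			}
		}

		return c;
	}

	int main() {
		Mat a{ 2, 3, { 1, 2, 3, 4, 5, 6 } };    // 2x3
		Mat b{ 3, 2, { 7, 8, 9, 10, 11, 12 } }; // 3x2
		Mat c = mat_mul(a, b);                  // 2x2: 58 64 / 139 154

		for (int y = 0; y < c.rows; y++) {
			for (int x = 0; x < c.cols; x++) {
				printf("%g ", c.data[c.index(y, x)]);
			}
			printf("\n");
		}

		return 0;
	}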
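
Note on the ERR_FAIL_MSG("TODO") guard in mult(): the in-place loop uses
c_ptr both as the left operand and as the accumulation target, so by the
time row ay is being accumulated, c_ptr[ind_ay_by] may already hold partial
results instead of the original operand values, and the destination is never
zeroed first. That is what the "//TODO need to make a copy of this" comment
refers to. A sketch of the copy-first approach under those assumptions,
reusing the hypothetical Mat type from the sketch above (this is not
MLPPMatrix's actual implementation):

	// Multiply self by b in place: snapshot self first, then resize and
	// accumulate into a cleared buffer.
	void mult_in_place(Mat &self, const Mat &b) {
		assert(self.cols == b.rows);

		Mat a = self; // copy of the left operand before it is clobbered

		self.cols = b.cols; // result shape: b's columns by self's rows
		self.data.assign(self.rows * self.cols, 0);

		for (int ay = 0; ay < a.rows; ay++) {
			for (int by = 0; by < b.rows; by++) {
				for (int bx = 0; bx < b.cols; bx++) {
					self.data[self.index(ay, bx)] += a.data[a.index(ay, by)] * b.data[b.index(by, bx)];
				}
			}
		}
	}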