Mirror of https://github.com/Relintai/pmlpp.git (synced 2024-11-08 13:12:09 +01:00)

Commit 1497a2c1b0: Small cleanups.
Parent: fffb3075e4
@@ -877,7 +877,7 @@ MLPPANN::ComputeGradientsResult MLPPANN::compute_gradients(const Ref<MLPPVector>

     Ref<MLPPMatrix> hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());

-    res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+    res.cumulative_hidden_layer_w_grad.push_back(hidden_layer_w_grad->addn(regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.

     for (int i = _network.size() - 2; i >= 0; i--) {
         layer = _network[i];
@@ -885,7 +885,7 @@ MLPPANN::ComputeGradientsResult MLPPANN::compute_gradients(const Ref<MLPPVector>
         layer->set_delta(alg.hadamard_productnm(alg.matmultnm(next_layer->get_delta(), alg.transposenm(next_layer->get_weights())), avn.run_activation_deriv_vector(layer->get_activation(), layer->get_z())));

         hidden_layer_w_grad = alg.matmultnm(alg.transposenm(layer->get_input()), layer->get_delta());

-        res.cumulative_hidden_layer_w_grad.push_back(alg.additionnm(hidden_layer_w_grad, regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
+        res.cumulative_hidden_layer_w_grad.push_back(hidden_layer_w_grad->addn(regularization.reg_deriv_termm(layer->get_weights(), layer->get_lambda(), layer->get_alpha(), layer->get_reg()))); // Adding to our cumulative hidden layer grads. Maintain reg terms as well.
     }
 }
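Both compute_gradients hunks make the same substitution: the free function alg.additionnm(a, b) becomes the member call a->addn(b), with the gradient math untouched. For orientation, this is the textbook hidden-layer backprop step. Below is a minimal sketch in plain C++; Mat and the matmul/transpose/hadamard helpers are illustrative stand-ins, not pmlpp API.

    #include <vector>

    using Mat = std::vector<std::vector<double>>;

    // Illustrative helper: standard row-major matrix product.
    static Mat matmul(const Mat &A, const Mat &B) {
        Mat C(A.size(), std::vector<double>(B[0].size(), 0.0));
        for (size_t i = 0; i < A.size(); ++i)
            for (size_t k = 0; k < B.size(); ++k)
                for (size_t j = 0; j < B[0].size(); ++j)
                    C[i][j] += A[i][k] * B[k][j];
        return C;
    }

    // Illustrative helper: matrix transpose.
    static Mat transpose(const Mat &A) {
        Mat T(A[0].size(), std::vector<double>(A.size()));
        for (size_t i = 0; i < A.size(); ++i)
            for (size_t j = 0; j < A[0].size(); ++j)
                T[j][i] = A[i][j];
        return T;
    }

    // Illustrative helper: element-wise (Hadamard) product.
    static Mat hadamard(Mat A, const Mat &B) {
        for (size_t i = 0; i < A.size(); ++i)
            for (size_t j = 0; j < A[0].size(); ++j)
                A[i][j] *= B[i][j];
        return A;
    }

    // One hidden-layer step, as in the hunks above:
    //   delta_l  = (delta_{l+1} * W_{l+1}^T) .* f'(z_l)
    //   w_grad_l = input_l^T * delta_l   (the regularization derivative is then added on top)
    static Mat backprop_step(const Mat &next_delta, const Mat &next_weights,
            const Mat &activation_deriv, const Mat &input, Mat &delta_out) {
        delta_out = hadamard(matmul(next_delta, transpose(next_weights)), activation_deriv);
        return matmul(transpose(input), delta_out);
    }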
@@ -147,10 +147,11 @@ void MLPPHiddenLayer::forward_pass() {
         initialize();
     }

     MLPPLinAlg alg;
     MLPPActivation avn;

-    _z = alg.mat_vec_addnm(alg.matmultnm(_input, _weights), _bias);
+    _z->multb(_input, _weights);
+    _z->add_vec(_bias);

     _a = avn.run_activation_norm_matrix(_activation, _z);
 }
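The forward_pass rewrite drops the temporary-allocating MLPPLinAlg helpers in favor of in-place member calls on the pre-allocated _z, so z = input * W + b is computed without intermediate matrices. A hedged usage sketch, assuming from the diff that multb(A, B) stores A * B into the callee and add_vec(v) adds the row vector v to every row:

    // Hypothetical usage mirroring the new forward pass (semantics inferred from the diff):
    Ref<MLPPMatrix> z;
    z.instance();
    z->multb(input, weights); // assumed: z <- input * weights, resizing z as needed
    z->add_vec(bias);         // assumed: adds the bias row vector to every row of z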
@@ -159,10 +160,11 @@ void MLPPHiddenLayer::test(const Ref<MLPPVector> &x) {
         initialize();
     }

     MLPPLinAlg alg;
     MLPPActivation avn;

-    _z_test = alg.additionnm(alg.mat_vec_multnv(alg.transposenm(_weights), x), _bias);
+    _z_test = _weights->transposen()->mult_vec(x);
+    _z_test->add(_bias);

     _a_test = avn.run_activation_norm_matrix(_activation, _z_test);
 }
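The single-sample path in test() gets the same treatment: z_test = W^T x + b is now built by chaining transposen() and mult_vec(x) on the weight matrix, then adding the bias in place, instead of routing through three MLPPLinAlg temporaries. A hedged equivalent, with call semantics and types inferred from the diff:

    // Hypothetical stand-alone version of the new test() body (assumptions, not confirmed API):
    Ref<MLPPVector> z_test = weights->transposen()->mult_vec(x); // assumed: W^T * x
    z_test->add(bias);                                           // assumed: in-place z_test += bias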
@@ -665,26 +665,28 @@ void MLPPMatrix::subb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
 }

 void MLPPMatrix::mult(const Ref<MLPPMatrix> &B) {
+    ERR_FAIL_MSG("TODO");
+
     ERR_FAIL_COND(!B.is_valid());

     Size2i b_size = B->size();

-    ERR_FAIL_COND(_size != b_size);
+    ERR_FAIL_COND(_size.x != b_size.y || _size.y != b_size.x);
+
+    //TODO need to make a copy of this, resize, and use that to get results into this

     const real_t *b_ptr = B->ptr();
     real_t *c_ptr = ptrw();

-    for (int i = 0; i < _size.y; i++) {
-        for (int k = 0; k < b_size.y; k++) {
-            int ind_i_k = calculate_index(i, k);
+    for (int ay = 0; ay < _size.y; ay++) {
+        for (int by = 0; by < b_size.y; by++) {
+            int ind_ay_by = calculate_index(ay, by);

-            for (int j = 0; j < b_size.x; j++) {
-                int ind_i_j = calculate_index(i, j);
-                int ind_k_j = B->calculate_index(k, j);
+            for (int bx = 0; bx < b_size.x; bx++) {
+                int ind_ay_bx = calculate_index(ay, bx);
+                int ind_by_bx = B->calculate_index(by, bx);

-                c_ptr[ind_i_j] += c_ptr[ind_i_k] * b_ptr[ind_k_j];
-
-                //C->set_element(i, j, get_element(i, j) + get_element(i, k) * B->get_element(k, j
+                c_ptr[ind_ay_bx] += c_ptr[ind_ay_by] * b_ptr[ind_by_bx];
             }
         }
     }
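The ERR_FAIL_MSG("TODO") guard at the top disables mult() outright, and the new comment explains why: the loop uses c_ptr as both the left operand and the accumulator, so values written early in a row overwrite entries that later iterations still need to read, and the destination is never zeroed before accumulation. The usual remedy, sketched below in plain C++ (illustrative, not pmlpp API), is to accumulate into a fresh zeroed buffer and swap it in; note also that a general product only needs the inner dimensions to agree (A columns == B rows), a weaker condition than the committed guard.

    #include <vector>

    // Minimal sketch of an in-place multiply done correctly: compute A <- A * B
    // by building the result in a temporary, then swapping it into A's storage.
    static void mult_in_place(std::vector<double> &a, int a_rows, int a_cols,
            const std::vector<double> &b, int b_rows, int b_cols) {
        if (a_cols != b_rows) {
            return; // inner dimensions must agree: (a_rows x a_cols) * (b_rows x b_cols)
        }

        std::vector<double> c(a_rows * b_cols, 0.0); // fresh, zeroed destination

        for (int i = 0; i < a_rows; ++i) {
            for (int k = 0; k < a_cols; ++k) {
                const double a_ik = a[i * a_cols + k];
                for (int j = 0; j < b_cols; ++j) {
                    c[i * b_cols + j] += a_ik * b[k * b_cols + j];
                }
            }
        }

        a.swap(c); // the result replaces A; the shape is now a_rows x b_cols
    }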
@@ -694,11 +696,13 @@ Ref<MLPPMatrix> MLPPMatrix::multn(const Ref<MLPPMatrix> &B) const {

     Size2i b_size = B->size();

-    ERR_FAIL_COND_V(_size != b_size, Ref<MLPPMatrix>());
+    ERR_FAIL_COND_V_MSG(_size.y != b_size.x || _size.x != b_size.y, Ref<MLPPMatrix>(), "_size.y != b_size.x || _size.x != b_size.y _size: " + _size.operator String() + " b_size: " + b_size.operator String());
+
+    Size2i rs = Size2i(b_size.x, _size.y);

     Ref<MLPPMatrix> C;
     C.instance();
-    C->resize(_size);
+    C->resize(rs);

     const real_t *a_ptr = ptr();
     const real_t *b_ptr = B->ptr();
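The old validation (_size != b_size) only admitted same-shaped operands, a check suited to element-wise ops, and the result was sized as _size, which is wrong for non-square products. The new code sizes C as Size2i(b_size.x, _size.y): with Size2i holding (columns, rows), that is the m x p result of an (m x n) * (n x p) product. As committed, the guard also requires _size.y == b_size.x, so it appears to accept only operand pairs with mutually transposed shapes; a fully general product needs just _size.x == b_size.y. A worked shape example that satisfies the committed guard:

    // Illustrative shapes only (Size2i is (columns, rows); A and B are assumptions):
    // A is 2x3     -> _size  = Size2i(3, 2)
    // B is 3x2     -> b_size = Size2i(2, 3)
    // C = A * B is 2x2 -> rs = Size2i(b_size.x, _size.y) = Size2i(2, 2)
    Ref<MLPPMatrix> C = A->multn(B); // assumed: C->size() == Size2i(2, 2)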
@@ -727,10 +731,12 @@ void MLPPMatrix::multb(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B) {
     Size2i a_size = A->size();
     Size2i b_size = B->size();

-    ERR_FAIL_COND(a_size != b_size);
+    ERR_FAIL_COND_MSG(a_size.y != b_size.x || a_size.x != b_size.y, "a_size.y != b_size.x || a_size.x != b_size.y: a_size: " + a_size.operator String() + " b_size: " + b_size.operator String());

-    if (_size != a_size) {
-        resize(a_size);
+    Size2i rs = Size2i(b_size.x, a_size.y);
+
+    if (unlikely(_size != rs)) {
+        resize(rs);
     }

     const real_t *a_ptr = A->ptr();
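multb() follows the same shape logic but writes into this: validate the operands, compute rs = Size2i(b_size.x, a_size.y), and resize only when the current buffer differs, with unlikely() hinting that the steady-state case keeps the existing allocation. A hedged sketch of the reuse pattern that benefits (A, B, and num_steps are hypothetical):

    // Hypothetical reuse pattern (semantics inferred from the diff): the resize
    // inside multb fires on the first call; later calls reuse the allocation.
    Ref<MLPPMatrix> out;
    out.instance();
    for (int step = 0; step < num_steps; ++step) {
        out->multb(A, B); // assumed: out <- A * B
    }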