More api cleanups.

Relintai 2023-04-22 17:17:58 +02:00
parent da7659860a
commit 1823ffe616
59 changed files with 704 additions and 2406 deletions

View File

@@ -78,7 +78,7 @@ void MLPPANN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
ComputeGradientsResult grads = compute_gradients(_y_hat, _output_set);
-grads.cumulative_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
+grads.cumulative_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
grads.output_w_grad = alg.scalar_multiplynv(learning_rate / _n, grads.output_w_grad);
update_parameters(grads.cumulative_hidden_layer_w_grad, grads.output_w_grad, learning_rate); // subject to change. may want bias to have this matrix too.
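For reference, the scaling these renamed calls perform is the plain gradient-descent step (standard form; \eta is learning_rate, n is _n, and C is the cost):

W \leftarrow W - \frac{\eta}{n} \nabla_W C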
@@ -136,7 +136,7 @@ void MLPPANN::sgd(real_t learning_rate, int max_epoch, bool ui) {
ComputeGradientsResult grads = compute_gradients(y_hat_row_tmp, output_set_row_tmp);
-grads.cumulative_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
+grads.cumulative_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
grads.output_w_grad = alg.scalar_multiplynv(learning_rate / _n, grads.output_w_grad);
update_parameters(grads.cumulative_hidden_layer_w_grad, grads.output_w_grad, learning_rate); // subject to change. may want bias to have this matrix too.
@@ -183,7 +183,7 @@ void MLPPANN::mbgd(real_t learning_rate, int max_epoch, int mini_batch_size, boo
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
-grads.cumulative_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
+grads.cumulative_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, grads.cumulative_hidden_layer_w_grad);
grads.output_w_grad = alg.scalar_multiplynv(learning_rate / _n, grads.output_w_grad);
update_parameters(grads.cumulative_hidden_layer_w_grad, grads.output_w_grad, learning_rate); // subject to change. may want bias to have this matrix too.
@@ -238,7 +238,7 @@ void MLPPANN::momentum(real_t learning_rate, int max_epoch, int mini_batch_size,
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && v_hidden.empty()) { // Initing our tensor
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (v_output->size() == 0) {
@@ -249,7 +249,7 @@ void MLPPANN::momentum(real_t learning_rate, int max_epoch, int mini_batch_size,
update_parameters(v_hidden, v_output, 0); // DON'T update bias.
}
-v_hidden = alg.addition_vt(alg.scalar_multiply_vm(gamma, v_hidden), alg.scalar_multiply_vm(learning_rate / _n, grads.cumulative_hidden_layer_w_grad));
+v_hidden = alg.additionnvt(alg.scalar_multiplynvt(gamma, v_hidden), alg.scalar_multiplynvt(learning_rate / _n, grads.cumulative_hidden_layer_w_grad));
v_output = alg.additionnv(alg.scalar_multiplynv(gamma, v_output), alg.scalar_multiplynv(learning_rate / _n, grads.output_w_grad));
update_parameters(v_hidden, v_output, learning_rate); // subject to change. may want bias to have this matrix too.
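Written out, the momentum step this hunk implements is (\gamma is gamma, g = \nabla_W C):

v \leftarrow \gamma v + \frac{\eta}{n} g, \qquad W \leftarrow W - v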
@@ -304,17 +304,17 @@ void MLPPANN::adagrad(real_t learning_rate, int max_epoch, int mini_batch_size,
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && v_hidden.empty()) { // Initing our tensor
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (v_output->size() == 0) {
v_output->resize(grads.output_w_grad->size());
}
-v_hidden = alg.addition_vt(v_hidden, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2));
+v_hidden = alg.additionnvt(v_hidden, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden))));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
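The Adagrad update as implemented here, with the square, square root, and division taken elementwise (e is the stability constant):

v \leftarrow v + g^2, \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{g}{e + \sqrt{v}}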
@@ -368,17 +368,17 @@ void MLPPANN::adadelta(real_t learning_rate, int max_epoch, int mini_batch_size,
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && v_hidden.empty()) { // Initing our tensor
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (v_output->size() == 0) {
v_output->resize(grads.output_w_grad->size());
}
-v_hidden = alg.addition_vt(alg.scalar_multiply_vm(1 - b1, v_hidden), alg.scalar_multiply_vm(b1, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)));
+v_hidden = alg.additionnvt(alg.scalar_multiplynvt(1 - b1, v_hidden), alg.scalar_multiplynvt(b1, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
v_output = alg.additionnv(v_output, alg.exponentiatenv(grads.output_w_grad, 2));
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(grads.cumulative_hidden_layer_w_grad, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden))));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(grads.cumulative_hidden_layer_w_grad, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(grads.output_w_grad, alg.scalar_addnv(e, alg.sqrtnv(v_output))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
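The hidden-layer accumulator as implemented in this hunk is a decaying average of squared gradients, i.e. an RMSProp-style step (full Adadelta would also track squared parameter deltas):

v \leftarrow (1 - b_1)\, v + b_1\, g^2, \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{g}{e + \sqrt{v}}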
@@ -435,8 +435,8 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
-m_hidden = alg.resize_vt(m_hidden, grads.cumulative_hidden_layer_w_grad);
+m_hidden = alg.resizenvt(m_hidden, grads.cumulative_hidden_layer_w_grad);
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (m_output->size() == 0 && v_output->size()) {
@@ -444,19 +444,19 @@ void MLPPANN::adam(real_t learning_rate, int max_epoch, int mini_batch_size, rea
v_output->resize(grads.output_w_grad->size());
}
-m_hidden = alg.addition_vt(alg.scalar_multiply_vm(b1, m_hidden), alg.scalar_multiply_vm(1 - b1, grads.cumulative_hidden_layer_w_grad));
+m_hidden = alg.additionnvt(alg.scalar_multiplynvt(b1, m_hidden), alg.scalar_multiplynvt(1 - b1, grads.cumulative_hidden_layer_w_grad));
-v_hidden = alg.addition_vt(alg.scalar_multiply_vm(b2, v_hidden), alg.scalar_multiply_vm(1 - b2, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)));
+v_hidden = alg.additionnvt(alg.scalar_multiplynvt(b2, v_hidden), alg.scalar_multiplynvt(1 - b2, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
m_output = alg.additionnv(alg.scalar_multiplynv(b1, m_output), alg.scalar_multiplynv(1 - b1, grads.output_w_grad));
v_output = alg.additionnv(alg.scalar_multiplynv(b2, v_output), alg.scalar_multiplynv(1 - b2, alg.exponentiatenv(grads.output_w_grad, 2)));
-Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiply_vm(1 / (1 - Math::pow(b1, epoch)), m_hidden);
+Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiplynvt(1 / (1 - Math::pow(b1, epoch)), m_hidden);
-Vector<Ref<MLPPMatrix>> v_hidden_hat = alg.scalar_multiply_vm(1 / (1 - Math::pow(b2, epoch)), v_hidden);
+Vector<Ref<MLPPMatrix>> v_hidden_hat = alg.scalar_multiplynvt(1 / (1 - Math::pow(b2, epoch)), v_hidden);
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b2, epoch)), v_output);
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
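Written out, these lines are the standard bias-corrected Adam update at epoch t:

m \leftarrow b_1 m + (1 - b_1) g, \qquad v \leftarrow b_2 v + (1 - b_2) g^2
\hat{m} = \frac{m}{1 - b_1^t}, \qquad \hat{v} = \frac{v}{1 - b_2^t}, \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{\hat{m}}{e + \sqrt{\hat{v}}}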
@@ -510,8 +510,8 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && m_hidden.empty() && u_hidden.empty()) { // Initing our tensor
-m_hidden = alg.resize_vt(m_hidden, grads.cumulative_hidden_layer_w_grad);
+m_hidden = alg.resizenvt(m_hidden, grads.cumulative_hidden_layer_w_grad);
-u_hidden = alg.resize_vt(u_hidden, grads.cumulative_hidden_layer_w_grad);
+u_hidden = alg.resizenvt(u_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (m_output->size() == 0 && u_output->size() == 0) {
@@ -519,17 +519,17 @@ void MLPPANN::adamax(real_t learning_rate, int max_epoch, int mini_batch_size, r
u_output->resize(grads.output_w_grad->size());
}
-m_hidden = alg.addition_vt(alg.scalar_multiply_vm(b1, m_hidden), alg.scalar_multiply_vm(1 - b1, grads.cumulative_hidden_layer_w_grad));
+m_hidden = alg.additionnvt(alg.scalar_multiplynvt(b1, m_hidden), alg.scalar_multiplynvt(1 - b1, grads.cumulative_hidden_layer_w_grad));
-u_hidden = alg.max_vt(alg.scalar_multiply_vm(b2, u_hidden), alg.abs_vt(grads.cumulative_hidden_layer_w_grad));
+u_hidden = alg.maxnvt(alg.scalar_multiplynvt(b2, u_hidden), alg.absnvt(grads.cumulative_hidden_layer_w_grad));
m_output = alg.additionnv(alg.scalar_multiplynv(b1, m_output), alg.scalar_multiplynv(1 - b1, grads.output_w_grad));
u_output = alg.maxnvv(alg.scalar_multiplynv(b2, u_output), alg.absv(grads.output_w_grad));
-Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiply_vm(1 / (1 - Math::pow(b1, epoch)), m_hidden);
+Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiplynvt(1 / (1 - Math::pow(b1, epoch)), m_hidden);
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1 - Math::pow(b1, epoch)), m_output);
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_hat, alg.scalar_add_vm(e, u_hidden)));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_hat, alg.scalar_addnvt(e, u_hidden)));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output_hat, alg.scalar_addnv(e, u_output)));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
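The Adamax variant replaces the second-moment average with the infinity-norm accumulator u that the maxnvt/maxnvv calls compute:

m \leftarrow b_1 m + (1 - b_1) g, \qquad u \leftarrow \max(b_2 u, |g|), \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{\hat{m}}{e + u}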
@@ -583,8 +583,8 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
-m_hidden = alg.resize_vt(m_hidden, grads.cumulative_hidden_layer_w_grad);
+m_hidden = alg.resizenvt(m_hidden, grads.cumulative_hidden_layer_w_grad);
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
}
if (m_output->size() == 0 && v_output->size() == 0) {
@@ -592,21 +592,21 @@ void MLPPANN::nadam(real_t learning_rate, int max_epoch, int mini_batch_size, re
v_output->resize(grads.output_w_grad->size());
}
-m_hidden = alg.addition_vt(alg.scalar_multiply_vm(b1, m_hidden), alg.scalar_multiply_vm(1 - b1, grads.cumulative_hidden_layer_w_grad));
+m_hidden = alg.additionnvt(alg.scalar_multiplynvt(b1, m_hidden), alg.scalar_multiplynvt(1 - b1, grads.cumulative_hidden_layer_w_grad));
-v_hidden = alg.addition_vt(alg.scalar_multiply_vm(b2, v_hidden), alg.scalar_multiply_vm(1 - b2, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)));
+v_hidden = alg.additionnvt(alg.scalar_multiplynvt(b2, v_hidden), alg.scalar_multiplynvt(1 - b2, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
m_output = alg.additionnv(alg.scalar_multiplynv(b1, m_output), alg.scalar_multiplynv(1 - b1, grads.output_w_grad));
v_output = alg.additionnv(alg.scalar_multiplynv(b2, v_output), alg.scalar_multiplynv(1 - b2, alg.exponentiatenv(grads.output_w_grad, 2)));
-Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiply_vm(1 / (1.0 - Math::pow(b1, epoch)), m_hidden);
+Vector<Ref<MLPPMatrix>> m_hidden_hat = alg.scalar_multiplynvt(1 / (1.0 - Math::pow(b1, epoch)), m_hidden);
-Vector<Ref<MLPPMatrix>> v_hidden_hat = alg.scalar_multiply_vm(1 / (1.0 - Math::pow(b2, epoch)), v_hidden);
+Vector<Ref<MLPPMatrix>> v_hidden_hat = alg.scalar_multiplynvt(1 / (1.0 - Math::pow(b2, epoch)), v_hidden);
-Vector<Ref<MLPPMatrix>> m_hidden_final = alg.addition_vt(alg.scalar_multiply_vm(b1, m_hidden_hat), alg.scalar_multiply_vm((1 - b1) / (1 - Math::pow(b1, epoch)), grads.cumulative_hidden_layer_w_grad));
+Vector<Ref<MLPPMatrix>> m_hidden_final = alg.additionnvt(alg.scalar_multiplynvt(b1, m_hidden_hat), alg.scalar_multiplynvt((1 - b1) / (1 - Math::pow(b1, epoch)), grads.cumulative_hidden_layer_w_grad));
Ref<MLPPVector> m_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b1, epoch)), m_output);
Ref<MLPPVector> v_output_hat = alg.scalar_multiplynv(1 / (1.0 - Math::pow(b2, epoch)), v_output);
Ref<MLPPVector> m_output_final = alg.additionnv(alg.scalar_multiplynv(b1, m_output_hat), alg.scalar_multiplynv((1 - b1) / (1.0 - Math::pow(b1, epoch)), grads.output_w_grad));
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden_final, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden_final, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnvnm(m_output_final, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
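Nadam's "final" moment adds a Nesterov look-ahead on top of the bias-corrected first moment, which is what the m_hidden_final/m_output_final lines compute:

\tilde{m} = b_1 \hat{m} + \frac{1 - b_1}{1 - b_1^t}\, g, \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{\tilde{m}}{e + \sqrt{\hat{v}}}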
@@ -666,9 +666,9 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
ComputeGradientsResult grads = compute_gradients(y_hat, current_output_batch);
if (!_network.empty() && m_hidden.empty() && v_hidden.empty()) { // Initing our tensor
-m_hidden = alg.resize_vt(m_hidden, grads.cumulative_hidden_layer_w_grad);
+m_hidden = alg.resizenvt(m_hidden, grads.cumulative_hidden_layer_w_grad);
-v_hidden = alg.resize_vt(v_hidden, grads.cumulative_hidden_layer_w_grad);
+v_hidden = alg.resizenvt(v_hidden, grads.cumulative_hidden_layer_w_grad);
-v_hidden_hat = alg.resize_vt(v_hidden_hat, grads.cumulative_hidden_layer_w_grad);
+v_hidden_hat = alg.resizenvt(v_hidden_hat, grads.cumulative_hidden_layer_w_grad);
}
if (m_output->size() == 0 && v_output->size() == 0) {
@@ -677,16 +677,16 @@ void MLPPANN::amsgrad(real_t learning_rate, int max_epoch, int mini_batch_size,
v_output_hat->resize(grads.output_w_grad->size());
}
-m_hidden = alg.addition_vt(alg.scalar_multiply_vm(b1, m_hidden), alg.scalar_multiply_vm(1 - b1, grads.cumulative_hidden_layer_w_grad));
+m_hidden = alg.additionnvt(alg.scalar_multiplynvt(b1, m_hidden), alg.scalar_multiplynvt(1 - b1, grads.cumulative_hidden_layer_w_grad));
-v_hidden = alg.addition_vt(alg.scalar_multiply_vm(b2, v_hidden), alg.scalar_multiply_vm(1 - b2, alg.exponentiate_vt(grads.cumulative_hidden_layer_w_grad, 2)));
+v_hidden = alg.additionnvt(alg.scalar_multiplynvt(b2, v_hidden), alg.scalar_multiplynvt(1 - b2, alg.exponentiatenvt(grads.cumulative_hidden_layer_w_grad, 2)));
m_output = alg.additionnv(alg.scalar_multiplynv(b1, m_output), alg.scalar_multiplynv(1 - b1, grads.output_w_grad));
v_output = alg.additionnv(alg.scalar_multiplynv(b2, v_output), alg.scalar_multiplynv(1 - b2, alg.exponentiatenv(grads.output_w_grad, 2)));
-v_hidden_hat = alg.max_vt(v_hidden_hat, v_hidden);
+v_hidden_hat = alg.maxnvt(v_hidden_hat, v_hidden);
v_output_hat = alg.maxnvv(v_output_hat, v_output);
-Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiply_vm(learning_rate / _n, alg.element_wise_divisionnv_vt(m_hidden, alg.scalar_add_vm(e, alg.sqrt_vt(v_hidden_hat))));
+Vector<Ref<MLPPMatrix>> hidden_layer_updations = alg.scalar_multiplynvt(learning_rate / _n, alg.element_wise_divisionnvnvt(m_hidden, alg.scalar_addnvt(e, alg.sqrtnvt(v_hidden_hat))));
Ref<MLPPVector> output_layer_updation = alg.scalar_multiplynv(learning_rate / _n, alg.element_wise_divisionnv(m_output, alg.scalar_addnv(e, alg.sqrtnv(v_output_hat))));
update_parameters(hidden_layer_updations, output_layer_updation, learning_rate); // subject to change. may want bias to have this matrix too.
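AMSGrad keeps the running maximum of the second moment, which is what the maxnvt/maxnvv calls compute; no bias correction is applied in this version:

\hat{v} \leftarrow \max(\hat{v}, v), \qquad W \leftarrow W - \frac{\eta}{n} \cdot \frac{m}{e + \sqrt{\hat{v}}}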

View File

@@ -6,9 +6,9 @@
#include "ann_old.h"
#include "../activation/activation_old.h"
-#include "../cost/cost.h"
+#include "../cost/cost_old.h"
#include "../lin_alg/lin_alg_old.h"
-#include "../regularization/reg.h"
+#include "../regularization/reg_old.h"
#include "../utilities/utilities.h"
#include <cmath>
@@ -61,7 +61,6 @@ real_t MLPPANNOld::modelTest(std::vector<real_t> x) {
}
void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -97,7 +96,6 @@ void MLPPANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
}
void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -138,7 +136,6 @@ void MLPPANNOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
}
void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -183,7 +180,6 @@ void MLPPANNOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size,
}
void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool NAG, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -245,7 +241,6 @@ void MLPPANNOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_si
}
void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -306,7 +301,6 @@ void MLPPANNOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_siz
}
void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -367,7 +361,6 @@ void MLPPANNOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_si
}
void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -440,7 +433,6 @@ void MLPPANNOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size,
}
void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -511,7 +503,6 @@ void MLPPANNOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size
}
void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -586,7 +577,6 @@ void MLPPANNOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size,
}
void MLPPANNOld::AMSGrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
-class MLPPCost cost;
MLPPLinAlgOld alg;
real_t cost_prev = 0;
@@ -726,8 +716,8 @@ void MLPPANNOld::addOutputLayer(std::string activation, std::string loss, std::s
}
real_t MLPPANNOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPReg regularization;
+MLPPRegOld regularization;
-class MLPPCost cost;
+class MLPPCostOld cost;
real_t totalRegTerm = 0;
auto cost_function = outputLayer->cost_map[outputLayer->cost];
@@ -775,10 +765,10 @@ void MLPPANNOld::updateParameters(std::vector<std::vector<std::vector<real_t>>>
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPANNOld::computeGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
// std::cout << "BEGIN" << std::endl;
-class MLPPCost cost;
+class MLPPCostOld cost;
MLPPActivationOld avn;
MLPPLinAlgOld alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@@ -7,8 +7,8 @@
#include "auto_encoder_old.h"
#include "../activation/activation_old.h"
-#include "../cost/cost.h"
+#include "../cost/cost_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
#include "../utilities/utilities.h"
#include <iostream>
@@ -24,7 +24,7 @@ std::vector<real_t> MLPPAutoEncoderOld::modelTest(std::vector<real_t> x) {
void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
real_t cost_prev = 0;
int epoch = 1;
forwardPass();
@@ -77,7 +77,7 @@ void MLPPAutoEncoderOld::gradientDescent(real_t learning_rate, int max_epoch, bo
void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -131,7 +131,7 @@ void MLPPAutoEncoderOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
void MLPPAutoEncoderOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
real_t cost_prev = 0;
int epoch = 1;
@@ -219,12 +219,12 @@ MLPPAutoEncoderOld::MLPPAutoEncoderOld(std::vector<std::vector<real_t>> pinputSe
}
real_t MLPPAutoEncoderOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-class MLPPCost cost;
+class MLPPCostOld cost;
return cost.MSE(y_hat, inputSet);
}
std::vector<std::vector<real_t>> MLPPAutoEncoderOld::Evaluate(std::vector<std::vector<real_t>> X) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@@ -232,7 +232,7 @@ std::vector<std::vector<real_t>> MLPPAutoEncoderOld::Evaluate(std::vector<std::v
}
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPAutoEncoderOld::propagate(std::vector<std::vector<real_t>> X) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@@ -240,7 +240,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M
}
std::vector<real_t> MLPPAutoEncoderOld::Evaluate(std::vector<real_t> x) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<real_t> a2 = avn.sigmoid(z2);
@@ -248,7 +248,7 @@ std::vector<real_t> MLPPAutoEncoderOld::Evaluate(std::vector<real_t> x) {
}
std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPAutoEncoderOld::propagate(std::vector<real_t> x) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<real_t> a2 = avn.sigmoid(z2);
@@ -256,7 +256,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPAutoEncoderOld::propaga
}
void MLPPAutoEncoderOld::forwardPass() {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2);
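The forward pass shown above is a single sigmoid hidden layer applied to the input matrix:

z_2 = X W_1 + b_1, \qquad a_2 = \sigma(z_2)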

View File

@@ -6,7 +6,7 @@
#include "bernoulli_nb_old.h"
#include "../data/data.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
#include "../utilities/utilities.h"
#include <iostream>
@@ -77,7 +77,7 @@ real_t MLPPBernoulliNBOld::score() {
}
void MLPPBernoulliNBOld::computeVocab() {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPData data;
vocab = data.vecToSet<real_t>(alg.flatten(inputSet));
}

View File

@@ -7,9 +7,9 @@
#include "c_log_log_reg_old.h"
#include "../activation/activation_old.h"
-#include "../cost/cost.h"
+#include "../cost/cost_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
-#include "../regularization/reg.h"
+#include "../regularization/reg_old.h"
#include "../utilities/utilities.h"
#include <iostream>
@@ -32,8 +32,8 @@ real_t MLPPCLogLogRegOld::modelTest(std::vector<real_t> x) {
void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
real_t cost_prev = 0;
int epoch = 1;
forwardPass();
@@ -66,8 +66,8 @@ void MLPPCLogLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, boo
void MLPPCLogLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
real_t cost_prev = 0;
int epoch = 1;
forwardPass();
@@ -97,8 +97,8 @@ void MLPPCLogLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
}
void MLPPCLogLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
real_t cost_prev = 0;
int epoch = 1;
forwardPass();
@@ -139,8 +139,8 @@ void MLPPCLogLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
void MLPPCLogLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivationOld avn;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
real_t cost_prev = 0;
int epoch = 1;
@@ -188,30 +188,30 @@ real_t MLPPCLogLogRegOld::score() {
}
real_t MLPPCLogLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPReg regularization;
+MLPPRegOld regularization;
-class MLPPCost cost;
+class MLPPCostOld cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
}
std::vector<real_t> MLPPCLogLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
return avn.cloglog(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
}
std::vector<real_t> MLPPCLogLogRegOld::propagate(std::vector<std::vector<real_t>> X) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
}
real_t MLPPCLogLogRegOld::Evaluate(std::vector<real_t> x) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
MLPPActivationOld avn;
return avn.cloglog(alg.dot(weights, x) + bias);
}
real_t MLPPCLogLogRegOld::propagate(std::vector<real_t> x) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.dot(weights, x) + bias;
}
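Assuming the library's cloglog activation is the standard inverse complementary log-log link (not confirmed in this diff), the model evaluated above is:

\hat{y} = 1 - e^{-e^{z}}, \qquad z = w \cdot x + b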

View File

@@ -10,6 +10,7 @@
#include <cmath>
#include <iostream>
+/*
std::vector<std::vector<real_t>> MLPPConvolutions::convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P) {
MLPPLinAlg alg;
std::vector<std::vector<real_t>> feature_map;
@@ -219,12 +220,12 @@ std::vector<std::vector<real_t>> MLPPConvolutions::gaussian_filter_2d(int size,
return filter;
}
-/*
-Indeed a filter could have been used for this purpose, but I decided that it would've just
+// Indeed a filter could have been used for this purpose, but I decided that it would've just
-been easier to carry out the calculation explicitly, mainly because it is more informative,
+// been easier to carry out the calculation explicitly, mainly because it is more informative,
-and also because my convolution algorithm is only built for filters with equally sized
+// and also because my convolution algorithm is only built for filters with equally sized
-heights and widths.
+// heights and widths.
-*/
std::vector<std::vector<real_t>> MLPPConvolutions::dx(std::vector<std::vector<real_t>> input) {
std::vector<std::vector<real_t>> deriv; // We assume a gray scale image.
deriv.resize(input.size());
@@ -360,8 +361,10 @@ std::vector<std::vector<real_t>> MLPPConvolutions::get_roberts_horizontal() {
std::vector<std::vector<real_t>> MLPPConvolutions::get_roberts_vertical() {
return _roberts_vertical;
}
+*/
MLPPConvolutions::MLPPConvolutions() {
+/*
_prewitt_horizontal = { { 1, 1, 1 }, { 0, 0, 0 }, { -1, -1, -1 } };
_prewitt_vertical = { { 1, 0, -1 }, { 1, 0, -1 }, { 1, 0, -1 } };
_sobel_horizontal = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
@@ -370,6 +373,7 @@ MLPPConvolutions::MLPPConvolutions() {
_scharr_vertical = { { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } };
_roberts_horizontal = { { 0, 1 }, { -1, 0 } };
_roberts_vertical = { { 1, 0 }, { 0, -1 } };
+*/
}
void MLPPConvolutions::_bind_methods() {

View File

@@ -13,6 +13,7 @@ class MLPPConvolutions : public Reference {
GDCLASS(MLPPConvolutions, Reference);
public:
+/*
std::vector<std::vector<real_t>> convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P = 0);
std::vector<std::vector<std::vector<real_t>>> convolve_3d(std::vector<std::vector<std::vector<real_t>>> input, std::vector<std::vector<std::vector<real_t>>> filter, int S, int P = 0);
@@ -42,12 +43,14 @@ public:
std::vector<std::vector<real_t>> get_scharr_vertical();
std::vector<std::vector<real_t>> get_roberts_horizontal();
std::vector<std::vector<real_t>> get_roberts_vertical();
+*/
MLPPConvolutions();
protected:
static void _bind_methods();
+/*
std::vector<std::vector<real_t>> _prewitt_horizontal;
std::vector<std::vector<real_t>> _prewitt_vertical;
std::vector<std::vector<real_t>> _sobel_horizontal;
@@ -56,6 +59,7 @@ protected:
std::vector<std::vector<real_t>> _scharr_vertical;
std::vector<std::vector<real_t>> _roberts_horizontal;
std::vector<std::vector<real_t>> _roberts_vertical;
+*/
};
#endif // Convolutions_hpp

View File

@@ -6,13 +6,13 @@
#include "../convolutions/convolutions_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
-#include "../stat/stat.h"
+#include "../stat/stat_old.h"
#include <cmath>
#include <iostream>
std::vector<std::vector<real_t>> MLPPConvolutionsOld::convolve_2d(std::vector<std::vector<real_t>> input, std::vector<std::vector<real_t>> filter, int S, int P) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> feature_map;
uint32_t N = input.size();
uint32_t F = filter.size();
@@ -68,7 +68,7 @@ std::vector<std::vector<real_t>> MLPPConvolutionsOld::convolve_2d(std::vector<st
}
std::vector<std::vector<std::vector<real_t>>> MLPPConvolutionsOld::convolve_3d(std::vector<std::vector<std::vector<real_t>>> input, std::vector<std::vector<std::vector<real_t>>> filter, int S, int P) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> feature_map;
uint32_t N = input[0].size();
uint32_t F = filter[0].size();
@@ -134,7 +134,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPConvolutionsOld::convolve_3d(s
}
std::vector<std::vector<real_t>> MLPPConvolutionsOld::pool_2d(std::vector<std::vector<real_t>> input, int F, int S, std::string type) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> pooled_map;
uint32_t N = input.size();
uint32_t map_size = floor((N - F) / S + 1);
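The map_size computed here is the usual output-size formula for a window of size F sliding with stride S over an N-wide input (with padding P, as in the convolve functions, the numerator becomes N - F + 2P):

\text{map\_size} = \left\lfloor \frac{N - F}{S} \right\rfloor + 1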
@@ -161,7 +161,7 @@ std::vector<std::vector<real_t>> MLPPConvolutionsOld::pool_2d(std::vector<std::v
}
}
if (type == "Average") {
-MLPPStat stat;
+MLPPStatOld stat;
pooled_map[i][j] = stat.mean(pooling_input);
} else if (type == "Min") {
pooled_map[i][j] = alg.min(pooling_input);
@@ -182,9 +182,9 @@ std::vector<std::vector<std::vector<real_t>>> MLPPConvolutionsOld::pool_3d(std::
}
real_t MLPPConvolutionsOld::global_pool_2d(std::vector<std::vector<real_t>> input, std::string type) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
if (type == "Average") {
-MLPPStat stat;
+MLPPStatOld stat;
return stat.mean(alg.flatten(input));
} else if (type == "Min") {
return alg.min(alg.flatten(input));
@@ -269,7 +269,7 @@ std::vector<std::vector<real_t>> MLPPConvolutionsOld::dy(std::vector<std::vector
}
std::vector<std::vector<real_t>> MLPPConvolutionsOld::grad_magnitude(std::vector<std::vector<real_t>> input) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> x_deriv_2 = alg.hadamard_product(dx(input), dx(input));
std::vector<std::vector<real_t>> y_deriv_2 = alg.hadamard_product(dy(input), dy(input));
return alg.sqrt(alg.addition(x_deriv_2, y_deriv_2));
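grad_magnitude combines the two directional derivatives into the usual gradient magnitude, computed elementwise over the image:

|\nabla I| = \sqrt{I_x^2 + I_y^2}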
@@ -298,7 +298,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPConvolutionsOld::compute_m(std
real_t const GAUSSIAN_PADDING = ((input.size() - 1) + GAUSSIAN_SIZE - input.size()) / 2; // Convs must be same.
std::cout << GAUSSIAN_PADDING << std::endl;
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> x_deriv = dx(input);
std::vector<std::vector<real_t>> y_deriv = dy(input);
@@ -312,7 +312,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPConvolutionsOld::compute_m(std
}
std::vector<std::vector<std::string>> MLPPConvolutionsOld::harris_corner_detection(std::vector<std::vector<real_t>> input) {
real_t const k = 0.05; // Empirically determined wherein k -> [0.04, 0.06], though conventionally 0.05 is typically used as well.
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> M = compute_m(input);
std::vector<std::vector<real_t>> det = alg.subtraction(alg.hadamard_product(M[0], M[1]), alg.hadamard_product(M[2], M[2]));
std::vector<std::vector<real_t>> trace = alg.addition(M[0], M[1]);
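With det and trace computed from the structure tensor M as above, the Harris response this detector thresholds is the standard one:

R = \det(M) - k\, (\operatorname{tr} M)^2, \qquad k = 0.05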

View File

@ -763,401 +763,6 @@ Ref<MLPPMatrix> MLPPCost::run_cost_deriv_matrix(const CostTypes cost, const Ref<
} }
} }
// ====== OLD ======
real_t MLPPCost::MSE(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
}
return sum / 2 * y_hat.size();
}
real_t MLPPCost::MSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += (y_hat[i][j] - y[i][j]) * (y_hat[i][j] - y[i][j]);
}
}
return sum / 2 * y_hat.size();
}
std::vector<real_t> MLPPCost::MSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.subtraction(y_hat, y);
}
std::vector<std::vector<real_t>> MLPPCost::MSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.subtraction(y_hat, y);
}
real_t MLPPCost::RMSE(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += (y_hat[i] - y[i]) * (y_hat[i] - y[i]);
}
return sqrt(sum / y_hat.size());
}
real_t MLPPCost::RMSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += (y_hat[i][j] - y[i][j]) * (y_hat[i][j] - y[i][j]);
}
}
return sqrt(sum / y_hat.size());
}
std::vector<real_t> MLPPCost::RMSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
std::vector<std::vector<real_t>> MLPPCost::RMSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(1 / (2 / sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
real_t MLPPCost::MAE(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += abs((y_hat[i] - y[i]));
}
return sum / y_hat.size();
}
real_t MLPPCost::MAE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += abs((y_hat[i][j] - y[i][j]));
}
}
return sum / y_hat.size();
}
std::vector<real_t> MLPPCost::MAEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
std::vector<real_t> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < deriv.size(); i++) {
if (y_hat[i] < 0) {
deriv[i] = -1;
} else if (y_hat[i] == 0) {
deriv[i] = 0;
} else {
deriv[i] = 1;
}
}
return deriv;
}
std::vector<std::vector<real_t>> MLPPCost::MAEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
std::vector<std::vector<real_t>> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < deriv.size(); i++) {
deriv.resize(y_hat[i].size());
}
for (uint32_t i = 0; i < deriv.size(); i++) {
for (uint32_t j = 0; j < deriv[i].size(); j++) {
if (y_hat[i][j] < 0) {
deriv[i][j] = -1;
} else if (y_hat[i][j] == 0) {
deriv[i][j] = 0;
} else {
deriv[i][j] = 1;
}
}
}
return deriv;
}
real_t MLPPCost::MBE(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += (y_hat[i] - y[i]);
}
return sum / y_hat.size();
}
real_t MLPPCost::MBE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += (y_hat[i][j] - y[i][j]);
}
}
return sum / y_hat.size();
}
std::vector<real_t> MLPPCost::MBEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.onevec(y_hat.size());
}
std::vector<std::vector<real_t>> MLPPCost::MBEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.onemat(y_hat.size(), y_hat[0].size());
}
real_t MLPPCost::LogLoss(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
real_t eps = 1e-8;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += -(y[i] * std::log(y_hat[i] + eps) + (1 - y[i]) * std::log(1 - y_hat[i] + eps));
}
return sum / y_hat.size();
}
real_t MLPPCost::LogLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
real_t eps = 1e-8;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += -(y[i][j] * std::log(y_hat[i][j] + eps) + (1 - y[i][j]) * std::log(1 - y_hat[i][j] + eps));
}
}
return sum / y_hat.size();
}
std::vector<real_t> MLPPCost::LogLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
std::vector<std::vector<real_t>> MLPPCost::LogLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
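Note that eps only guards the logarithms inside LogLoss; LogLossDeriv divides by y_hat and (1 - y_hat) unguarded, so callers feeding it hard 0/1 predictions may want to clamp first (a suggested guard, not something this file does):
real_t eps = 1e-8;
for (uint32_t i = 0; i < y_hat.size(); i++) {
y_hat[i] = fmin(fmax(y_hat[i], eps), 1 - eps); // keeps both divisions finite
}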
real_t MLPPCost::CrossEntropy(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += y[i] * std::log(y_hat[i]);
}
return -1 * sum;
}
real_t MLPPCost::CrossEntropy(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += y[i][j] * std::log(y_hat[i][j]);
}
}
return -1 * sum;
}
std::vector<real_t> MLPPCost::CrossEntropyDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
std::vector<std::vector<real_t>> MLPPCost::CrossEntropyDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
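When y_hat is produced by a softmax layer, composing this derivative with the softmax Jacobian collapses to the familiar y_hat - y, which is why many implementations fuse the two steps. With the helpers already used in this file, that fused gradient would be (a sketch, not an API this class exposes):
MLPPLinAlg alg;
std::vector<real_t> fused_grad = alg.subtraction(y_hat, y);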
real_t MLPPCost::HuberLoss(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
if (std::abs(y[i] - y_hat[i]) <= delta) {
sum += (y[i] - y_hat[i]) * (y[i] - y_hat[i]);
} else {
sum += 2 * delta * std::abs(y[i] - y_hat[i]) - delta * delta;
}
}
return sum;
}
real_t MLPPCost::HuberLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
if (std::abs(y[i][j] - y_hat[i][j]) <= delta) {
sum += (y[i][j] - y_hat[i][j]) * (y[i][j] - y_hat[i][j]);
} else {
sum += 2 * delta * std::abs(y[i][j] - y_hat[i][j]) - delta * delta;
}
}
}
return sum;
}
std::vector<real_t> MLPPCost::HuberLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta) {
std::vector<real_t> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < y_hat.size(); i++) {
real_t diff = y_hat[i] - y[i];
if (std::abs(diff) <= delta) {
deriv[i] = diff;
} else if (diff != 0) {
deriv[i] = 2 * delta * (diff / std::abs(diff));
} else {
deriv[i] = 0;
}
}
return deriv;
}
std::vector<std::vector<real_t>> MLPPCost::HuberLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta) {
std::vector<std::vector<real_t>> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < deriv.size(); i++) {
deriv[i].resize(y_hat[i].size());
}
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
real_t diff = y_hat[i][j] - y[i][j];
if (std::abs(diff) <= delta) {
deriv[i][j] = diff;
} else if (diff != 0) {
deriv[i][j] = 2 * delta * (diff / std::abs(diff));
} else {
deriv[i][j] = 0;
}
}
}
return deriv;
}
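HuberLoss is quadratic inside the delta band and linear outside it, so large residuals are penalized far less aggressively than under MSE. A worked check (hypothetical values):
MLPPCost cost;
std::vector<real_t> y_hat = { 0.5, 3.0 };
std::vector<real_t> y = { 0.0, 0.0 };
// |e| = 0.5 <= delta contributes 0.5^2 = 0.25; |e| = 3.0 > delta contributes 2 * 1 * 3 - 1^2 = 5.
real_t loss = cost.HuberLoss(y_hat, y, 1.0); // 5.25 (the sums above are not averaged)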
real_t MLPPCost::HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += fmax(0, 1 - y[i] * y_hat[i]);
}
return sum / y_hat.size();
}
real_t MLPPCost::HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += fmax(0, 1 - y[i][j] * y_hat[i][j]);
}
}
return sum / y_hat.size();
}
std::vector<real_t> MLPPCost::HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
std::vector<real_t> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < y_hat.size(); i++) {
if (1 - y[i] * y_hat[i] > 0) {
deriv[i] = -y[i];
} else {
deriv[i] = 0;
}
}
return deriv;
}
std::vector<std::vector<real_t>> MLPPCost::HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
std::vector<std::vector<real_t>> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < y_hat.size(); i++) {
deriv[i].resize(y_hat[i].size());
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
if (1 - y[i][j] * y_hat[i][j] > 0) {
deriv[i][j] = -y[i][j];
} else {
deriv[i][j] = 0;
}
}
}
return deriv;
}
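Hinge loss only penalizes samples whose margin y * y_hat falls below 1, and the derivative is -y exactly on those samples, 0 elsewhere. A quick check (hypothetical values):
MLPPCost cost;
std::vector<real_t> y_hat = { 0.3, 2.0 };
std::vector<real_t> y = { 1.0, 1.0 };
real_t loss = cost.HingeLoss(y_hat, y); // (max(0, 0.7) + max(0, -1)) / 2 = 0.35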
real_t MLPPCost::WassersteinLoss(std::vector<real_t> y_hat, std::vector<real_t> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
sum += y_hat[i] * y[i];
}
return -sum / y_hat.size();
}
real_t MLPPCost::WassersteinLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
sum += y_hat[i][j] * y[i][j];
}
}
return -sum / y_hat.size();
}
std::vector<real_t> MLPPCost::WassersteinLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(-1, y); // Simple.
}
std::vector<std::vector<real_t>> MLPPCost::WassersteinLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPLinAlg alg;
return alg.scalarMultiply(-1, y); // Simple.
}
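With the usual WGAN labeling of +1 for real and -1 for generated samples, WassersteinLoss is the negated mean critic-score gap, which is why its gradient with respect to the scores is just -y. For instance (hypothetical values):
MLPPCost cost;
std::vector<real_t> scores = { 0.8, 0.2 };
std::vector<real_t> labels = { 1.0, -1.0 };
real_t loss = cost.WassersteinLoss(scores, labels); // -(0.8 - 0.2) / 2 = -0.3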
real_t MLPPCost::HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y, std::vector<real_t> weights, real_t C) {
MLPPReg regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
real_t MLPPCost::HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, std::vector<std::vector<real_t>> weights, real_t C) {
MLPPReg regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
std::vector<real_t> MLPPCost::HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t C) {
MLPPLinAlg alg;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
std::vector<std::vector<real_t>> MLPPCost::HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t C) {
MLPPLinAlg alg;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
real_t MLPPCost::dualFormSVM(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
MLPPLinAlg alg;
std::vector<std::vector<real_t>> Y = alg.diag(y); // Y is a diagonal matrix. Y[i][j] = y[i] if i == j, else Y[i][j] = 0. Yt = Y.
std::vector<std::vector<real_t>> K = alg.matmult(X, alg.transpose(X)); // TODO: add non-linear kernelizations.
std::vector<std::vector<real_t>> Q = alg.matmult(alg.matmult(alg.transpose(Y), K), Y);
real_t alphaQ = alg.matmult(alg.matmult({ alpha }, Q), alg.transpose({ alpha }))[0][0];
std::vector<real_t> one = alg.onevec(alpha.size());
return -alg.dot(one, alpha) + 0.5 * alphaQ;
}
std::vector<real_t> MLPPCost::dualFormSVMDeriv(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
MLPPLinAlg alg;
std::vector<std::vector<real_t>> Y = alg.zeromat(y.size(), y.size());
for (uint32_t i = 0; i < y.size(); i++) {
Y[i][i] = y[i]; // Y is a diagonal matrix. Y[i][j] = y[i] if i == j, else Y[i][j] = 0. Yt = Y.
}
std::vector<std::vector<real_t>> K = alg.matmult(X, alg.transpose(X)); // TODO: add non-linear kernelizations.
std::vector<std::vector<real_t>> Q = alg.matmult(alg.matmult(alg.transpose(Y), K), Y);
std::vector<real_t> alphaQDeriv = alg.mat_vec_mult(Q, alpha);
std::vector<real_t> one = alg.onevec(alpha.size());
return alg.subtraction(alphaQDeriv, one);
}
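A typical consumer of this pair is a projected-gradient loop on the dual objective, clamping alpha back into the box [0, C] after every step. A minimal sketch (alpha, X, y, learning_rate, and C are hypothetical caller-supplied values):
MLPPCost cost;
MLPPLinAlg alg;
std::vector<real_t> grad = cost.dualFormSVMDeriv(alpha, X, y);
alpha = alg.subtraction(alpha, alg.scalarMultiply(learning_rate, grad));
for (uint32_t i = 0; i < alpha.size(); i++) {
alpha[i] = fmin(fmax(alpha[i], 0), C); // project onto the C-SVM box constraint
}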
void MLPPCost::_bind_methods() {
ClassDB::bind_method(D_METHOD("msev", "y_hat", "y"), &MLPPCost::msev);
ClassDB::bind_method(D_METHOD("msem", "y_hat", "y"), &MLPPCost::msem);

View File

@ -119,72 +119,6 @@ public:
Ref<MLPPVector> run_cost_deriv_vector(const CostTypes cost, const Ref<MLPPVector> &y_hat, const Ref<MLPPVector> &y);
Ref<MLPPMatrix> run_cost_deriv_matrix(const CostTypes cost, const Ref<MLPPMatrix> &y_hat, const Ref<MLPPMatrix> &y);
-// Regression Costs
-real_t MSE(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t MSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> MSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> MSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t RMSE(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t RMSE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> RMSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> RMSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t MAE(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t MAE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> MAEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> MAEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t MBE(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t MBE(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> MBEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> MBEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-// Classification Costs
-real_t LogLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t LogLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> LogLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> LogLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t CrossEntropy(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t CrossEntropy(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> CrossEntropyDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> CrossEntropyDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t HuberLoss(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta);
-real_t HuberLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta);
-std::vector<real_t> HuberLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta);
-std::vector<std::vector<real_t>> HuberLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta);
-real_t HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y, std::vector<real_t> weights, real_t C);
-real_t HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, std::vector<std::vector<real_t>> weights, real_t C);
-std::vector<real_t> HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t C);
-std::vector<std::vector<real_t>> HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t C);
-real_t WassersteinLoss(std::vector<real_t> y_hat, std::vector<real_t> y);
-real_t WassersteinLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-std::vector<real_t> WassersteinLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y);
-std::vector<std::vector<real_t>> WassersteinLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y);
-real_t dualFormSVM(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y); // TO DO: DON'T forget to add non-linear kernelizations.
-std::vector<real_t> dualFormSVMDeriv(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y);
protected:
static void _bind_methods();
};

View File

@ -5,8 +5,8 @@
//
#include "cost_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
-#include "../regularization/reg.h"
+#include "../regularization/reg_old.h"
#include <cmath>
#include <iostream>
@ -29,12 +29,12 @@ real_t MLPPCostOld::MSE(std::vector<std::vector<real_t>> y_hat, std::vector<std:
}
std::vector<real_t> MLPPCostOld::MSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.subtraction(y_hat, y);
}
std::vector<std::vector<real_t>> MLPPCostOld::MSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.subtraction(y_hat, y);
}
@ -57,12 +57,12 @@ real_t MLPPCostOld::RMSE(std::vector<std::vector<real_t>> y_hat, std::vector<std
}
std::vector<real_t> MLPPCostOld::RMSEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(1 / (2 * sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
std::vector<std::vector<real_t>> MLPPCostOld::RMSEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(1 / (2 / sqrt(MSE(y_hat, y))), MSEDeriv(y_hat, y));
}
@ -138,12 +138,12 @@ real_t MLPPCostOld::MBE(std::vector<std::vector<real_t>> y_hat, std::vector<std:
}
std::vector<real_t> MLPPCostOld::MBEDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.onevec(y_hat.size());
}
std::vector<std::vector<real_t>> MLPPCostOld::MBEDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.onemat(y_hat.size(), y_hat[0].size());
}
@ -170,12 +170,12 @@ real_t MLPPCostOld::LogLoss(std::vector<std::vector<real_t>> y_hat, std::vector<
}
std::vector<real_t> MLPPCostOld::LogLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
std::vector<std::vector<real_t>> MLPPCostOld::LogLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.addition(alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat)), alg.elementWiseDivision(alg.scalarMultiply(-1, alg.scalarAdd(-1, y)), alg.scalarMultiply(-1, alg.scalarAdd(-1, y_hat))));
}
@ -200,17 +200,16 @@ real_t MLPPCostOld::CrossEntropy(std::vector<std::vector<real_t>> y_hat, std::ve
}
std::vector<real_t> MLPPCostOld::CrossEntropyDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
std::vector<std::vector<real_t>> MLPPCostOld::CrossEntropyDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(-1, alg.elementWiseDivision(y, y_hat));
}
real_t MLPPCostOld::HuberLoss(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta) {
-MLPPLinAlg alg;
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
if (abs(y[i] - y_hat[i]) <= delta) {
@ -223,7 +222,6 @@ real_t MLPPCostOld::HuberLoss(std::vector<real_t> y_hat, std::vector<real_t> y,
}
real_t MLPPCostOld::HuberLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta) {
-MLPPLinAlg alg;
real_t sum = 0;
for (uint32_t i = 0; i < y_hat.size(); i++) {
for (uint32_t j = 0; j < y_hat[i].size(); j++) {
@ -238,7 +236,6 @@ real_t MLPPCostOld::HuberLoss(std::vector<std::vector<real_t>> y_hat, std::vecto
}
std::vector<real_t> MLPPCostOld::HuberLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t delta) {
-MLPPLinAlg alg;
std::vector<real_t> deriv;
deriv.resize(y_hat.size());
@ -257,8 +254,6 @@ std::vector<real_t> MLPPCostOld::HuberLossDeriv(std::vector<real_t> y_hat, std::
}
std::vector<std::vector<real_t>> MLPPCostOld::HuberLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t delta) {
-MLPPLinAlg alg;
std::vector<std::vector<real_t>> deriv;
deriv.resize(y_hat.size());
for (uint32_t i = 0; i < deriv.size(); i++) {
@ -347,39 +342,35 @@ real_t MLPPCostOld::WassersteinLoss(std::vector<std::vector<real_t>> y_hat, std:
}
std::vector<real_t> MLPPCostOld::WassersteinLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(-1, y); // Simple.
}
std::vector<std::vector<real_t>> MLPPCostOld::WassersteinLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
return alg.scalarMultiply(-1, y); // Simple.
}
real_t MLPPCostOld::HingeLoss(std::vector<real_t> y_hat, std::vector<real_t> y, std::vector<real_t> weights, real_t C) {
-MLPPLinAlg alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
real_t MLPPCostOld::HingeLoss(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, std::vector<std::vector<real_t>> weights, real_t C) {
-MLPPLinAlg alg;
-MLPPReg regularization;
+MLPPRegOld regularization;
return C * HingeLoss(y_hat, y) + regularization.regTerm(weights, 1, 0, "Ridge");
}
std::vector<real_t> MLPPCostOld::HingeLossDeriv(std::vector<real_t> y_hat, std::vector<real_t> y, real_t C) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
std::vector<std::vector<real_t>> MLPPCostOld::HingeLossDeriv(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y, real_t C) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
-MLPPReg regularization;
return alg.scalarMultiply(C, HingeLossDeriv(y_hat, y));
}
real_t MLPPCostOld::dualFormSVM(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> Y = alg.diag(y); // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.
std::vector<std::vector<real_t>> K = alg.matmult(X, alg.transpose(X)); // TO DO: DON'T forget to add non-linear kernelizations.
std::vector<std::vector<real_t>> Q = alg.matmult(alg.matmult(alg.transpose(Y), K), Y);
@ -390,7 +381,7 @@ real_t MLPPCostOld::dualFormSVM(std::vector<real_t> alpha, std::vector<std::vect
}
std::vector<real_t> MLPPCostOld::dualFormSVMDeriv(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
-MLPPLinAlg alg;
+MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> Y = alg.zeromat(y.size(), y.size());
for (uint32_t i = 0; i < y.size(); i++) {
Y[i][i] = y[i]; // Y is a diagnoal matrix. Y[i][j] = y[i] if i = i, else Y[i][j] = 0. Yt = Y.

View File

@ -10,8 +10,10 @@
#include "core/os/file_access.h" #include "core/os/file_access.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg.h"
#include "../lin_alg/lin_alg_old.h"
#include "../softmax_net/softmax_net_old.h" #include "../softmax_net/softmax_net_old.h"
#include "../stat/stat.h" #include "../stat/stat.h"
#include "../stat/stat_old.h"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
@ -513,7 +515,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>, s
// MULTIVARIATE SUPERVISED // MULTIVARIATE SUPERVISED
void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet, std::vector<real_t> &outputSet) { void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet, std::vector<real_t> &outputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::string inputTemp; std::string inputTemp;
std::string outputTemp; std::string outputTemp;
@ -541,7 +543,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real
} }
void MLPPData::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet) { void MLPPData::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
inputSet = alg.transpose(inputSet); inputSet = alg.transpose(inputSet);
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl; std::cout << inputName[i] << std::endl;
@ -559,7 +561,7 @@ void MLPPData::printData(std::vector<std::string> inputName, std::string outputN
// UNSUPERVISED // UNSUPERVISED
void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet) { void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::string inputTemp; std::string inputTemp;
inputSet.resize(k); inputSet.resize(k);
@ -583,7 +585,7 @@ void MLPPData::setData(int k, std::string fileName, std::vector<std::vector<real
} }
void MLPPData::printData(std::vector<std::string> inputName, std::vector<std::vector<real_t>> inputSet) { void MLPPData::printData(std::vector<std::string> inputName, std::vector<std::vector<real_t>> inputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
inputSet = alg.transpose(inputSet); inputSet = alg.transpose(inputSet);
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl; std::cout << inputName[i] << std::endl;
@ -646,7 +648,7 @@ std::vector<std::vector<real_t>> MLPPData::rgb2gray(std::vector<std::vector<std:
} }
std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2ycbcr(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2ycbcr(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> YCbCr; std::vector<std::vector<std::vector<real_t>>> YCbCr;
YCbCr = alg.resize(YCbCr, input); YCbCr = alg.resize(YCbCr, input);
for (uint32_t i = 0; i < YCbCr[0].size(); i++) { for (uint32_t i = 0; i < YCbCr[0].size(); i++) {
@ -662,7 +664,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2ycbcr(std::vector<st
// Conversion formulas available here: // Conversion formulas available here:
// https://www.rapidtables.com/convert/color/rgb-to-hsv.html // https://www.rapidtables.com/convert/color/rgb-to-hsv.html
std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2hsv(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2hsv(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> HSV; std::vector<std::vector<std::vector<real_t>>> HSV;
HSV = alg.resize(HSV, input); HSV = alg.resize(HSV, input);
for (uint32_t i = 0; i < HSV[0].size(); i++) { for (uint32_t i = 0; i < HSV[0].size(); i++) {
@ -704,7 +706,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2hsv(std::vector<std:
// http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html // http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html
std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2xyz(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2xyz(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> XYZ; std::vector<std::vector<std::vector<real_t>>> XYZ;
XYZ = alg.resize(XYZ, input); XYZ = alg.resize(XYZ, input);
std::vector<std::vector<real_t>> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }; std::vector<std::vector<real_t>> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } };
@ -712,7 +714,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPData::rgb2xyz(std::vector<std:
} }
std::vector<std::vector<std::vector<real_t>>> MLPPData::xyz2rgb(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPData::xyz2rgb(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> XYZ; std::vector<std::vector<std::vector<real_t>>> XYZ;
XYZ = alg.resize(XYZ, input); XYZ = alg.resize(XYZ, input);
std::vector<std::vector<real_t>> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }); std::vector<std::vector<real_t>> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } });
@ -907,7 +909,7 @@ std::vector<std::vector<real_t>> MLPPData::BOW(std::vector<std::string> sentence
} }
std::vector<std::vector<real_t>> MLPPData::TFIDF(std::vector<std::string> sentences) { std::vector<std::vector<real_t>> MLPPData::TFIDF(std::vector<std::string> sentences) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences))); std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences)));
std::vector<std::vector<std::string>> segmented_sentences; std::vector<std::vector<std::string>> segmented_sentences;
@ -1093,10 +1095,10 @@ MLPPData::WordsToVecResult MLPPData::word_to_vec(std::vector<std::string> senten
} }
std::vector<std::vector<real_t>> MLPPData::LSA(std::vector<std::string> sentences, int dim) { std::vector<std::vector<real_t>> MLPPData::LSA(std::vector<std::string> sentences, int dim) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> docWordData = BOW(sentences, "Binary"); std::vector<std::vector<real_t>> docWordData = BOW(sentences, "Binary");
MLPPLinAlg::SVDResultOld svr_res = alg.SVD(docWordData); MLPPLinAlgOld::SVDResultOld svr_res = alg.SVD(docWordData);
std::vector<std::vector<real_t>> S_trunc = alg.zeromat(dim, dim); std::vector<std::vector<real_t>> S_trunc = alg.zeromat(dim, dim);
std::vector<std::vector<real_t>> Vt_trunc; std::vector<std::vector<real_t>> Vt_trunc;
for (int i = 0; i < dim; i++) { for (int i = 0; i < dim; i++) {
@ -1136,7 +1138,7 @@ void MLPPData::setInputNames(std::string fileName, std::vector<std::string> &inp
} }
std::vector<std::vector<real_t>> MLPPData::featureScaling(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPData::featureScaling(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
X = alg.transpose(X); X = alg.transpose(X);
std::vector<real_t> max_elements, min_elements; std::vector<real_t> max_elements, min_elements;
max_elements.resize(X.size()); max_elements.resize(X.size());
@ -1156,8 +1158,8 @@ std::vector<std::vector<real_t>> MLPPData::featureScaling(std::vector<std::vecto
} }
std::vector<std::vector<real_t>> MLPPData::meanNormalization(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPData::meanNormalization(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPStat stat; MLPPStatOld stat;
// (X_j - mu_j) / std_j, for every j // (X_j - mu_j) / std_j, for every j
X = meanCentering(X); X = meanCentering(X);
@ -1168,7 +1170,7 @@ std::vector<std::vector<real_t>> MLPPData::meanNormalization(std::vector<std::ve
} }
std::vector<std::vector<real_t>> MLPPData::meanCentering(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPData::meanCentering(std::vector<std::vector<real_t>> X) {
MLPPStat stat; MLPPStatOld stat;
for (uint32_t i = 0; i < X.size(); i++) { for (uint32_t i = 0; i < X.size(); i++) {
real_t mean_i = stat.mean(X[i]); real_t mean_i = stat.mean(X[i]);
for (uint32_t j = 0; j < X[i].size(); j++) { for (uint32_t j = 0; j < X[i].size(); j++) {

View File

@ -9,9 +9,9 @@
#include "core/os/file_access.h" #include "core/os/file_access.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../softmax_net/softmax_net_old.h" #include "../softmax_net/softmax_net_old.h"
#include "../stat/stat.h" #include "../stat/stat_old.h"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
@ -132,7 +132,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>, s
// MULTIVARIATE SUPERVISED // MULTIVARIATE SUPERVISED
void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet, std::vector<real_t> &outputSet) { void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet, std::vector<real_t> &outputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::string inputTemp; std::string inputTemp;
std::string outputTemp; std::string outputTemp;
@ -160,7 +160,7 @@ void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<r
} }
void MLPPDataOld::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet) { void MLPPDataOld::printData(std::vector<std::string> inputName, std::string outputName, std::vector<std::vector<real_t>> inputSet, std::vector<real_t> outputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
inputSet = alg.transpose(inputSet); inputSet = alg.transpose(inputSet);
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl; std::cout << inputName[i] << std::endl;
@ -178,7 +178,7 @@ void MLPPDataOld::printData(std::vector<std::string> inputName, std::string outp
// UNSUPERVISED // UNSUPERVISED
void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet) { void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<real_t>> &inputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::string inputTemp; std::string inputTemp;
inputSet.resize(k); inputSet.resize(k);
@ -202,7 +202,7 @@ void MLPPDataOld::setData(int k, std::string fileName, std::vector<std::vector<r
} }
void MLPPDataOld::printData(std::vector<std::string> inputName, std::vector<std::vector<real_t>> inputSet) { void MLPPDataOld::printData(std::vector<std::string> inputName, std::vector<std::vector<real_t>> inputSet) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
inputSet = alg.transpose(inputSet); inputSet = alg.transpose(inputSet);
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
std::cout << inputName[i] << std::endl; std::cout << inputName[i] << std::endl;
@ -265,7 +265,7 @@ std::vector<std::vector<real_t>> MLPPDataOld::rgb2gray(std::vector<std::vector<s
} }
std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2ycbcr(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2ycbcr(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> YCbCr; std::vector<std::vector<std::vector<real_t>>> YCbCr;
YCbCr = alg.resize(YCbCr, input); YCbCr = alg.resize(YCbCr, input);
for (uint32_t i = 0; i < YCbCr[0].size(); i++) { for (uint32_t i = 0; i < YCbCr[0].size(); i++) {
@ -281,7 +281,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2ycbcr(std::vector
// Conversion formulas available here: // Conversion formulas available here:
// https://www.rapidtables.com/convert/color/rgb-to-hsv.html // https://www.rapidtables.com/convert/color/rgb-to-hsv.html
std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2hsv(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2hsv(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> HSV; std::vector<std::vector<std::vector<real_t>>> HSV;
HSV = alg.resize(HSV, input); HSV = alg.resize(HSV, input);
for (uint32_t i = 0; i < HSV[0].size(); i++) { for (uint32_t i = 0; i < HSV[0].size(); i++) {
@ -323,7 +323,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2hsv(std::vector<s
// http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html // http://machinethatsees.blogspot.com/2013/07/how-to-convert-rgb-to-xyz-or-vice-versa.html
std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2xyz(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2xyz(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> XYZ; std::vector<std::vector<std::vector<real_t>>> XYZ;
XYZ = alg.resize(XYZ, input); XYZ = alg.resize(XYZ, input);
std::vector<std::vector<real_t>> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }; std::vector<std::vector<real_t>> RGB2XYZ = { { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } };
@ -331,7 +331,7 @@ std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::rgb2xyz(std::vector<s
} }
std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::xyz2rgb(std::vector<std::vector<std::vector<real_t>>> input) { std::vector<std::vector<std::vector<real_t>>> MLPPDataOld::xyz2rgb(std::vector<std::vector<std::vector<real_t>>> input) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<std::vector<real_t>>> XYZ; std::vector<std::vector<std::vector<real_t>>> XYZ;
XYZ = alg.resize(XYZ, input); XYZ = alg.resize(XYZ, input);
std::vector<std::vector<real_t>> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } }); std::vector<std::vector<real_t>> RGB2XYZ = alg.inverse({ { 0.4124564, 0.3575761, 0.1804375 }, { 0.2126726, 0.7151522, 0.0721750 }, { 0.0193339, 0.1191920, 0.9503041 } });
@ -526,7 +526,7 @@ std::vector<std::vector<real_t>> MLPPDataOld::BOW(std::vector<std::string> sente
} }
std::vector<std::vector<real_t>> MLPPDataOld::TFIDF(std::vector<std::string> sentences) { std::vector<std::vector<real_t>> MLPPDataOld::TFIDF(std::vector<std::string> sentences) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences))); std::vector<std::string> wordList = removeNullByte(removeStopWords(createWordList(sentences)));
std::vector<std::vector<std::string>> segmented_sentences; std::vector<std::vector<std::string>> segmented_sentences;
@ -712,10 +712,10 @@ MLPPDataOld::WordsToVecResult MLPPDataOld::word_to_vec(std::vector<std::string>
} }
std::vector<std::vector<real_t>> MLPPDataOld::LSA(std::vector<std::string> sentences, int dim) { std::vector<std::vector<real_t>> MLPPDataOld::LSA(std::vector<std::string> sentences, int dim) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> docWordData = BOW(sentences, "Binary"); std::vector<std::vector<real_t>> docWordData = BOW(sentences, "Binary");
MLPPLinAlg::SVDResultOld svr_res = alg.SVD(docWordData); MLPPLinAlgOld::SVDResultOld svr_res = alg.SVD(docWordData);
std::vector<std::vector<real_t>> S_trunc = alg.zeromat(dim, dim); std::vector<std::vector<real_t>> S_trunc = alg.zeromat(dim, dim);
std::vector<std::vector<real_t>> Vt_trunc; std::vector<std::vector<real_t>> Vt_trunc;
for (int i = 0; i < dim; i++) { for (int i = 0; i < dim; i++) {
@ -755,7 +755,7 @@ void MLPPDataOld::setInputNames(std::string fileName, std::vector<std::string> &
} }
std::vector<std::vector<real_t>> MLPPDataOld::featureScaling(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPDataOld::featureScaling(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
X = alg.transpose(X); X = alg.transpose(X);
std::vector<real_t> max_elements, min_elements; std::vector<real_t> max_elements, min_elements;
max_elements.resize(X.size()); max_elements.resize(X.size());
@ -775,8 +775,8 @@ std::vector<std::vector<real_t>> MLPPDataOld::featureScaling(std::vector<std::ve
} }
std::vector<std::vector<real_t>> MLPPDataOld::meanNormalization(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPDataOld::meanNormalization(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPStat stat; MLPPStatOld stat;
// (X_j - mu_j) / std_j, for every j // (X_j - mu_j) / std_j, for every j
X = meanCentering(X); X = meanCentering(X);
@ -787,7 +787,7 @@ std::vector<std::vector<real_t>> MLPPDataOld::meanNormalization(std::vector<std:
} }
std::vector<std::vector<real_t>> MLPPDataOld::meanCentering(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPDataOld::meanCentering(std::vector<std::vector<real_t>> X) {
MLPPStat stat; MLPPStatOld stat;
for (uint32_t i = 0; i < X.size(); i++) { for (uint32_t i = 0; i < X.size(); i++) {
real_t mean_i = stat.mean(X[i]); real_t mean_i = stat.mean(X[i]);
for (uint32_t j = 0; j < X[i].size(); j++) { for (uint32_t j = 0; j < X[i].size(); j++) {

View File

@ -6,9 +6,9 @@
#include "dual_svc_old.h" #include "dual_svc_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -37,9 +37,8 @@ real_t MLPPDualSVCOld::modelTest(std::vector<real_t> x) {
} }
void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -84,10 +83,10 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U
} }
// void MLPPDualSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI){ // void MLPPDualSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI){
// class MLPPCost cost; // class MLPPCostOld cost;
// MLPPActivationOld avn; // MLPPActivationOld avn;
// MLPPLinAlg alg; // MLPPLinAlgOld alg;
// MLPPReg regularization; // MLPPRegOld regularization;
// real_t cost_prev = 0; // real_t cost_prev = 0;
// int epoch = 1; // int epoch = 1;
@ -117,10 +116,10 @@ void MLPPDualSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool U
// } // }
// void MLPPDualSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){ // void MLPPDualSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI){
// class MLPPCost cost; // class MLPPCostOld cost;
// MLPPActivationOld avn; // MLPPActivationOld avn;
// MLPPLinAlg alg; // MLPPLinAlgOld alg;
// MLPPReg regularization; // MLPPRegOld regularization;
// real_t cost_prev = 0; // real_t cost_prev = 0;
// int epoch = 1; // int epoch = 1;
@ -167,7 +166,7 @@ void MLPPDualSVCOld::save(std::string fileName) {
} }
real_t MLPPDualSVCOld::Cost(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) { real_t MLPPDualSVCOld::Cost(std::vector<real_t> alpha, std::vector<std::vector<real_t>> X, std::vector<real_t> y) {
class MLPPCost cost; class MLPPCostOld cost;
return cost.dualFormSVM(alpha, X, y); return cost.dualFormSVM(alpha, X, y);
} }
@ -177,7 +176,7 @@ std::vector<real_t> MLPPDualSVCOld::Evaluate(std::vector<std::vector<real_t>> X)
} }
std::vector<real_t> MLPPDualSVCOld::propagate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPDualSVCOld::propagate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<real_t> z; std::vector<real_t> z;
for (uint32_t i = 0; i < X.size(); i++) { for (uint32_t i = 0; i < X.size(); i++) {
real_t sum = 0; real_t sum = 0;
@ -198,7 +197,7 @@ real_t MLPPDualSVCOld::Evaluate(std::vector<real_t> x) {
} }
real_t MLPPDualSVCOld::propagate(std::vector<real_t> x) { real_t MLPPDualSVCOld::propagate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
real_t z = 0; real_t z = 0;
for (uint32_t j = 0; j < alpha.size(); j++) { for (uint32_t j = 0; j < alpha.size(); j++) {
if (alpha[j] != 0) { if (alpha[j] != 0) {
@ -227,7 +226,7 @@ void MLPPDualSVCOld::alphaProjection() {
} }
real_t MLPPDualSVCOld::kernelFunction(std::vector<real_t> u, std::vector<real_t> v, std::string kernel) { real_t MLPPDualSVCOld::kernelFunction(std::vector<real_t> u, std::vector<real_t> v, std::string kernel) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (kernel == "Linear") { if (kernel == "Linear") {
return alg.dot(u, v); return alg.dot(u, v);
} }
@ -236,7 +235,7 @@ real_t MLPPDualSVCOld::kernelFunction(std::vector<real_t> u, std::vector<real_t>
} }
std::vector<std::vector<real_t>> MLPPDualSVCOld::kernelFunction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B, std::string kernel) { std::vector<std::vector<real_t>> MLPPDualSVCOld::kernelFunction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B, std::string kernel) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (kernel == "Linear") { if (kernel == "Linear") {
return alg.matmult(inputSet, alg.transpose(inputSet)); return alg.matmult(inputSet, alg.transpose(inputSet));
} }

View File

@ -6,10 +6,10 @@
#include "exp_reg_old.h" #include "exp_reg_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../stat/stat.h" #include "../stat/stat_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -39,8 +39,8 @@ real_t MLPPExpRegOld::modelTest(std::vector<real_t> x) {
} }
void MLPPExpRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPExpRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -96,7 +96,7 @@ void MLPPExpRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI
} }
void MLPPExpRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPExpRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -142,8 +142,8 @@ void MLPPExpRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
} }
void MLPPExpRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPExpRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -214,8 +214,8 @@ void MLPPExpRegOld::save(std::string fileName) {
} }
real_t MLPPExpRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) { real_t MLPPExpRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }

View File

@ -69,7 +69,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
ComputeDiscriminatorGradientsResult dgrads = compute_discriminator_gradients(y_hat, _output_set);
-dgrads.cumulative_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, dgrads.cumulative_hidden_layer_w_grad);
+dgrads.cumulative_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, dgrads.cumulative_hidden_layer_w_grad);
dgrads.output_w_grad = alg.scalar_multiplynv(learning_rate / _n, dgrads.output_w_grad);
update_discriminator_parameters(dgrads.cumulative_hidden_layer_w_grad, dgrads.output_w_grad, learning_rate);
@ -80,7 +80,7 @@ void MLPPGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
_output_set = alg.onevecnv(_n);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(y_hat, _output_set);
-cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
+cumulative_generator_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
update_generator_parameters(cumulative_generator_hidden_layer_w_grad, learning_rate);
forward_pass();

View File

@ -6,9 +6,9 @@
#include "gan_old.h" #include "gan_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <cmath> #include <cmath>
@ -23,13 +23,13 @@ MLPPGANOld::~MLPPGANOld() {
} }
std::vector<std::vector<real_t>> MLPPGANOld::generateExample(int n) { std::vector<std::vector<real_t>> MLPPGANOld::generateExample(int n) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return modelSetTestGenerator(alg.gaussianNoise(n, k)); return modelSetTestGenerator(alg.gaussianNoise(n, k));
} }
void MLPPGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCost cost;
MLPPLinAlg alg; MLPPLinAlgOld alg;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -79,7 +79,7 @@ void MLPPGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
} }
real_t MLPPGANOld::score() { real_t MLPPGANOld::score() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPUtilities util; MLPPUtilities util;
forwardPass(); forwardPass();
return util.performance(y_hat, alg.onevec(n)); return util.performance(y_hat, alg.onevec(n));
@ -99,7 +99,7 @@ void MLPPGANOld::save(std::string fileName) {
} }
void MLPPGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) { void MLPPGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (network.empty()) { if (network.empty()) {
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha)); network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
network[0].forwardPass(); network[0].forwardPass();
@ -110,7 +110,7 @@ void MLPPGANOld::addLayer(int n_hidden, std::string activation, std::string weig
} }
void MLPPGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) { void MLPPGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (!network.empty()) { if (!network.empty()) {
outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha); outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Sigmoid", "LogLoss", network[network.size() - 1].a, weightInit, reg, lambda, alpha);
} else { } else {
@ -148,8 +148,8 @@ std::vector<real_t> MLPPGANOld::modelSetTestDiscriminator(std::vector<std::vecto
} }
real_t MLPPGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) { real_t MLPPGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
real_t totalRegTerm = 0; real_t totalRegTerm = 0;
auto cost_function = outputLayer->cost_map[outputLayer->cost]; auto cost_function = outputLayer->cost_map[outputLayer->cost];
@ -162,7 +162,7 @@ real_t MLPPGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
} }
void MLPPGANOld::forwardPass() { void MLPPGANOld::forwardPass() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (!network.empty()) { if (!network.empty()) {
network[0].input = alg.gaussianNoise(n, k); network[0].input = alg.gaussianNoise(n, k);
network[0].forwardPass(); network[0].forwardPass();
@ -180,7 +180,7 @@ void MLPPGANOld::forwardPass() {
} }
void MLPPGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) { void MLPPGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@ -197,7 +197,7 @@ void MLPPGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vect
} }
void MLPPGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) { void MLPPGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (!network.empty()) { if (!network.empty()) {
for (int i = network.size() / 2; i >= 0; i--) { for (int i = network.size() / 2; i >= 0; i--) {
@ -210,10 +210,10 @@ void MLPPGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<r
} }
std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) { std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@ -246,10 +246,10 @@ std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> M
} }
std::vector<std::vector<std::vector<real_t>>> MLPPGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) { std::vector<std::vector<std::vector<real_t>>> MLPPGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads. std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.

View File

@@ -8,6 +8,7 @@
 #include "../stat/stat.h"
 #include <iostream>
+/*
 void MLPPGaussMarkovChecker::checkGMConditions(std::vector<real_t> eps) {
     bool condition1 = arithmeticMean(eps);
     bool condition2 = homoscedasticity(eps);
@@ -55,6 +56,7 @@ bool MLPPGaussMarkovChecker::exogeneity(std::vector<real_t> eps) {
     return true;
 }
+*/
 void MLPPGaussMarkovChecker::_bind_methods() {
 }

View File

@@ -19,12 +19,14 @@ class MLPPGaussMarkovChecker : public Reference {
     GDCLASS(MLPPGaussMarkovChecker, Reference);
 public:
+    /*
     void checkGMConditions(std::vector<real_t> eps);
     // Independent, 3 Gauss-Markov Conditions
     bool arithmeticMean(std::vector<real_t> eps); // 1) Arithmetic Mean of 0.
     bool homoscedasticity(std::vector<real_t> eps); // 2) Homoscedasticity
     bool exogeneity(std::vector<real_t> eps); // 3) Cov of any 2 non-equal eps values = 0.
+    */
 protected:
     static void _bind_methods();

View File

@@ -5,7 +5,7 @@
 //
 #include "gauss_markov_checker_old.h"
-#include "../stat/stat.h"
+#include "../stat/stat_old.h"
 #include <iostream>
 void MLPPGaussMarkovCheckerOld::checkGMConditions(std::vector<real_t> eps) {
@@ -21,7 +21,7 @@ void MLPPGaussMarkovCheckerOld::checkGMConditions(std::vector<real_t> eps) {
 }
 bool MLPPGaussMarkovCheckerOld::arithmeticMean(std::vector<real_t> eps) {
-    MLPPStat stat;
+    MLPPStatOld stat;
     if (stat.mean(eps) == 0) {
         return true;
     } else {
@@ -30,7 +30,7 @@ bool MLPPGaussMarkovCheckerOld::arithmeticMean(std::vector<real_t> eps) {
 }
 bool MLPPGaussMarkovCheckerOld::homoscedasticity(std::vector<real_t> eps) {
-    MLPPStat stat;
+    MLPPStatOld stat;
     real_t currentVar = (eps[0] - stat.mean(eps)) * (eps[0] - stat.mean(eps)) / eps.size();
     for (uint32_t i = 0; i < eps.size(); i++) {
         if (currentVar != (eps[i] - stat.mean(eps)) * (eps[i] - stat.mean(eps)) / eps.size()) {
@@ -42,7 +42,7 @@ bool MLPPGaussMarkovCheckerOld::homoscedasticity(std::vector<real_t> eps) {
 }
 bool MLPPGaussMarkovCheckerOld::exogeneity(std::vector<real_t> eps) {
-    MLPPStat stat;
+    MLPPStatOld stat;
     for (uint32_t i = 0; i < eps.size(); i++) {
         for (uint32_t j = 0; j < eps.size(); j++) {
             if (i != j) {
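The three predicates above encode the classical Gauss-Markov assumptions on regression residuals: zero mean, homoscedasticity (constant variance), and exogeneity (zero covariance between distinct errors). A minimal usage sketch, with made-up residuals for illustration:

#include "gauss_markov_checker_old.h"

#include <vector>

void gauss_markov_example() {
    // Hypothetical residuals from a fitted linear model.
    std::vector<real_t> eps = { 0.1, -0.2, 0.05, 0.05 };

    MLPPGaussMarkovCheckerOld checker;
    checker.checkGMConditions(eps); // reports which of the three conditions hold
}

Note that the checks compare floating-point statistics with == and !=, so in practice they only pass for exactly constructed inputs; a tolerance-based comparison would be more robust.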

View File

@@ -6,8 +6,8 @@
 #include "gaussian_nb_old.h"
-#include "../lin_alg/lin_alg.h"
-#include "../stat/stat.h"
+#include "../lin_alg/lin_alg_old.h"
+#include "../stat/stat_old.h"
 #include "../utilities/utilities.h"
 #include <algorithm>
@@ -47,8 +47,8 @@ real_t MLPPGaussianNBOld::score() {
 }
 void MLPPGaussianNBOld::Evaluate() {
-    MLPPStat stat;
-    MLPPLinAlg alg;
+    MLPPStatOld stat;
+    MLPPLinAlgOld alg;
     // Computing mu_k_y and sigma_k_y
     mu.resize(class_num);

View File

@@ -6,7 +6,7 @@
 #include "hidden_layer_old.h"
 #include "../activation/activation.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
 #include <iostream>
 #include <random>
@@ -103,7 +103,7 @@ MLPPOldHiddenLayer::MLPPOldHiddenLayer(int p_n_hidden, std::string p_activation,
 }
 void MLPPOldHiddenLayer::forwardPass() {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     z = alg.mat_vec_add(alg.matmult(input, weights), bias);
@@ -111,7 +111,7 @@ void MLPPOldHiddenLayer::forwardPass() {
 }
 void MLPPOldHiddenLayer::Test(std::vector<real_t> x) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
     a_test = (avn.*activationTest_map[activation])(z_test, false);
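A detail worth keeping in mind while reading these hunks: forwardPass computes the batched pre-activation z = XW + b, while Test evaluates a single sample, which is why the weights are transposed there (z = Wᵀx + b). A self-contained sketch of the per-sample computation, with plain std::vector types standing in for the module's helpers (shapes are illustrative):

#include <cstddef>
#include <vector>

// z = W^T x + b, where W is (k x h), x has k entries and b has h entries.
std::vector<double> pre_activation(const std::vector<std::vector<double>> &W,
        const std::vector<double> &x, const std::vector<double> &b) {
    std::vector<double> z = b; // start from the bias
    for (std::size_t j = 0; j < b.size(); ++j) {
        for (std::size_t i = 0; i < x.size(); ++i) {
            z[j] += W[i][j] * x[i]; // column j of W dotted with the sample
        }
    }
    return z;
}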

File diff suppressed because it is too large

View File

@@ -26,45 +26,23 @@ class MLPPLinAlg : public Reference {
 public:
     // MATRIX FUNCTIONS
-    std::vector<std::vector<real_t>> gramMatrix(std::vector<std::vector<real_t>> A);
-    bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> gaussianNoise(int n, int m);
+    //std::vector<std::vector<real_t>> gramMatrix(std::vector<std::vector<real_t>> A);
+    //bool linearIndependenceChecker(std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> gaussian_noise(int n, int m);
-    std::vector<std::vector<real_t>> addition(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
-    std::vector<std::vector<real_t>> subtraction(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
-    std::vector<std::vector<real_t>> matmult(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
     Ref<MLPPMatrix> additionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
     Ref<MLPPMatrix> subtractionnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
     Ref<MLPPMatrix> matmultnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
-    std::vector<std::vector<real_t>> hadamard_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
-    std::vector<std::vector<real_t>> kronecker_product(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
-    std::vector<std::vector<real_t>> elementWiseDivision(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
     Ref<MLPPMatrix> hadamard_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
     Ref<MLPPMatrix> kronecker_productnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
     Ref<MLPPMatrix> element_wise_divisionnvnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
-    std::vector<std::vector<real_t>> transpose(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> scalarMultiply(real_t scalar, std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> scalarAdd(real_t scalar, std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> transposenm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> scalar_multiplynm(real_t scalar, const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> scalar_addnm(real_t scalar, const Ref<MLPPMatrix> &A);
-    std::vector<std::vector<real_t>> log(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> log10(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> exp(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> erf(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> exponentiate(std::vector<std::vector<real_t>> A, real_t p);
-    std::vector<std::vector<real_t>> sqrt(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> cbrt(std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> lognm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> log10nm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> expnm(const Ref<MLPPMatrix> &A);
@@ -73,68 +51,41 @@ public:
     Ref<MLPPMatrix> sqrtnm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> cbrtnm(const Ref<MLPPMatrix> &A);
-    std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
-    std::vector<std::vector<real_t>> abs(std::vector<std::vector<real_t>> A);
+    //std::vector<std::vector<real_t>> matrixPower(std::vector<std::vector<real_t>> A, int n);
     Ref<MLPPMatrix> absnm(const Ref<MLPPMatrix> &A);
-    real_t det(std::vector<std::vector<real_t>> A, int d);
     real_t detm(const Ref<MLPPMatrix> &A, int d);
-    real_t trace(std::vector<std::vector<real_t>> A);
+    //real_t trace(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> cofactor(std::vector<std::vector<real_t>> A, int n, int i, int j);
-    std::vector<std::vector<real_t>> adjoint(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> inverse(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> pinverse(std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> cofactornm(const Ref<MLPPMatrix> &A, int n, int i, int j);
     Ref<MLPPMatrix> adjointnm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> inversenm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> pinversenm(const Ref<MLPPMatrix> &A);
-    std::vector<std::vector<real_t>> zeromat(int n, int m);
-    std::vector<std::vector<real_t>> onemat(int n, int m);
-    std::vector<std::vector<real_t>> full(int n, int m, int k);
     Ref<MLPPMatrix> zeromatnm(int n, int m);
     Ref<MLPPMatrix> onematnm(int n, int m);
     Ref<MLPPMatrix> fullnm(int n, int m, int k);
-    std::vector<std::vector<real_t>> sin(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> cos(std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> sinnm(const Ref<MLPPMatrix> &A);
     Ref<MLPPMatrix> cosnm(const Ref<MLPPMatrix> &A);
-    std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
+    //std::vector<std::vector<real_t>> rotate(std::vector<std::vector<real_t>> A, real_t theta, int axis = -1);
-    std::vector<std::vector<real_t>> max(std::vector<std::vector<real_t>> A, std::vector<std::vector<real_t>> B);
     Ref<MLPPMatrix> maxnm(const Ref<MLPPMatrix> &A, const Ref<MLPPMatrix> &B);
-    real_t max(std::vector<std::vector<real_t>> A);
-    real_t min(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> round(std::vector<std::vector<real_t>> A);
-    real_t norm_2(std::vector<std::vector<real_t>> A);
+    //real_t max(std::vector<std::vector<real_t>> A);
+    //real_t min(std::vector<std::vector<real_t>> A);
+    //std::vector<std::vector<real_t>> round(std::vector<std::vector<real_t>> A);
+    //real_t norm_2(std::vector<std::vector<real_t>> A);
-    std::vector<std::vector<real_t>> identity(real_t d);
     Ref<MLPPMatrix> identitym(int d);
-    std::vector<std::vector<real_t>> cov(std::vector<std::vector<real_t>> A);
     Ref<MLPPMatrix> covnm(const Ref<MLPPMatrix> &A);
-    std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> eig(std::vector<std::vector<real_t>> A);
-    struct EigenResultOld {
-        std::vector<std::vector<real_t>> eigen_vectors;
-        std::vector<std::vector<real_t>> eigen_values;
-    };
-    EigenResultOld eigen_old(std::vector<std::vector<real_t>> A);
     struct EigenResult {
         Ref<MLPPMatrix> eigen_vectors;
         Ref<MLPPMatrix> eigen_values;
@@ -142,14 +93,6 @@ public:
     EigenResult eigen(Ref<MLPPMatrix> A);
-    struct SVDResultOld {
-        std::vector<std::vector<real_t>> U;
-        std::vector<std::vector<real_t>> S;
-        std::vector<std::vector<real_t>> Vt;
-    };
-    SVDResultOld SVD(std::vector<std::vector<real_t>> A);
     struct SVDResult {
         Ref<MLPPMatrix> U;
         Ref<MLPPMatrix> S;
@@ -158,34 +101,34 @@ public:
     SVDResult svd(const Ref<MLPPMatrix> &A);
-    std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
-    std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
-    std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> QRD(std::vector<std::vector<real_t>> A);
+    //std::vector<real_t> vectorProjection(std::vector<real_t> a, std::vector<real_t> b);
+    //std::vector<std::vector<real_t>> gramSchmidtProcess(std::vector<std::vector<real_t>> A);
+    /*
     struct QRDResult {
         std::vector<std::vector<real_t>> Q;
         std::vector<std::vector<real_t>> R;
     };
+    */
-    QRDResult qrd(std::vector<std::vector<real_t>> A);
-    std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> chol(std::vector<std::vector<real_t>> A);
+    //QRDResult qrd(std::vector<std::vector<real_t>> A);
+    /*
     struct CholeskyResult {
         std::vector<std::vector<real_t>> L;
         std::vector<std::vector<real_t>> Lt;
     };
     CholeskyResult cholesky(std::vector<std::vector<real_t>> A);
+    */
-    real_t sum_elements(std::vector<std::vector<real_t>> A);
-    std::vector<real_t> flatten(std::vector<std::vector<real_t>> A);
+    //real_t sum_elements(std::vector<std::vector<real_t>> A);
     Ref<MLPPVector> flattenmnv(const Vector<Ref<MLPPVector>> &A);
     Ref<MLPPVector> flattenvvnv(const Ref<MLPPMatrix> &A);
+    /*
     std::vector<real_t> solve(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
     bool positiveDefiniteChecker(std::vector<std::vector<real_t>> A);
@@ -193,38 +136,29 @@ public:
     bool negativeDefiniteChecker(std::vector<std::vector<real_t>> A);
     bool zeroEigenvalue(std::vector<std::vector<real_t>> A);
+    */
-    void printMatrix(std::vector<std::vector<real_t>> A);
     // VECTOR FUNCTIONS
-    std::vector<std::vector<real_t>> outerProduct(std::vector<real_t> a, std::vector<real_t> b); // This multiplies a, bT
     Ref<MLPPMatrix> outer_product(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b); // This multiplies a, bT
-    std::vector<real_t> hadamard_product(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> hadamard_productnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
     void hadamard_productv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
-    std::vector<real_t> elementWiseDivision(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> element_wise_divisionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
-    std::vector<real_t> scalarMultiply(real_t scalar, std::vector<real_t> a);
     Ref<MLPPVector> scalar_multiplynv(real_t scalar, const Ref<MLPPVector> &a);
     void scalar_multiplyv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
-    std::vector<real_t> scalarAdd(real_t scalar, std::vector<real_t> a);
     Ref<MLPPVector> scalar_addnv(real_t scalar, const Ref<MLPPVector> &a);
     void scalar_addv(real_t scalar, const Ref<MLPPVector> &a, Ref<MLPPVector> out);
-    std::vector<real_t> addition(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> additionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
     void additionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
-    std::vector<real_t> subtraction(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> subtractionnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
     void subtractionv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b, Ref<MLPPVector> out);
-    std::vector<real_t> subtractMatrixRows(std::vector<real_t> a, std::vector<std::vector<real_t>> B);
     Ref<MLPPVector> subtract_matrix_rowsnv(const Ref<MLPPVector> &a, const Ref<MLPPMatrix> &B);
     Ref<MLPPVector> lognv(const Ref<MLPPVector> &a);
@@ -235,16 +169,9 @@ public:
     Ref<MLPPVector> sqrtnv(const Ref<MLPPVector> &a);
     Ref<MLPPVector> cbrtnv(const Ref<MLPPVector> &a);
-    real_t dot(std::vector<real_t> a, std::vector<real_t> b);
     real_t dotnv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
-    std::vector<real_t> cross(std::vector<real_t> a, std::vector<real_t> b);
-    std::vector<real_t> abs(std::vector<real_t> a);
-    std::vector<real_t> zerovec(int n);
-    std::vector<real_t> onevec(int n);
-    std::vector<real_t> full(int n, int k);
+    //std::vector<real_t> cross(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> absv(const Ref<MLPPVector> &a);
@@ -252,91 +179,62 @@ public:
     Ref<MLPPVector> onevecnv(int n);
     Ref<MLPPVector> fullnv(int n, int k);
-    std::vector<std::vector<real_t>> diag(std::vector<real_t> a);
     Ref<MLPPMatrix> diagnm(const Ref<MLPPVector> &a);
-    std::vector<real_t> sin(std::vector<real_t> a);
-    std::vector<real_t> cos(std::vector<real_t> a);
     Ref<MLPPVector> sinnv(const Ref<MLPPVector> &a);
     Ref<MLPPVector> cosnv(const Ref<MLPPVector> &a);
-    std::vector<real_t> max(std::vector<real_t> a, std::vector<real_t> b);
     Ref<MLPPVector> maxnvv(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
-    real_t max(std::vector<real_t> a);
-    real_t min(std::vector<real_t> a);
     real_t maxvr(const Ref<MLPPVector> &a);
     real_t minvr(const Ref<MLPPVector> &a);
-    std::vector<real_t> round(std::vector<real_t> a);
+    //std::vector<real_t> round(std::vector<real_t> a);
-    real_t euclideanDistance(std::vector<real_t> a, std::vector<real_t> b);
     real_t euclidean_distance(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
     real_t euclidean_distance_squared(const Ref<MLPPVector> &a, const Ref<MLPPVector> &b);
+    /*
     real_t norm_2(std::vector<real_t> a);
+    */
-    real_t norm_sq(std::vector<real_t> a);
     real_t norm_sqv(const Ref<MLPPVector> &a);
-    real_t sum_elements(std::vector<real_t> a);
     real_t sum_elementsv(const Ref<MLPPVector> &a);
-    real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
-    void printVector(std::vector<real_t> a);
+    //real_t cosineSimilarity(std::vector<real_t> a, std::vector<real_t> b);
     // MATRIX-VECTOR FUNCTIONS
-    std::vector<std::vector<real_t>> mat_vec_add(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
-    std::vector<real_t> mat_vec_mult(std::vector<std::vector<real_t>> A, std::vector<real_t> b);
     Ref<MLPPMatrix> mat_vec_addnm(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
     Ref<MLPPVector> mat_vec_multnv(const Ref<MLPPMatrix> &A, const Ref<MLPPVector> &b);
     // TENSOR FUNCTIONS
-    std::vector<std::vector<std::vector<real_t>>> addition(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
-    Vector<Ref<MLPPMatrix>> addition_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
-    std::vector<std::vector<std::vector<real_t>>> elementWiseDivision(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
-    Vector<Ref<MLPPMatrix>> element_wise_divisionnv_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
-    std::vector<std::vector<std::vector<real_t>>> sqrt(std::vector<std::vector<std::vector<real_t>>> A);
-    Vector<Ref<MLPPMatrix>> sqrt_vt(const Vector<Ref<MLPPMatrix>> &A);
-    std::vector<std::vector<std::vector<real_t>>> exponentiate(std::vector<std::vector<std::vector<real_t>>> A, real_t p);
-    Vector<Ref<MLPPMatrix>> exponentiate_vt(const Vector<Ref<MLPPMatrix>> &A, real_t p);
-    std::vector<std::vector<real_t>> tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b);
-    std::vector<real_t> flatten(std::vector<std::vector<std::vector<real_t>>> A);
-    void printTensor(std::vector<std::vector<std::vector<real_t>>> A);
-    std::vector<std::vector<std::vector<real_t>>> scalarMultiply(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
-    std::vector<std::vector<std::vector<real_t>>> scalarAdd(real_t scalar, std::vector<std::vector<std::vector<real_t>>> A);
-    Vector<Ref<MLPPMatrix>> scalar_multiply_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
-    Vector<Ref<MLPPMatrix>> scalar_add_vm(real_t scalar, Vector<Ref<MLPPMatrix>> A);
-    std::vector<std::vector<std::vector<real_t>>> resize(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
-    Vector<Ref<MLPPMatrix>> resize_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
-    std::vector<std::vector<std::vector<real_t>>> hadamard_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
-    std::vector<std::vector<std::vector<real_t>>> max(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
-    Vector<Ref<MLPPMatrix>> max_vt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
-    std::vector<std::vector<std::vector<real_t>>> abs(std::vector<std::vector<std::vector<real_t>>> A);
-    Vector<Ref<MLPPMatrix>> abs_vt(const Vector<Ref<MLPPMatrix>> &A);
-    real_t norm_2(std::vector<std::vector<std::vector<real_t>>> A);
-    std::vector<std::vector<std::vector<real_t>>> vector_wise_tensor_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<real_t>> B);
+    Vector<Ref<MLPPMatrix>> additionnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+    Vector<Ref<MLPPMatrix>> element_wise_divisionnvnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+    Vector<Ref<MLPPMatrix>> sqrtnvt(const Vector<Ref<MLPPMatrix>> &A);
+    Vector<Ref<MLPPMatrix>> exponentiatenvt(const Vector<Ref<MLPPMatrix>> &A, real_t p);
+    //std::vector<std::vector<real_t>> tensor_vec_mult(std::vector<std::vector<std::vector<real_t>>> A, std::vector<real_t> b);
+    //std::vector<real_t> flatten(std::vector<std::vector<std::vector<real_t>>> A);
+    Vector<Ref<MLPPMatrix>> scalar_multiplynvt(real_t scalar, Vector<Ref<MLPPMatrix>> A);
+    Vector<Ref<MLPPMatrix>> scalar_addnvt(real_t scalar, Vector<Ref<MLPPMatrix>> A);
+    Vector<Ref<MLPPMatrix>> resizenvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+    //std::vector<std::vector<std::vector<real_t>>> hadamard_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<std::vector<real_t>>> B);
+    Vector<Ref<MLPPMatrix>> maxnvt(const Vector<Ref<MLPPMatrix>> &A, const Vector<Ref<MLPPMatrix>> &B);
+    Vector<Ref<MLPPMatrix>> absnvt(const Vector<Ref<MLPPMatrix>> &A);
+    //real_t norm_2(std::vector<std::vector<std::vector<real_t>>> A);
+    //std::vector<std::vector<std::vector<real_t>>> vector_wise_tensor_product(std::vector<std::vector<std::vector<real_t>>> A, std::vector<std::vector<real_t>> B);
 protected:
     static void _bind_methods();
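The pattern of this cleanup: every std::vector-based overload is deleted or commented out, leaving only the engine-type variants. Judging by the survivors, the suffixes encode the operand kinds — nm for Ref<MLPPMatrix>, nv for Ref<MLPPVector>, and nvt for Vector<Ref<MLPPMatrix>> tensors — which is why callers switch from scalar_multiply_vm/resize_vt to scalar_multiplynvt/resizenvt. A usage sketch composed only of declarations kept above (how they chain together is an assumption, not taken from the module's docs):

MLPPLinAlg alg;

Ref<MLPPMatrix> A = alg.gaussian_noise(3, 3); // random 3x3 matrix
Ref<MLPPMatrix> I = alg.identitym(3);

Ref<MLPPMatrix> B = alg.additionnm(A, I);          // nm: matrix-matrix op
Ref<MLPPMatrix> C = alg.scalar_multiplynm(0.5, B); // nm: scalar * matrix

Vector<Ref<MLPPMatrix>> T; // a "tensor" here is a vector of matrices
T.push_back(B);
T.push_back(C);
Vector<Ref<MLPPMatrix>> T2 = alg.scalar_multiplynvt(0.25, T); // nvt: tensor op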

View File

@@ -8,7 +8,7 @@
 #include "core/math/math_funcs.h"
-#include "../stat/stat.h"
+#include "../stat/stat_old.h"
 #include <cmath>
 #include <iostream>
@@ -509,7 +509,7 @@ std::vector<std::vector<real_t>> MLPPLinAlgOld::identity(real_t d) {
 }
 std::vector<std::vector<real_t>> MLPPLinAlgOld::cov(std::vector<std::vector<real_t>> A) {
-    MLPPStat stat;
+    MLPPStatOld stat;
     std::vector<std::vector<real_t>> covMat;
     covMat.resize(A.size());
     for (uint32_t i = 0; i < covMat.size(); i++) {

View File

@@ -6,10 +6,10 @@
 #include "lin_reg_old.h"
-#include "../cost/cost.h"
+#include "../cost/cost_old.h"
 #include "../lin_alg/lin_alg_old.h"
-#include "../regularization/reg.h"
-#include "../stat/stat.h"
+#include "../regularization/reg_old.h"
+#include "../stat/stat_old.h"
 #include "../utilities/utilities.h"
 #include <cmath>
@@ -41,7 +41,7 @@ real_t MLPPLinRegOld::modelTest(std::vector<real_t> x) {
 void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
     forwardPass();
@@ -73,7 +73,7 @@ void MLPPLinRegOld::NewtonRaphson(real_t learning_rate, int max_epoch, bool UI)
 void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
     forwardPass();
@@ -104,7 +104,7 @@ void MLPPLinRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI
 void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -143,7 +143,7 @@ void MLPPLinRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
 void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -183,7 +183,7 @@ void MLPPLinRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_siz
 void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -230,7 +230,7 @@ void MLPPLinRegOld::Momentum(real_t learning_rate, int max_epoch, int mini_batch
 void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size, real_t gamma, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -279,7 +279,7 @@ void MLPPLinRegOld::NAG(real_t learning_rate, int max_epoch, int mini_batch_size
 void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_size, real_t e, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -327,7 +327,7 @@ void MLPPLinRegOld::Adagrad(real_t learning_rate, int max_epoch, int mini_batch_
 void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t e, bool UI) {
     // Adagrad upgrade. Momentum is applied.
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -374,7 +374,7 @@ void MLPPLinRegOld::Adadelta(real_t learning_rate, int max_epoch, int mini_batch
 void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -427,7 +427,7 @@ void MLPPLinRegOld::Adam(real_t learning_rate, int max_epoch, int mini_batch_siz
 void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -478,7 +478,7 @@ void MLPPLinRegOld::Adamax(real_t learning_rate, int max_epoch, int mini_batch_s
 void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_size, real_t b1, real_t b2, real_t e, bool UI) {
     MLPPLinAlgOld alg;
-    MLPPReg regularization;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -532,7 +532,7 @@ void MLPPLinRegOld::Nadam(real_t learning_rate, int max_epoch, int mini_batch_si
 void MLPPLinRegOld::normalEquation() {
     MLPPLinAlgOld alg;
-    MLPPStat stat;
+    MLPPStatOld stat;
     std::vector<real_t> x_means;
     std::vector<std::vector<real_t>> inputSetT = alg.transpose(inputSet);
@@ -577,8 +577,8 @@ void MLPPLinRegOld::save(std::string fileName) {
 }
 real_t MLPPLinRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-    MLPPReg regularization;
-    class MLPPCost cost;
+    MLPPRegOld regularization;
+    class MLPPCostOld cost;
     return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
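Cost() is the data loss plus a penalty: MSE(y_hat, y) + regTerm(weights, lambda, alpha, reg). The authoritative weighting lives in reg_old.cpp; as a sketch, a conventional elastic-net style term with the same parameter shape looks like this (illustrative, not the module's exact definition):

#include <cmath>
#include <string>
#include <vector>

double reg_term(const std::vector<double> &w, double lambda, double alpha, const std::string &reg) {
    double l1 = 0.0, l2 = 0.0;
    for (double wi : w) {
        l1 += std::fabs(wi); // lasso component
        l2 += wi * wi;       // ridge component
    }
    if (reg == "Lasso")
        return lambda * l1;
    if (reg == "Ridge")
        return lambda * l2 / 2.0;
    if (reg == "ElasticNet")
        return lambda * (alpha * l1 + (1.0 - alpha) * l2 / 2.0);
    return 0.0; // no / unknown regularization
}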

View File

@@ -7,9 +7,9 @@
 #include "log_reg_old.h"
 #include "../activation/activation_old.h"
-#include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
-#include "../regularization/reg.h"
+#include "../cost/cost_old.h"
+#include "../lin_alg/lin_alg_old.h"
+#include "../regularization/reg_old.h"
 #include "../utilities/utilities.h"
 #include <iostream>
@@ -38,8 +38,8 @@ real_t MLPPLogRegOld::modelTest(std::vector<real_t> x) {
 }
 void MLPPLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
     forwardPass();
@@ -70,8 +70,8 @@ void MLPPLogRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI
 }
 void MLPPLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
     forwardPass();
@@ -101,8 +101,8 @@ void MLPPLogRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
 }
 void MLPPLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -140,8 +140,8 @@ void MLPPLogRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
 }
 void MLPPLogRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -190,19 +190,19 @@ void MLPPLogRegOld::save(std::string fileName) {
 }
 real_t MLPPLogRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-    MLPPReg regularization;
-    class MLPPCost cost;
+    MLPPRegOld regularization;
+    class MLPPCostOld cost;
     return cost.LogLoss(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
 }
 std::vector<real_t> MLPPLogRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     return avn.sigmoid(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
 }
 real_t MLPPLogRegOld::Evaluate(std::vector<real_t> x) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     return avn.sigmoid(alg.dot(weights, x) + bias);
 }

View File

@@ -7,9 +7,9 @@
 #include "mann_old.h"
 #include "../activation/activation_old.h"
-#include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
-#include "../regularization/reg.h"
+#include "../cost/cost_old.h"
+#include "../lin_alg/lin_alg_old.h"
+#include "../regularization/reg_old.h"
 #include "../utilities/utilities.h"
 #include <iostream>
@@ -53,10 +53,10 @@ std::vector<real_t> MLPPMANNOld::modelTest(std::vector<real_t> x) {
 }
 void MLPPMANNOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
-    class MLPPCost cost;
+    class MLPPCostOld cost;
     MLPPActivationOld avn;
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -158,8 +158,8 @@ void MLPPMANNOld::addOutputLayer(std::string activation, std::string loss, std::
 }
 real_t MLPPMANNOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
-    MLPPReg regularization;
-    class MLPPCost cost;
+    MLPPRegOld regularization;
+    class MLPPCostOld cost;
     real_t totalRegTerm = 0;
     auto cost_function = outputLayer->cost_map[outputLayer->cost];

View File

@@ -9,9 +9,9 @@
 #include "core/log/logger.h"
 #include "../activation/activation_old.h"
-#include "../cost/cost.h"
-#include "../lin_alg/lin_alg.h"
-#include "../regularization/reg.h"
+#include "../cost/cost_old.h"
+#include "../lin_alg/lin_alg_old.h"
+#include "../regularization/reg_old.h"
 #include "../utilities/utilities.h"
 #include <iostream>
@@ -45,8 +45,8 @@ real_t MLPPMLPOld::modelTest(std::vector<real_t> x) {
 void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
     MLPPActivationOld avn;
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
     forwardPass();
@@ -104,8 +104,8 @@ void MLPPMLPOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
 void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
     MLPPActivationOld avn;
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -160,8 +160,8 @@ void MLPPMLPOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
 void MLPPMLPOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
     MLPPActivationOld avn;
-    MLPPLinAlg alg;
-    MLPPReg regularization;
+    MLPPLinAlgOld alg;
+    MLPPRegOld regularization;
     real_t cost_prev = 0;
     int epoch = 1;
@@ -241,13 +241,13 @@ void MLPPMLPOld::save(std::string fileName) {
 }
 real_t MLPPMLPOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
-    MLPPReg regularization;
-    class MLPPCost cost;
+    MLPPRegOld regularization;
+    class MLPPCostOld cost;
     return cost.LogLoss(y_hat, y) + regularization.regTerm(weights2, lambda, alpha, reg) + regularization.regTerm(weights1, lambda, alpha, reg);
 }
 std::vector<real_t> MLPPMLPOld::Evaluate(std::vector<std::vector<real_t>> X) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
     std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@@ -255,7 +255,7 @@ std::vector<real_t> MLPPMLPOld::Evaluate(std::vector<std::vector<real_t>> X) {
 }
 std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPMLPOld::propagate(std::vector<std::vector<real_t>> X) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
     std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@@ -263,7 +263,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M
 }
 real_t MLPPMLPOld::Evaluate(std::vector<real_t> x) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
     std::vector<real_t> a2 = avn.sigmoid(z2);
@@ -271,7 +271,7 @@ real_t MLPPMLPOld::Evaluate(std::vector<real_t> x) {
 }
 std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPMLPOld::propagate(std::vector<real_t> x) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
     std::vector<real_t> a2 = avn.sigmoid(z2);
@@ -279,7 +279,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPMLPOld::propagate(std::
 }
 void MLPPMLPOld::forwardPass() {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
     a2 = avn.sigmoid(z2);

View File

@@ -5,7 +5,7 @@
 //
 #include "multi_output_layer_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
 #include "../utilities/utilities.h"
 #include <iostream>
@@ -106,33 +106,33 @@ MLPPOldMultiOutputLayer::MLPPOldMultiOutputLayer(int p_n_output, int p_n_hidden,
     activation_map["Arcoth"] = &MLPPActivationOld::arcoth;
     activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth;
-    costDeriv_map["MSE"] = &MLPPCost::MSEDeriv;
-    cost_map["MSE"] = &MLPPCost::MSE;
-    costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv;
-    cost_map["RMSE"] = &MLPPCost::RMSE;
-    costDeriv_map["MAE"] = &MLPPCost::MAEDeriv;
-    cost_map["MAE"] = &MLPPCost::MAE;
-    costDeriv_map["MBE"] = &MLPPCost::MBEDeriv;
-    cost_map["MBE"] = &MLPPCost::MBE;
-    costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv;
-    cost_map["LogLoss"] = &MLPPCost::LogLoss;
-    costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv;
-    cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy;
-    costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv;
-    cost_map["HingeLoss"] = &MLPPCost::HingeLoss;
-    costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv;
-    cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss;
+    costDeriv_map["MSE"] = &MLPPCostOld::MSEDeriv;
+    cost_map["MSE"] = &MLPPCostOld::MSE;
+    costDeriv_map["RMSE"] = &MLPPCostOld::RMSEDeriv;
+    cost_map["RMSE"] = &MLPPCostOld::RMSE;
+    costDeriv_map["MAE"] = &MLPPCostOld::MAEDeriv;
+    cost_map["MAE"] = &MLPPCostOld::MAE;
+    costDeriv_map["MBE"] = &MLPPCostOld::MBEDeriv;
+    cost_map["MBE"] = &MLPPCostOld::MBE;
+    costDeriv_map["LogLoss"] = &MLPPCostOld::LogLossDeriv;
+    cost_map["LogLoss"] = &MLPPCostOld::LogLoss;
+    costDeriv_map["CrossEntropy"] = &MLPPCostOld::CrossEntropyDeriv;
+    cost_map["CrossEntropy"] = &MLPPCostOld::CrossEntropy;
+    costDeriv_map["HingeLoss"] = &MLPPCostOld::HingeLossDeriv;
+    cost_map["HingeLoss"] = &MLPPCostOld::HingeLoss;
+    costDeriv_map["WassersteinLoss"] = &MLPPCostOld::HingeLossDeriv;
+    cost_map["WassersteinLoss"] = &MLPPCostOld::HingeLoss;
 }
 void MLPPOldMultiOutputLayer::forwardPass() {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     z = alg.mat_vec_add(alg.matmult(input, weights), bias);
     a = (avn.*activation_map[activation])(z, false);
 }
 void MLPPOldMultiOutputLayer::Test(std::vector<real_t> x) {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     MLPPActivationOld avn;
     z_test = alg.addition(alg.mat_vec_mult(alg.transpose(weights), x), bias);
     a_test = (avn.*activationTest_map[activation])(z_test, false);
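The rename has to touch every entry here because the maps store pointers to member functions of the cost class, and a real_t (MLPPCost::*)(...) is a different type from a real_t (MLPPCostOld::*)(...). Dispatch then goes through the .* operator, exactly as the activation call above does. A short sketch of how the registry is consumed (variable names are illustrative):

MLPPCostOld cost;
real_t (MLPPCostOld::*cost_fn)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>) = cost_map["MSE"];
real_t c = (cost.*cost_fn)(y_hat, y); // invoke the selected cost on predictions and targets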

View File

@@ -14,7 +14,7 @@
 #include "core/object/reference.h"
 #include "../activation/activation_old.h"
-#include "../cost/cost.h"
+#include "../cost/cost_old.h"
 #include "../regularization/reg.h"
 #include "../utilities/utilities.h"
@@ -44,8 +44,8 @@ public:
     std::map<std::string, std::vector<std::vector<real_t>> (MLPPActivationOld::*)(std::vector<std::vector<real_t>>, bool)> activation_map;
     std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activationTest_map;
-    std::map<std::string, real_t (MLPPCost::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> cost_map;
-    std::map<std::string, std::vector<std::vector<real_t>> (MLPPCost::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> costDeriv_map;
+    std::map<std::string, real_t (MLPPCostOld::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> cost_map;
+    std::map<std::string, std::vector<std::vector<real_t>> (MLPPCostOld::*)(std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>)> costDeriv_map;
     std::vector<real_t> z_test;
     std::vector<real_t> a_test;

View File

@@ -6,7 +6,7 @@
 #include "multinomial_nb_old.h"
-#include "../lin_alg/lin_alg.h"
+#include "../lin_alg/lin_alg_old.h"
 #include "../utilities/utilities.h"
 #include <algorithm>
@@ -81,7 +81,7 @@ void MLPPMultinomialNBOld::computeTheta() {
 }
 void MLPPMultinomialNBOld::Evaluate() {
-    MLPPLinAlg alg;
+    MLPPLinAlgOld alg;
     for (uint32_t i = 0; i < outputSet.size(); i++) {
         // Pr(B | A) * Pr(A)
         real_t score[class_num];

View File

@@ -12,6 +12,7 @@
 #include <iostream>
 #include <string>
+/*
 real_t MLPPNumericalAnalysis::numDiff(real_t (*function)(real_t), real_t x) {
     real_t eps = 1e-10;
     return (function(x + eps) - function(x)) / eps; // This is just the formal def. of the derivative.
@@ -164,16 +165,15 @@ real_t MLPPNumericalAnalysis::eulerianMethod(real_t (*derivative)(std::vector<re
 }
 real_t MLPPNumericalAnalysis::growthMethod(real_t C, real_t k, real_t t) {
-    /*
-    dP/dt = kP
-    dP/P = kdt
-    integral(1/P)dP = integral(k) dt
-    ln|P| = kt + C_initial
-    |P| = e^(kt + C_initial)
-    |P| = e^(C_initial) * e^(kt)
-    P = +/- e^(C_initial) * e^(kt)
-    P = C * e^(kt)
-    */
+    //dP/dt = kP
+    //dP/P = kdt
+    //integral(1/P)dP = integral(k) dt
+    //ln|P| = kt + C_initial
+    //|P| = e^(kt + C_initial)
+    //|P| = e^(C_initial) * e^(kt)
+    //P = +/- e^(C_initial) * e^(kt)
+    //P = C * e^(kt)
     // auto growthFunction = [&C, &k](real_t t) { return C * exp(k * t); };
     return C * std::exp(k * t);
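The comment block being converted here is the standard separation-of-variables solution of the exponential growth ODE dP/dt = kP, whose closed form P(t) = C * e^(kt) is exactly what the return statement evaluates. A quick self-contained numerical check of that claim (constants are illustrative):

#include <cmath>
#include <cstdio>

// Verify that P(t) = C*e^(kt) satisfies dP/dt = kP.
int main() {
    double C = 2.0, k = 0.3, t = 1.5, h = 1e-6;
    double P = C * std::exp(k * t);
    double dPdt = (C * std::exp(k * (t + h)) - P) / h; // forward difference
    std::printf("dP/dt = %.6f, k*P = %.6f\n", dPdt, k * P); // should agree closely
    return 0;
}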
@@ -240,15 +240,14 @@ real_t MLPPNumericalAnalysis::quadraticApproximation(real_t (*function)(std::vec
 }
 real_t MLPPNumericalAnalysis::cubicApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
-    /*
-    Not completely sure as the literature seldom discusses the third order taylor approximation,
-    in particular for multivariate cases, but ostensibly, the matrix/tensor/vector multiplies
-    should look something like this:
-    (N x N x N) (N x 1) [tensor vector mult] => (N x N x 1) => (N x N)
-    Perform remaining multiplies as done for the 2nd order approximation.
-    Result is a scalar.
-    */
+    //Not completely sure as the literature seldom discusses the third order taylor approximation,
+    //in particular for multivariate cases, but ostensibly, the matrix/tensor/vector multiplies
+    //should look something like this:
+    //(N x N x N) (N x 1) [tensor vector mult] => (N x N x 1) => (N x N)
+    //Perform remaining multiplies as done for the 2nd order approximation.
+    //Result is a scalar.
     MLPPLinAlg alg;
     std::vector<std::vector<real_t>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c));
     real_t resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0];
@@ -270,10 +269,10 @@ real_t MLPPNumericalAnalysis::laplacian(real_t (*function)(std::vector<real_t>),
 std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
     MLPPLinAlg alg;
     std::vector<std::vector<real_t>> hessianMatrix = hessian(function, x);
-    /*
-    The reason we do this is because the 2nd partial derivative test is less conclusive for functions of variables greater than
-    2, and the calculations specific to the bivariate case are less computationally intensive.
-    */
+    // The reason we do this is because the 2nd partial derivative test is less conclusive for functions of variables greater than
+    // 2, and the calculations specific to the bivariate case are less computationally intensive.
     if (x.size() == 2) {
         real_t det = alg.det(hessianMatrix, hessianMatrix.size());
         real_t secondDerivative = numDiff_2(function, x, 0, 0);
@@ -298,6 +297,7 @@ std::string MLPPNumericalAnalysis::secondPartialDerivativeTest(real_t (*function
             }
         }
 }
+*/
 void MLPPNumericalAnalysis::_bind_methods() {
 }
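For reference, the bivariate branch above implements the classical second partial derivative test: with D = fxx * fyy - fxy^2 (the Hessian determinant), D > 0 with fxx > 0 indicates a minimum, D > 0 with fxx < 0 a maximum, D < 0 a saddle point, and D = 0 is inconclusive — which is why the code special-cases x.size() == 2. The decision rule in isolation:

#include <string>

// Classification rule for a 2x2 Hessian (a sketch; the real code estimates
// fxx, fyy and fxy numerically before applying it).
std::string classify_critical_point(double fxx, double fyy, double fxy) {
    double det = fxx * fyy - fxy * fxy; // Hessian determinant
    if (det > 0)
        return fxx > 0 ? "min" : "max";
    if (det < 0)
        return "saddle";
    return "test is inconclusive";
}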

View File

@@ -22,6 +22,7 @@ public:
     as an analytical method for calculating derivatives will most likely be used in
     the future.
     */
+    /*
     real_t numDiff(real_t (*function)(real_t), real_t x);
     real_t numDiff_2(real_t (*function)(real_t), real_t x);
     real_t numDiff_3(real_t (*function)(real_t), real_t x);
@@ -56,6 +57,7 @@ public:
     real_t laplacian(real_t (*function)(std::vector<real_t>), std::vector<real_t> x); // laplacian
     std::string secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x);
+    */
 protected:
     static void _bind_methods();

View File

@ -5,7 +5,7 @@
// //
#include "numerical_analysis_old.h" #include "numerical_analysis_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include <climits> #include <climits>
#include <cmath> #include <cmath>
@ -230,12 +230,12 @@ real_t MLPPNumericalAnalysisOld::constantApproximation(real_t (*function)(std::v
} }
real_t MLPPNumericalAnalysisOld::linearApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) { real_t MLPPNumericalAnalysisOld::linearApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0]; return constantApproximation(function, c) + alg.matmult(alg.transpose({ jacobian(function, c) }), { alg.subtraction(x, c) })[0][0];
} }
real_t MLPPNumericalAnalysisOld::quadraticApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) { real_t MLPPNumericalAnalysisOld::quadraticApproximation(real_t (*function)(std::vector<real_t>), std::vector<real_t> c, std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0]; return linearApproximation(function, c, x) + 0.5 * alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(hessian(function, c), alg.transpose({ alg.subtraction(x, c) })))[0][0];
} }
@ -249,7 +249,7 @@ real_t MLPPNumericalAnalysisOld::cubicApproximation(real_t (*function)(std::vect
Perform remaining multiplies as done for the 2nd order approximation. Perform remaining multiplies as done for the 2nd order approximation.
Result is a scalar. Result is a scalar.
*/ */
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c)); std::vector<std::vector<real_t>> resultMat = alg.tensor_vec_mult(thirdOrderTensor(function, c), alg.subtraction(x, c));
real_t resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0]; real_t resultScalar = alg.matmult({ (alg.subtraction(x, c)) }, alg.matmult(resultMat, alg.transpose({ alg.subtraction(x, c) })))[0][0];
@ -268,7 +268,7 @@ real_t MLPPNumericalAnalysisOld::laplacian(real_t (*function)(std::vector<real_t
} }
std::string MLPPNumericalAnalysisOld::secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) { std::string MLPPNumericalAnalysisOld::secondPartialDerivativeTest(real_t (*function)(std::vector<real_t>), std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> hessianMatrix = hessian(function, x); std::vector<std::vector<real_t>> hessianMatrix = hessian(function, x);
/* /*
We do this because the 2nd partial derivative test is less conclusive for functions of more than two We do this because the 2nd partial derivative test is less conclusive for functions of more than two

View File

@ -6,7 +6,7 @@
#include "outlier_finder_old.h" #include "outlier_finder_old.h"
#include "../stat/stat.h" #include "../stat/stat_old.h"
#include <iostream> #include <iostream>
@ -15,7 +15,7 @@ MLPPOutlierFinderOld::MLPPOutlierFinderOld(int threshold) :
} }
std::vector<std::vector<real_t>> MLPPOutlierFinderOld::modelSetTest(std::vector<std::vector<real_t>> inputSet) { std::vector<std::vector<real_t>> MLPPOutlierFinderOld::modelSetTest(std::vector<std::vector<real_t>> inputSet) {
MLPPStat stat; MLPPStatOld stat;
std::vector<std::vector<real_t>> outliers; std::vector<std::vector<real_t>> outliers;
outliers.resize(inputSet.size()); outliers.resize(inputSet.size());
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
@ -30,7 +30,7 @@ std::vector<std::vector<real_t>> MLPPOutlierFinderOld::modelSetTest(std::vector<
} }
std::vector<real_t> MLPPOutlierFinderOld::modelTest(std::vector<real_t> inputSet) { std::vector<real_t> MLPPOutlierFinderOld::modelTest(std::vector<real_t> inputSet) {
MLPPStat stat; MLPPStatOld stat;
std::vector<real_t> outliers; std::vector<real_t> outliers;
for (uint32_t i = 0; i < inputSet.size(); i++) { for (uint32_t i = 0; i < inputSet.size(); i++) {
real_t z = (inputSet[i] - stat.mean(inputSet)) / stat.standardDeviation(inputSet); real_t z = (inputSet[i] - stat.mean(inputSet)) / stat.standardDeviation(inputSet);
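
The loop applies the usual z-score criterion: each sample is standardized against the set's mean and standard deviation,

z_i = \frac{x_i - \bar{x}}{s},

and, assuming the comparison truncated from this hunk follows the usual convention, flagged as an outlier when |z_i| exceeds the constructor's threshold.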

View File

@ -5,7 +5,7 @@
// //
#include "output_layer_old.h" #include "output_layer_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -102,33 +102,33 @@ MLPPOldOutputLayer::MLPPOldOutputLayer(int p_n_hidden, std::string p_activation,
activation_map["Arcoth"] = &MLPPActivationOld::arcoth; activation_map["Arcoth"] = &MLPPActivationOld::arcoth;
activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth; activationTest_map["Arcoth"] = &MLPPActivationOld::arcoth;
costDeriv_map["MSE"] = &MLPPCost::MSEDeriv; costDeriv_map["MSE"] = &MLPPCostOld::MSEDeriv;
cost_map["MSE"] = &MLPPCost::MSE; cost_map["MSE"] = &MLPPCostOld::MSE;
costDeriv_map["RMSE"] = &MLPPCost::RMSEDeriv; costDeriv_map["RMSE"] = &MLPPCostOld::RMSEDeriv;
cost_map["RMSE"] = &MLPPCost::RMSE; cost_map["RMSE"] = &MLPPCostOld::RMSE;
costDeriv_map["MAE"] = &MLPPCost::MAEDeriv; costDeriv_map["MAE"] = &MLPPCostOld::MAEDeriv;
cost_map["MAE"] = &MLPPCost::MAE; cost_map["MAE"] = &MLPPCostOld::MAE;
costDeriv_map["MBE"] = &MLPPCost::MBEDeriv; costDeriv_map["MBE"] = &MLPPCostOld::MBEDeriv;
cost_map["MBE"] = &MLPPCost::MBE; cost_map["MBE"] = &MLPPCostOld::MBE;
costDeriv_map["LogLoss"] = &MLPPCost::LogLossDeriv; costDeriv_map["LogLoss"] = &MLPPCostOld::LogLossDeriv;
cost_map["LogLoss"] = &MLPPCost::LogLoss; cost_map["LogLoss"] = &MLPPCostOld::LogLoss;
costDeriv_map["CrossEntropy"] = &MLPPCost::CrossEntropyDeriv; costDeriv_map["CrossEntropy"] = &MLPPCostOld::CrossEntropyDeriv;
cost_map["CrossEntropy"] = &MLPPCost::CrossEntropy; cost_map["CrossEntropy"] = &MLPPCostOld::CrossEntropy;
costDeriv_map["HingeLoss"] = &MLPPCost::HingeLossDeriv; costDeriv_map["HingeLoss"] = &MLPPCostOld::HingeLossDeriv;
cost_map["HingeLoss"] = &MLPPCost::HingeLoss; cost_map["HingeLoss"] = &MLPPCostOld::HingeLoss;
costDeriv_map["WassersteinLoss"] = &MLPPCost::HingeLossDeriv; costDeriv_map["WassersteinLoss"] = &MLPPCostOld::HingeLossDeriv;
cost_map["WassersteinLoss"] = &MLPPCost::HingeLoss; cost_map["WassersteinLoss"] = &MLPPCostOld::HingeLoss;
} }
void MLPPOldOutputLayer::forwardPass() { void MLPPOldOutputLayer::forwardPass() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights)); z = alg.scalarAdd(bias, alg.mat_vec_mult(input, weights));
a = (avn.*activation_map[activation])(z, false); a = (avn.*activation_map[activation])(z, false);
} }
void MLPPOldOutputLayer::Test(std::vector<real_t> x) { void MLPPOldOutputLayer::Test(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
z_test = alg.dot(weights, x) + bias; z_test = alg.dot(weights, x) + bias;
a_test = (avn.*activationTest_map[activation])(z_test, false); a_test = (avn.*activationTest_map[activation])(z_test, false);

View File

@ -14,7 +14,7 @@
#include "core/object/reference.h" #include "core/object/reference.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../regularization/reg.h" #include "../regularization/reg.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
@ -43,8 +43,8 @@ public:
std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activation_map; std::map<std::string, std::vector<real_t> (MLPPActivationOld::*)(std::vector<real_t>, bool)> activation_map;
std::map<std::string, real_t (MLPPActivationOld::*)(real_t, bool)> activationTest_map; std::map<std::string, real_t (MLPPActivationOld::*)(real_t, bool)> activationTest_map;
std::map<std::string, real_t (MLPPCost::*)(std::vector<real_t>, std::vector<real_t>)> cost_map; std::map<std::string, real_t (MLPPCostOld::*)(std::vector<real_t>, std::vector<real_t>)> cost_map;
std::map<std::string, std::vector<real_t> (MLPPCost::*)(std::vector<real_t>, std::vector<real_t>)> costDeriv_map; std::map<std::string, std::vector<real_t> (MLPPCostOld::*)(std::vector<real_t>, std::vector<real_t>)> costDeriv_map;
real_t z_test; real_t z_test;
real_t a_test; real_t a_test;

View File

@ -6,7 +6,7 @@
#include "pca_old.h" #include "pca_old.h"
#include "../data/data.h" #include "../data/data.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include <iostream> #include <iostream>
#include <random> #include <random>
@ -18,10 +18,10 @@ MLPPPCAOld::MLPPPCAOld(std::vector<std::vector<real_t>> inputSet, int k) :
} }
std::vector<std::vector<real_t>> MLPPPCAOld::principalComponents() { std::vector<std::vector<real_t>> MLPPPCAOld::principalComponents() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPData data; MLPPData data;
MLPPLinAlg::SVDResultOld svr_res = alg.SVD(alg.cov(inputSet)); MLPPLinAlgOld::SVDResultOld svr_res = alg.SVD(alg.cov(inputSet));
X_normalized = data.meanCentering(inputSet); X_normalized = data.meanCentering(inputSet);
U_reduce.resize(svr_res.U.size()); U_reduce.resize(svr_res.U.size());
for (int i = 0; i < k; i++) { for (int i = 0; i < k; i++) {
@ -35,7 +35,7 @@ std::vector<std::vector<real_t>> MLPPPCAOld::principalComponents() {
// Simply tells us the percentage of variance maintained. // Simply tells us the percentage of variance maintained.
real_t MLPPPCAOld::score() { real_t MLPPPCAOld::score() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
std::vector<std::vector<real_t>> X_approx = alg.matmult(U_reduce, Z); std::vector<std::vector<real_t>> X_approx = alg.matmult(U_reduce, Z);
real_t num = 0; real_t num = 0;
real_t den = 0; real_t den = 0;
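
Per the comment, score() reports how much variance the rank-k reconstruction X_approx = U_reduce * Z retains; in the usual PCA formulation this is

1 - \frac{\sum_i \lVert \hat{x}_i - x_i \rVert^2}{\sum_i \lVert x_i \rVert^2},

though the hunk is truncated here, so the exact normalization accumulated into num and den is an assumption.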

View File

@ -6,9 +6,9 @@
#include "probit_reg_old.h" #include "probit_reg_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -31,8 +31,8 @@ real_t MLPPProbitRegOld::modelTest(std::vector<real_t> x) {
void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -64,8 +64,8 @@ void MLPPProbitRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool
void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) { void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -97,8 +97,8 @@ void MLPPProbitRegOld::MLE(real_t learning_rate, int max_epoch, bool UI) {
void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
// NOTE: ∂y_hat/∂z is sparse // NOTE: ∂y_hat/∂z is sparse
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -138,8 +138,8 @@ void MLPPProbitRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
void MLPPProbitRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPProbitRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -208,30 +208,30 @@ void MLPPProbitRegOld::save(std::string fileName) {
} }
real_t MLPPProbitRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) { real_t MLPPProbitRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }
std::vector<real_t> MLPPProbitRegOld::Evaluate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPProbitRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); return avn.gaussianCDF(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
} }
std::vector<real_t> MLPPProbitRegOld::propagate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPProbitRegOld::propagate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
} }
real_t MLPPProbitRegOld::Evaluate(std::vector<real_t> x) { real_t MLPPProbitRegOld::Evaluate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.gaussianCDF(alg.dot(weights, x) + bias); return avn.gaussianCDF(alg.dot(weights, x) + bias);
} }
real_t MLPPProbitRegOld::propagate(std::vector<real_t> x) { real_t MLPPProbitRegOld::propagate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.dot(weights, x) + bias; return alg.dot(weights, x) + bias;
} }

View File

@ -211,154 +211,3 @@ real_t MLPPReg::reg_deriv_termmr(const Ref<MLPPMatrix> &weights, real_t lambda,
return 0; return 0;
} }
} }
real_t MLPPReg::regTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string p_reg) {
if (p_reg == "Ridge") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += weights[i] * weights[i];
}
return reg * lambda / 2;
} else if (p_reg == "Lasso") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += abs(weights[i]);
}
return reg * lambda;
} else if (p_reg == "ElasticNet") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += alpha * abs(weights[i]); // Lasso Reg
reg += ((1 - alpha) / 2) * weights[i] * weights[i]; // Ridge Reg
}
return reg * lambda;
}
return 0;
}
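
Written out, the penalty terms this overload (now duplicated into MLPPRegOld below) computes are the standard ones:

R_{\text{Ridge}}(w) = \frac{\lambda}{2} \sum_i w_i^2, \qquad
R_{\text{Lasso}}(w) = \lambda \sum_i \lvert w_i \rvert, \qquad
R_{\text{ElasticNet}}(w) = \lambda \sum_i \Bigl( \alpha \lvert w_i \rvert + \frac{1 - \alpha}{2} w_i^2 \Bigr)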
real_t MLPPReg::regTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string p_reg) {
if (p_reg == "Ridge") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += weights[i][j] * weights[i][j];
}
}
return reg * lambda / 2;
} else if (p_reg == "Lasso") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += abs(weights[i][j]);
}
}
return reg * lambda;
} else if (p_reg == "ElasticNet") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += alpha * abs(weights[i][j]); // Lasso Reg
reg += ((1 - alpha) / 2) * weights[i][j] * weights[i][j]; // Ridge Reg
}
}
return reg * lambda;
}
return 0;
}
std::vector<real_t> MLPPReg::regWeights(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg) {
MLPPLinAlg alg;
if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg);
}
return alg.subtraction(weights, regDerivTerm(weights, lambda, alpha, reg));
// for(int i = 0; i < weights.size(); i++){
// weights[i] -= regDerivTerm(weights, lambda, alpha, reg, i);
// }
// return weights;
}
std::vector<std::vector<real_t>> MLPPReg::regWeights(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg) {
MLPPLinAlg alg;
if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg);
}
return alg.subtraction(weights, regDerivTerm(weights, lambda, alpha, reg));
// for(int i = 0; i < weights.size(); i++){
// for(int j = 0; j < weights[i].size(); j++){
// weights[i][j] -= regDerivTerm(weights, lambda, alpha, reg, i, j);
// }
// }
// return weights;
}
std::vector<real_t> MLPPReg::regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg) {
std::vector<real_t> regDeriv;
regDeriv.resize(weights.size());
for (uint32_t i = 0; i < regDeriv.size(); i++) {
regDeriv[i] = regDerivTerm(weights, lambda, alpha, reg, i);
}
return regDeriv;
}
std::vector<std::vector<real_t>> MLPPReg::regDerivTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg) {
std::vector<std::vector<real_t>> regDeriv;
regDeriv.resize(weights.size());
for (uint32_t i = 0; i < regDeriv.size(); i++) {
regDeriv[i].resize(weights[0].size());
}
for (uint32_t i = 0; i < regDeriv.size(); i++) {
for (uint32_t j = 0; j < regDeriv[i].size(); j++) {
regDeriv[i][j] = regDerivTerm(weights, lambda, alpha, reg, i, j);
}
}
return regDeriv;
}
real_t MLPPReg::regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg, int j) {
MLPPActivation act;
if (reg == "Ridge") {
return lambda * weights[j];
} else if (reg == "Lasso") {
return lambda * act.sign_normr(weights[j]);
} else if (reg == "ElasticNet") {
return alpha * lambda * act.sign_normr(weights[j]) + (1 - alpha) * lambda * weights[j];
} else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs.
// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
// alpha > lambda.
if (weights[j] > alpha) {
return alpha;
} else if (weights[j] < lambda) {
return lambda;
} else {
return weights[j];
}
} else {
return 0;
}
}
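
The WeightClipping branch above amounts to clamping each weight into [lambda, alpha] (lambda the lower threshold, alpha the upper, as the comment notes). A minimal standalone sketch of the same element-wise clamp, assuming real_t is double:

#include <algorithm>
#include <vector>

using real_t = double; // assumption for this sketch

// Clamp every weight into [lower, upper]; element-wise this is what the
// "WeightClipping" case of regDerivTerm/regWeights returns.
std::vector<real_t> clip_weights(std::vector<real_t> w, real_t lower, real_t upper) {
	for (real_t &wi : w) {
		wi = std::clamp(wi, lower, upper); // C++17
	}
	return w;
}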
real_t MLPPReg::regDerivTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg, int i, int j) {
MLPPActivation act;
if (reg == "Ridge") {
return lambda * weights[i][j];
} else if (reg == "Lasso") {
return lambda * act.sign_normr(weights[i][j]);
} else if (reg == "ElasticNet") {
return alpha * lambda * act.sign_normr(weights[i][j]) + (1 - alpha) * lambda * weights[i][j];
} else if (reg == "WeightClipping") { // Preparation for Wasserstein GANs.
// We assume lambda is the lower clipping threshold, while alpha is the higher clipping threshold.
// alpha > lambda.
if (weights[i][j] > alpha) {
return alpha;
} else if (weights[i][j] < lambda) {
return lambda;
} else {
return weights[i][j];
}
} else {
return 0;
}
}

View File

@ -49,22 +49,6 @@ protected:
private: private:
real_t reg_deriv_termvr(const Ref<MLPPVector> &weights, real_t lambda, real_t alpha, RegularizationType reg, int j); real_t reg_deriv_termvr(const Ref<MLPPVector> &weights, real_t lambda, real_t alpha, RegularizationType reg, int j);
real_t reg_deriv_termmr(const Ref<MLPPMatrix> &weights, real_t lambda, real_t alpha, RegularizationType reg, int i, int j); real_t reg_deriv_termmr(const Ref<MLPPMatrix> &weights, real_t lambda, real_t alpha, RegularizationType reg, int i, int j);
public:
// ======== OLD =========
real_t regTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg);
real_t regTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg);
std::vector<real_t> regWeights(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg);
std::vector<std::vector<real_t>> regWeights(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg);
std::vector<real_t> regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg);
std::vector<std::vector<real_t>> regDerivTerm(std::vector<std::vector<real_t>>, real_t lambda, real_t alpha, std::string reg);
private:
real_t regDerivTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg, int j);
real_t regDerivTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg, int i, int j);
}; };
VARIANT_ENUM_CAST(MLPPReg::RegularizationType); VARIANT_ENUM_CAST(MLPPReg::RegularizationType);

View File

@ -9,13 +9,67 @@
#include "core/math/math_defs.h" #include "core/math/math_defs.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include <iostream> #include <iostream>
#include <random> #include <random>
real_t MLPPRegOld::regTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string p_reg) {
if (p_reg == "Ridge") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += weights[i] * weights[i];
}
return reg * lambda / 2;
} else if (p_reg == "Lasso") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += abs(weights[i]);
}
return reg * lambda;
} else if (p_reg == "ElasticNet") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
reg += alpha * abs(weights[i]); // Lasso Reg
reg += ((1 - alpha) / 2) * weights[i] * weights[i]; // Ridge Reg
}
return reg * lambda;
}
return 0;
}
real_t MLPPRegOld::regTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string p_reg) {
if (p_reg == "Ridge") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += weights[i][j] * weights[i][j];
}
}
return reg * lambda / 2;
} else if (p_reg == "Lasso") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += abs(weights[i][j]);
}
}
return reg * lambda;
} else if (p_reg == "ElasticNet") {
real_t reg = 0;
for (uint32_t i = 0; i < weights.size(); i++) {
for (uint32_t j = 0; j < weights[i].size(); j++) {
reg += alpha * abs(weights[i][j]); // Lasso Reg
reg += ((1 - alpha) / 2) * weights[i][j] * weights[i][j]; // Ridge Reg
}
}
return reg * lambda;
}
return 0;
}
std::vector<real_t> MLPPRegOld::regWeights(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg) { std::vector<real_t> MLPPRegOld::regWeights(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (reg == "WeightClipping") { if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg); return regDerivTerm(weights, lambda, alpha, reg);
} }
@ -27,7 +81,7 @@ std::vector<real_t> MLPPRegOld::regWeights(std::vector<real_t> weights, real_t l
} }
std::vector<std::vector<real_t>> MLPPRegOld::regWeights(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg) { std::vector<std::vector<real_t>> MLPPRegOld::regWeights(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (reg == "WeightClipping") { if (reg == "WeightClipping") {
return regDerivTerm(weights, lambda, alpha, reg); return regDerivTerm(weights, lambda, alpha, reg);
} }

View File

@ -16,8 +16,6 @@
class MLPPRegOld { class MLPPRegOld {
public: public:
// ======== OLD =========
real_t regTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg); real_t regTerm(std::vector<real_t> weights, real_t lambda, real_t alpha, std::string reg);
real_t regTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg); real_t regTerm(std::vector<std::vector<real_t>> weights, real_t lambda, real_t alpha, std::string reg);

View File

@ -7,10 +7,10 @@
#include "softmax_net_old.h" #include "softmax_net_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../data/data.h" #include "../data/data.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -45,8 +45,8 @@ std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::modelSetTest(std::vector<std
void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -101,8 +101,8 @@ void MLPPSoftmaxNetOld::gradientDescent(real_t learning_rate, int max_epoch, boo
void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -159,8 +159,8 @@ void MLPPSoftmaxNetOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
void MLPPSoftmaxNetOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPSoftmaxNetOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -262,14 +262,14 @@ std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::getEmbeddings() {
} }
real_t MLPPSoftmaxNetOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) { real_t MLPPSoftmaxNetOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPReg regularization; MLPPRegOld regularization;
MLPPData data; MLPPData data;
class MLPPCost cost; class MLPPCostOld cost;
return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg); return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights1, lambda, alpha, reg) + regularization.regTerm(weights2, lambda, alpha, reg);
} }
std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2); std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@ -277,7 +277,7 @@ std::vector<std::vector<real_t>> MLPPSoftmaxNetOld::Evaluate(std::vector<std::ve
} }
std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPSoftmaxNetOld::propagate(std::vector<std::vector<real_t>> X) { std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> MLPPSoftmaxNetOld::propagate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1); std::vector<std::vector<real_t>> z2 = alg.mat_vec_add(alg.matmult(X, weights1), bias1);
std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2); std::vector<std::vector<real_t>> a2 = avn.sigmoid(z2);
@ -285,7 +285,7 @@ std::tuple<std::vector<std::vector<real_t>>, std::vector<std::vector<real_t>>> M
} }
std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) { std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<real_t> a2 = avn.sigmoid(z2); std::vector<real_t> a2 = avn.sigmoid(z2);
@ -293,7 +293,7 @@ std::vector<real_t> MLPPSoftmaxNetOld::Evaluate(std::vector<real_t> x) {
} }
std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagate(std::vector<real_t> x) { std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1); std::vector<real_t> z2 = alg.addition(alg.mat_vec_mult(alg.transpose(weights1), x), bias1);
std::vector<real_t> a2 = avn.sigmoid(z2); std::vector<real_t> a2 = avn.sigmoid(z2);
@ -301,7 +301,7 @@ std::tuple<std::vector<real_t>, std::vector<real_t>> MLPPSoftmaxNetOld::propagat
} }
void MLPPSoftmaxNetOld::forwardPass() { void MLPPSoftmaxNetOld::forwardPass() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1); z2 = alg.mat_vec_add(alg.matmult(inputSet, weights1), bias1);
a2 = avn.sigmoid(z2); a2 = avn.sigmoid(z2);

View File

@ -6,9 +6,9 @@
#include "softmax_reg_old.h" #include "softmax_reg_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -30,8 +30,8 @@ std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::modelSetTest(std::vector<std
} }
void MLPPSoftmaxRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -69,8 +69,8 @@ void MLPPSoftmaxRegOld::gradientDescent(real_t learning_rate, int max_epoch, boo
} }
void MLPPSoftmaxRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPSoftmaxRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -113,8 +113,8 @@ void MLPPSoftmaxRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
} }
void MLPPSoftmaxRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPSoftmaxRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -166,19 +166,19 @@ void MLPPSoftmaxRegOld::save(std::string fileName) {
} }
real_t MLPPSoftmaxRegOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) { real_t MLPPSoftmaxRegOld::Cost(std::vector<std::vector<real_t>> y_hat, std::vector<std::vector<real_t>> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.CrossEntropy(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }
std::vector<real_t> MLPPSoftmaxRegOld::Evaluate(std::vector<real_t> x) { std::vector<real_t> MLPPSoftmaxRegOld::Evaluate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x))); return avn.softmax(alg.addition(bias, alg.mat_vec_mult(alg.transpose(weights), x)));
} }
std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::Evaluate(std::vector<std::vector<real_t>> X) { std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias)); return avn.softmax(alg.mat_vec_add(alg.matmult(X, weights), bias));
@ -186,7 +186,7 @@ std::vector<std::vector<real_t>> MLPPSoftmaxRegOld::Evaluate(std::vector<std::ve
// softmax ( wTx + b ) // softmax ( wTx + b )
void MLPPSoftmaxRegOld::forwardPass() { void MLPPSoftmaxRegOld::forwardPass() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias)); y_hat = avn.softmax(alg.mat_vec_add(alg.matmult(inputSet, weights), bias));
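
As the comment says, the model is softmax(w^T x + b); softmax turns the logits into a probability distribution over classes,

\operatorname{softmax}(z)_j = \frac{e^{z_j}}{\sum_k e^{z_k}},

applied row-wise to Z = XW + b in forwardPass().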

View File

@ -14,14 +14,6 @@
#include <iostream> #include <iostream>
real_t MLPPStat::b0Estimation(const std::vector<real_t> &x, const std::vector<real_t> &y) {
return mean(y) - b1Estimation(x, y) * mean(x);
}
real_t MLPPStat::b1Estimation(const std::vector<real_t> &x, const std::vector<real_t> &y) {
return covariance(x, y) / variance(x);
}
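
These two removed helpers are the closed-form ordinary least squares estimators for the univariate model y ≈ b_0 + b_1 x (they survive in MLPPStatOld, which MLPPUniLinRegOld now uses):

\hat{b}_1 = \frac{\operatorname{cov}(x, y)}{\operatorname{var}(x)}, \qquad
\hat{b}_0 = \bar{y} - \hat{b}_1 \bar{x}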
real_t MLPPStat::b0_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y) { real_t MLPPStat::b0_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y) {
return meanv(y) - b1_estimation(x, y) * meanv(x); return meanv(y) - b1_estimation(x, y) * meanv(x);
} }
@ -29,14 +21,7 @@ real_t MLPPStat::b1_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &
return covariancev(x, y) / variancev(x); return covariancev(x, y) / variancev(x);
} }
real_t MLPPStat::mean(const std::vector<real_t> &x) { /*
real_t sum = 0;
for (uint32_t i = 0; i < x.size(); i++) {
sum += x[i];
}
return sum / x.size();
}
real_t MLPPStat::median(std::vector<real_t> x) { real_t MLPPStat::median(std::vector<real_t> x) {
real_t center = real_t(x.size()) / real_t(2); real_t center = real_t(x.size()) / real_t(2);
sort(x.begin(), x.end()); sort(x.begin(), x.end());
@ -88,26 +73,6 @@ real_t MLPPStat::absAvgDeviation(const std::vector<real_t> &x) {
return sum / x.size(); return sum / x.size();
} }
real_t MLPPStat::standardDeviation(const std::vector<real_t> &x) {
return std::sqrt(variance(x));
}
real_t MLPPStat::variance(const std::vector<real_t> &x) {
real_t sum = 0;
for (uint32_t i = 0; i < x.size(); i++) {
sum += (x[i] - mean(x)) * (x[i] - mean(x));
}
return sum / (x.size() - 1);
}
real_t MLPPStat::covariance(const std::vector<real_t> &x, const std::vector<real_t> &y) {
real_t sum = 0;
for (uint32_t i = 0; i < x.size(); i++) {
sum += (x[i] - mean(x)) * (y[i] - mean(y));
}
return sum / (x.size() - 1);
}
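
Both removed estimators divide by n - 1 (Bessel's correction), i.e. they are the unbiased sample forms

s^2 = \frac{1}{n - 1} \sum_{i=1}^{n} (x_i - \bar{x})^2, \qquad
\operatorname{cov}(x, y) = \frac{1}{n - 1} \sum_{i=1}^{n} (x_i - \bar{x})(y_i - \bar{y})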
real_t MLPPStat::correlation(const std::vector<real_t> &x, const std::vector<real_t> &y) { real_t MLPPStat::correlation(const std::vector<real_t> &x, const std::vector<real_t> &y) {
return covariance(x, y) / (standardDeviation(x) * standardDeviation(y)); return covariance(x, y) / (standardDeviation(x) * standardDeviation(y));
} }
@ -120,6 +85,7 @@ real_t MLPPStat::chebyshevIneq(const real_t k) {
// X may or may not belong to a Gaussian Distribution // X may or may not belong to a Gaussian Distribution
return 1 - 1 / (k * k); return 1 - 1 / (k * k);
} }
*/
real_t MLPPStat::meanv(const Ref<MLPPVector> &x) { real_t MLPPStat::meanv(const Ref<MLPPVector> &x) {
int x_size = x->size(); int x_size = x->size();
@ -171,6 +137,7 @@ real_t MLPPStat::covariancev(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y)
return sum / (x_size - 1); return sum / (x_size - 1);
} }
/*
real_t MLPPStat::weightedMean(const std::vector<real_t> &x, const std::vector<real_t> &weights) { real_t MLPPStat::weightedMean(const std::vector<real_t> &x, const std::vector<real_t> &weights) {
real_t sum = 0; real_t sum = 0;
real_t weights_sum = 0; real_t weights_sum = 0;
@ -270,6 +237,7 @@ real_t MLPPStat::logMean(const real_t x, const real_t y) {
} }
return (y - x) / (log(y) - std::log(x)); return (y - x) / (log(y) - std::log(x));
} }
*/
void MLPPStat::_bind_methods() { void MLPPStat::_bind_methods() {
} }

View File

@ -22,25 +22,20 @@ class MLPPStat : public Reference {
public: public:
// These functions are for the univariate lin reg module, not for users. // These functions are for the univariate lin reg module, not for users.
real_t b0Estimation(const std::vector<real_t> &x, const std::vector<real_t> &y);
real_t b1Estimation(const std::vector<real_t> &x, const std::vector<real_t> &y);
real_t b0_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y); real_t b0_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y);
real_t b1_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y); real_t b1_estimation(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y);
// Statistical Functions // Statistical Functions
real_t mean(const std::vector<real_t> &x); /*
real_t median(std::vector<real_t> x); real_t median(std::vector<real_t> x);
std::vector<real_t> mode(const std::vector<real_t> &x); std::vector<real_t> mode(const std::vector<real_t> &x);
real_t range(const std::vector<real_t> &x); real_t range(const std::vector<real_t> &x);
real_t midrange(const std::vector<real_t> &x); real_t midrange(const std::vector<real_t> &x);
real_t absAvgDeviation(const std::vector<real_t> &x); real_t absAvgDeviation(const std::vector<real_t> &x);
real_t standardDeviation(const std::vector<real_t> &x);
real_t variance(const std::vector<real_t> &x);
real_t covariance(const std::vector<real_t> &x, const std::vector<real_t> &y);
real_t correlation(const std::vector<real_t> &x, const std::vector<real_t> &y); real_t correlation(const std::vector<real_t> &x, const std::vector<real_t> &y);
real_t R2(const std::vector<real_t> &x, const std::vector<real_t> &y); real_t R2(const std::vector<real_t> &x, const std::vector<real_t> &y);
real_t chebyshevIneq(const real_t k); real_t chebyshevIneq(const real_t k);
*/
real_t meanv(const Ref<MLPPVector> &x); real_t meanv(const Ref<MLPPVector> &x);
real_t standard_deviationv(const Ref<MLPPVector> &x); real_t standard_deviationv(const Ref<MLPPVector> &x);
@ -48,6 +43,7 @@ public:
real_t covariancev(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y); real_t covariancev(const Ref<MLPPVector> &x, const Ref<MLPPVector> &y);
// Extras // Extras
/*
real_t weightedMean(const std::vector<real_t> &x, const std::vector<real_t> &weights); real_t weightedMean(const std::vector<real_t> &x, const std::vector<real_t> &weights);
real_t geometricMean(const std::vector<real_t> &x); real_t geometricMean(const std::vector<real_t> &x);
real_t harmonicMean(const std::vector<real_t> &x); real_t harmonicMean(const std::vector<real_t> &x);
@ -62,6 +58,7 @@ public:
real_t stolarskyMean(const real_t x, const real_t y, const real_t p); real_t stolarskyMean(const real_t x, const real_t y, const real_t p);
real_t identricMean(const real_t x, const real_t y); real_t identricMean(const real_t x, const real_t y);
real_t logMean(const real_t x, const real_t y); real_t logMean(const real_t x, const real_t y);
*/
protected: protected:
static void _bind_methods(); static void _bind_methods();

View File

@ -7,7 +7,7 @@
#include "stat_old.h" #include "stat_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../data/data.h" #include "../data/data.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
#include <map> #include <map>
@ -65,7 +65,7 @@ std::vector<real_t> MLPPStatOld::mode(const std::vector<real_t> &x) {
} }
real_t MLPPStatOld::range(const std::vector<real_t> &x) { real_t MLPPStatOld::range(const std::vector<real_t> &x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.max(x) - alg.min(x); return alg.max(x) - alg.min(x);
} }

View File

@ -6,9 +6,9 @@
#include "svc_old.h" #include "svc_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -23,9 +23,9 @@ real_t MLPPSVCOld::modelTest(std::vector<real_t> x) {
} }
void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -55,9 +55,9 @@ void MLPPSVCOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
} }
void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -98,9 +98,9 @@ void MLPPSVCOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
} }
void MLPPSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPSVCOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
class MLPPCost cost; class MLPPCostOld cost;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -163,29 +163,29 @@ MLPPSVCOld::MLPPSVCOld(std::vector<std::vector<real_t>> p_inputSet, std::vector<
} }
real_t MLPPSVCOld::Cost(std::vector<real_t> z, std::vector<real_t> y, std::vector<real_t> weights, real_t C) { real_t MLPPSVCOld::Cost(std::vector<real_t> z, std::vector<real_t> y, std::vector<real_t> weights, real_t C) {
class MLPPCost cost; class MLPPCostOld cost;
return cost.HingeLoss(z, y, weights, C); return cost.HingeLoss(z, y, weights, C);
} }
std::vector<real_t> MLPPSVCOld::Evaluate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPSVCOld::Evaluate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); return avn.sign(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
} }
std::vector<real_t> MLPPSVCOld::propagate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPSVCOld::propagate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
} }
real_t MLPPSVCOld::Evaluate(std::vector<real_t> x) { real_t MLPPSVCOld::Evaluate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.sign(alg.dot(weights, x) + bias); return avn.sign(alg.dot(weights, x) + bias);
} }
real_t MLPPSVCOld::propagate(std::vector<real_t> x) { real_t MLPPSVCOld::propagate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.dot(weights, x) + bias; return alg.dot(weights, x) + bias;
} }

View File

@ -7,9 +7,9 @@
#include "tanh_reg_old.h" #include "tanh_reg_old.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include <iostream> #include <iostream>
@ -32,8 +32,8 @@ real_t MLPPTanhRegOld::modelTest(std::vector<real_t> x) {
void MLPPTanhRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPTanhRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -65,8 +65,8 @@ void MLPPTanhRegOld::gradientDescent(real_t learning_rate, int max_epoch, bool U
} }
void MLPPTanhRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) { void MLPPTanhRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -105,8 +105,8 @@ void MLPPTanhRegOld::SGD(real_t learning_rate, int max_epoch, bool UI) {
void MLPPTanhRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) { void MLPPTanhRegOld::MBGD(real_t learning_rate, int max_epoch, int mini_batch_size, bool UI) {
MLPPActivationOld avn; MLPPActivationOld avn;
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPReg regularization; MLPPRegOld regularization;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
@ -160,30 +160,30 @@ void MLPPTanhRegOld::save(std::string fileName) {
} }
real_t MLPPTanhRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) { real_t MLPPTanhRegOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg); return cost.MSE(y_hat, y) + regularization.regTerm(weights, lambda, alpha, reg);
} }
std::vector<real_t> MLPPTanhRegOld::Evaluate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPTanhRegOld::Evaluate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights))); return avn.tanh(alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)));
} }
std::vector<real_t> MLPPTanhRegOld::propagate(std::vector<std::vector<real_t>> X) { std::vector<real_t> MLPPTanhRegOld::propagate(std::vector<std::vector<real_t>> X) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights)); return alg.scalarAdd(bias, alg.mat_vec_mult(X, weights));
} }
real_t MLPPTanhRegOld::Evaluate(std::vector<real_t> x) { real_t MLPPTanhRegOld::Evaluate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPActivationOld avn; MLPPActivationOld avn;
return avn.tanh(alg.dot(weights, x) + bias); return avn.tanh(alg.dot(weights, x) + bias);
} }
real_t MLPPTanhRegOld::propagate(std::vector<real_t> x) { real_t MLPPTanhRegOld::propagate(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.dot(weights, x) + bias; return alg.dot(weights, x) + bias;
} }

View File

@ -10,6 +10,7 @@
#include <iostream> #include <iostream>
#include <string> #include <string>
/*
// DCT ii. // DCT ii.
// https://www.mathworks.com/help/images/discrete-cosine-transform.html // https://www.mathworks.com/help/images/discrete-cosine-transform.html
std::vector<std::vector<real_t>> MLPPTransforms::discreteCosineTransform(std::vector<std::vector<real_t>> A) { std::vector<std::vector<real_t>> MLPPTransforms::discreteCosineTransform(std::vector<std::vector<real_t>> A) {
@ -51,6 +52,7 @@ std::vector<std::vector<real_t>> MLPPTransforms::discreteCosineTransform(std::ve
} }
return B; return B;
} }
*/
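
The commented-out transform is the 2-D DCT-II from the linked MathWorks page: for an M x N input A,

B_{pq} = \alpha_p \alpha_q \sum_{m=0}^{M-1} \sum_{n=0}^{N-1} A_{mn}
\cos\frac{\pi (2m + 1) p}{2M} \cos\frac{\pi (2n + 1) q}{2N},
\qquad
\alpha_p = \begin{cases} 1/\sqrt{M} & p = 0 \\ \sqrt{2/M} & 1 \le p \le M - 1 \end{cases}

(\alpha_q analogously with N). The scalarAdd(-128, A) step in the Old version below centers 8-bit pixel values around zero first, as in JPEG.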
void MLPPTransforms::_bind_methods() { void MLPPTransforms::_bind_methods() {
} }

View File

@ -18,7 +18,7 @@ class MLPPTransforms : public Reference {
GDCLASS(MLPPTransforms, Reference); GDCLASS(MLPPTransforms, Reference);
public: public:
std::vector<std::vector<real_t>> discreteCosineTransform(std::vector<std::vector<real_t>> A); //std::vector<std::vector<real_t>> discreteCosineTransform(std::vector<std::vector<real_t>> A);
protected: protected:
static void _bind_methods(); static void _bind_methods();

View File

@ -5,7 +5,7 @@
// //
#include "transforms_old.h" #include "transforms_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include <cmath> #include <cmath>
#include <iostream> #include <iostream>
#include <string> #include <string>
@ -13,7 +13,7 @@
// DCT ii. // DCT ii.
// https://www.mathworks.com/help/images/discrete-cosine-transform.html // https://www.mathworks.com/help/images/discrete-cosine-transform.html
std::vector<std::vector<real_t>> MLPPTransformsOld::discreteCosineTransform(std::vector<std::vector<real_t>> A) { std::vector<std::vector<real_t>> MLPPTransformsOld::discreteCosineTransform(std::vector<std::vector<real_t>> A) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
A = alg.scalarAdd(-128, A); // Center around 0. A = alg.scalarAdd(-128, A); // Center around 0.
std::vector<std::vector<real_t>> B; std::vector<std::vector<real_t>> B;

View File

@ -6,8 +6,8 @@
#include "uni_lin_reg_old.h" #include "uni_lin_reg_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../stat/stat.h" #include "../stat/stat_old.h"
#include <iostream> #include <iostream>
@ -19,13 +19,13 @@
MLPPUniLinRegOld::MLPPUniLinRegOld(std::vector<real_t> x, std::vector<real_t> y) : MLPPUniLinRegOld::MLPPUniLinRegOld(std::vector<real_t> x, std::vector<real_t> y) :
inputSet(x), outputSet(y) { inputSet(x), outputSet(y) {
MLPPStat estimator; MLPPStatOld estimator;
b1 = estimator.b1Estimation(inputSet, outputSet); b1 = estimator.b1Estimation(inputSet, outputSet);
b0 = estimator.b0Estimation(inputSet, outputSet); b0 = estimator.b0Estimation(inputSet, outputSet);
} }
std::vector<real_t> MLPPUniLinRegOld::modelSetTest(std::vector<real_t> x) { std::vector<real_t> MLPPUniLinRegOld::modelSetTest(std::vector<real_t> x) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return alg.scalarAdd(b0, alg.scalarMultiply(b1, x)); return alg.scalarAdd(b0, alg.scalarMultiply(b1, x));
} }

View File

@ -77,7 +77,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
Vector<Ref<MLPPMatrix>> cumulative_discriminator_hidden_layer_w_grad = discriminator_gradient_results.cumulative_hidden_layer_w_grad; Vector<Ref<MLPPMatrix>> cumulative_discriminator_hidden_layer_w_grad = discriminator_gradient_results.cumulative_hidden_layer_w_grad;
Ref<MLPPVector> output_discriminator_w_grad = discriminator_gradient_results.output_w_grad; Ref<MLPPVector> output_discriminator_w_grad = discriminator_gradient_results.output_w_grad;
cumulative_discriminator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_discriminator_hidden_layer_w_grad); cumulative_discriminator_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, cumulative_discriminator_hidden_layer_w_grad);
output_discriminator_w_grad = alg.scalar_multiplynv(learning_rate / _n, output_discriminator_w_grad); output_discriminator_w_grad = alg.scalar_multiplynv(learning_rate / _n, output_discriminator_w_grad);
update_discriminator_parameters(cumulative_discriminator_hidden_layer_w_grad, output_discriminator_w_grad, learning_rate); update_discriminator_parameters(cumulative_discriminator_hidden_layer_w_grad, output_discriminator_w_grad, learning_rate);
} }
@ -89,7 +89,7 @@ void MLPPWGAN::gradient_descent(real_t learning_rate, int max_epoch, bool ui) {
loutput_set = alg.onevecnv(_n); loutput_set = alg.onevecnv(_n);
Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(_y_hat, loutput_set); Vector<Ref<MLPPMatrix>> cumulative_generator_hidden_layer_w_grad = compute_generator_gradients(_y_hat, loutput_set);
cumulative_generator_hidden_layer_w_grad = alg.scalar_multiply_vm(learning_rate / _n, cumulative_generator_hidden_layer_w_grad); cumulative_generator_hidden_layer_w_grad = alg.scalar_multiplynvt(learning_rate / _n, cumulative_generator_hidden_layer_w_grad);
update_generator_parameters(cumulative_generator_hidden_layer_w_grad, learning_rate); update_generator_parameters(cumulative_generator_hidden_layer_w_grad, learning_rate);
forward_pass(); forward_pass();
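
Both the discriminator and generator updates here scale the averaged gradients by the learning rate,

\theta \leftarrow \theta - \frac{\eta}{n} \sum_{i=1}^{n} \nabla_\theta \ell_i,

and the generator is then trained against an all-ones target (onevecnv(_n)), i.e. it ascends the critic's score on generated samples, the usual WGAN setup.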

View File

@ -9,9 +9,9 @@
#include "core/log/logger.h" #include "core/log/logger.h"
#include "../activation/activation_old.h" #include "../activation/activation_old.h"
#include "../cost/cost.h" #include "../cost/cost_old.h"
#include "../lin_alg/lin_alg.h" #include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg.h" #include "../regularization/reg_old.h"
#include "../utilities/utilities.h" #include "../utilities/utilities.h"
#include "core/object/method_bind_ext.gen.inc" #include "core/object/method_bind_ext.gen.inc"
@ -28,13 +28,12 @@ MLPPWGANOld::~MLPPWGANOld() {
} }
std::vector<std::vector<real_t>> MLPPWGANOld::generateExample(int n) { std::vector<std::vector<real_t>> MLPPWGANOld::generateExample(int n) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
return modelSetTestGenerator(alg.gaussianNoise(n, k)); return modelSetTestGenerator(alg.gaussianNoise(n, k));
} }
void MLPPWGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) { void MLPPWGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI) {
class MLPPCost cost; MLPPLinAlgOld alg;
MLPPLinAlg alg;
real_t cost_prev = 0; real_t cost_prev = 0;
int epoch = 1; int epoch = 1;
forwardPass(); forwardPass();
@ -94,7 +93,7 @@ void MLPPWGANOld::gradientDescent(real_t learning_rate, int max_epoch, bool UI)
} }
real_t MLPPWGANOld::score() { real_t MLPPWGANOld::score() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
MLPPUtilities util; MLPPUtilities util;
forwardPass(); forwardPass();
return util.performance(y_hat, alg.onevec(n)); return util.performance(y_hat, alg.onevec(n));
@ -114,7 +113,7 @@ void MLPPWGANOld::save(std::string fileName) {
} }
void MLPPWGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) { void MLPPWGANOld::addLayer(int n_hidden, std::string activation, std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (network.empty()) { if (network.empty()) {
network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha)); network.push_back(MLPPOldHiddenLayer(n_hidden, activation, alg.gaussianNoise(n, k), weightInit, reg, lambda, alpha));
network[0].forwardPass(); network[0].forwardPass();
@ -125,7 +124,7 @@ void MLPPWGANOld::addLayer(int n_hidden, std::string activation, std::string wei
} }
void MLPPWGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) { void MLPPWGANOld::addOutputLayer(std::string weightInit, std::string reg, real_t lambda, real_t alpha) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (!network.empty()) { if (!network.empty()) {
outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01); outputLayer = new MLPPOldOutputLayer(network[network.size() - 1].n_hidden, "Linear", "WassersteinLoss", network[network.size() - 1].a, weightInit, "WeightClipping", -0.01, 0.01);
} else { // Should never happen. } else { // Should never happen.
@ -163,8 +162,8 @@ std::vector<real_t> MLPPWGANOld::modelSetTestDiscriminator(std::vector<std::vect
} }
real_t MLPPWGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) { real_t MLPPWGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
MLPPReg regularization; MLPPRegOld regularization;
class MLPPCost cost; class MLPPCostOld cost;
real_t totalRegTerm = 0; real_t totalRegTerm = 0;
auto cost_function = outputLayer->cost_map[outputLayer->cost]; auto cost_function = outputLayer->cost_map[outputLayer->cost];
@ -177,7 +176,7 @@ real_t MLPPWGANOld::Cost(std::vector<real_t> y_hat, std::vector<real_t> y) {
} }
void MLPPWGANOld::forwardPass() { void MLPPWGANOld::forwardPass() {
MLPPLinAlg alg; MLPPLinAlgOld alg;
if (!network.empty()) { if (!network.empty()) {
network[0].input = alg.gaussianNoise(n, k); network[0].input = alg.gaussianNoise(n, k);
network[0].forwardPass(); network[0].forwardPass();
@ -195,7 +194,7 @@ void MLPPWGANOld::forwardPass() {
} }
void MLPPWGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) { void MLPPWGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, std::vector<real_t> outputLayerUpdation, real_t learning_rate) {
MLPPLinAlg alg; MLPPLinAlgOld alg;
outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation); outputLayer->weights = alg.subtraction(outputLayer->weights, outputLayerUpdation);
outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n; outputLayer->bias -= learning_rate * alg.sum_elements(outputLayer->delta) / n;
@@ -212,7 +211,7 @@ void MLPPWGANOld::updateDiscriminatorParameters(std::vector<std::vector<std::vec
 }
 
 void MLPPWGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<real_t>>> hiddenLayerUpdations, real_t learning_rate) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	if (!network.empty()) {
 		for (int ii = network.size() / 2; ii >= 0; ii--) {
@@ -227,10 +226,10 @@ void MLPPWGANOld::updateGeneratorParameters(std::vector<std::vector<std::vector<
 }
 
 std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> MLPPWGANOld::computeDiscriminatorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	class MLPPCost cost;
+	class MLPPCostOld cost;
 	MLPPActivationOld avn;
-	MLPPLinAlg alg;
-	MLPPReg regularization;
+	MLPPLinAlgOld alg;
+	MLPPRegOld regularization;
 	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
@@ -262,10 +261,10 @@ std::tuple<std::vector<std::vector<std::vector<real_t>>>, std::vector<real_t>> M
 }
 
 std::vector<std::vector<std::vector<real_t>>> MLPPWGANOld::computeGeneratorGradients(std::vector<real_t> y_hat, std::vector<real_t> outputSet) {
-	class MLPPCost cost;
+	class MLPPCostOld cost;
 	MLPPActivationOld avn;
-	MLPPLinAlg alg;
-	MLPPReg regularization;
+	MLPPLinAlgOld alg;
+	MLPPRegOld regularization;
 	std::vector<std::vector<std::vector<real_t>>> cumulativeHiddenLayerWGrad; // Tensor containing ALL hidden grads.
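The hunks above are one mechanical cleanup: every helper the deprecated WGAN constructs is swapped for its frozen *Old counterpart (MLPPLinAlg to MLPPLinAlgOld, MLPPReg to MLPPRegOld, MLPPCost to MLPPCostOld), so the legacy implementation no longer depends on the classes this commit keeps reshaping. A minimal sketch of the resulting pattern; the include paths and helper function name are illustrative, not taken from the commit:

#include "../cost/cost_old.h"
#include "../lin_alg/lin_alg_old.h"
#include "../regularization/reg_old.h"

void wgan_old_helpers_sketch() {
	MLPPLinAlgOld alg; // frozen std::vector-based linear algebra
	MLPPRegOld regularization; // frozen regularization helpers
	MLPPCostOld cost; // frozen cost functions

	// Call sites in the deprecated code stay unchanged, e.g.
	// alg.gaussianNoise(n, k); only the helper types were renamed.
}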
View File
@@ -22,7 +22,7 @@
 #include "../activation/activation.h"
 #include "../cost/cost.h"
-#include "../regularization/reg.h"
+#include "../regularization/reg_old.h"
 #include "../utilities/utilities.h"
 
 #include <string>
View File
@@ -52,6 +52,9 @@
 #include "../mlpp/auto_encoder/auto_encoder_old.h"
 #include "../mlpp/bernoulli_nb/bernoulli_nb_old.h"
 #include "../mlpp/c_log_log_reg/c_log_log_reg_old.h"
+#include "../mlpp/convolutions/convolutions_old.h"
+#include "../mlpp/cost/cost_old.h"
+#include "../mlpp/data/data_old.h"
 #include "../mlpp/dual_svc/dual_svc_old.h"
 #include "../mlpp/exp_reg/exp_reg_old.h"
 #include "../mlpp/gan/gan_old.h"
@@ -71,8 +74,10 @@
 #include "../mlpp/probit_reg/probit_reg_old.h"
 #include "../mlpp/softmax_net/softmax_net_old.h"
 #include "../mlpp/softmax_reg/softmax_reg_old.h"
+#include "../mlpp/stat/stat_old.h"
 #include "../mlpp/svc/svc_old.h"
 #include "../mlpp/tanh_reg/tanh_reg_old.h"
+#include "../mlpp/transforms/transforms_old.h"
 #include "../mlpp/uni_lin_reg/uni_lin_reg_old.h"
 #include "../mlpp/wgan/wgan_old.h"
@@ -102,7 +107,7 @@ Vector<Vector<real_t>> dstd_mat_to_mat(const std::vector<std::vector<real_t>> &i
 void MLPPTests::test_statistics() {
 	ERR_PRINT("MLPPTests::test_statistics() Started!");
 
-	MLPPStat stat;
+	MLPPStatOld stat;
 	MLPPConvolutions conv;
 
 	// STATISTICS
@@ -146,7 +151,7 @@ void MLPPTests::test_statistics() {
 }
 
 void MLPPTests::test_linear_algebra() {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	std::vector<std::vector<real_t>> square = { { 1, 1 }, { -1, 1 }, { 1, -1 }, { -1, -1 } };
 	std::vector<std::vector<real_t>> square_rot_res = { { 1.41421, 1.11022e-16 }, { -1.11022e-16, 1.41421 }, { 1.11022e-16, -1.41421 }, { -1.41421, -1.11022e-16 } };
@@ -242,7 +247,7 @@ void MLPPTests::test_univariate_linear_regression() {
 void MLPPTests::test_multivariate_linear_regression_gradient_descent(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -257,7 +262,7 @@ void MLPPTests::test_multivariate_linear_regression_gradient_descent(bool ui) {
 void MLPPTests::test_multivariate_linear_regression_sgd(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -272,7 +277,7 @@ void MLPPTests::test_multivariate_linear_regression_sgd(bool ui) {
 void MLPPTests::test_multivariate_linear_regression_mbgd(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -287,7 +292,7 @@ void MLPPTests::test_multivariate_linear_regression_mbgd(bool ui) {
 void MLPPTests::test_multivariate_linear_regression_normal_equation(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -302,7 +307,8 @@ void MLPPTests::test_multivariate_linear_regression_normal_equation(bool ui) {
 void MLPPTests::test_multivariate_linear_regression_adam() {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -310,14 +316,15 @@ void MLPPTests::test_multivariate_linear_regression_adam() {
 	alg.printVector(adamModelOld.modelSetTest(ds->get_input()->to_std_vector()));
 	std::cout << "ACCURACY: " << 100 * adamModelOld.score() << "%" << std::endl;
 
-	MLPPLinReg adam_model(alg.transposenm(ds->get_input()), ds->get_output());
+	MLPPLinReg adam_model(algn.transposenm(ds->get_input()), ds->get_output());
 	PLOG_MSG(adam_model.model_set_test(ds->get_input())->to_string());
 	PLOG_MSG("ACCURACY: " + String::num(100 * adam_model.score()) + "%");
 }
 
 void MLPPTests::test_multivariate_linear_regression_score_sgd_adam(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
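From here on, the tests keep two linear-algebra helpers side by side: alg becomes the frozen MLPPLinAlgOld, serving the *Old models and their std::vector data, while the new algn instance of the current MLPPLinAlg provides the Ref<MLPPMatrix>-based calls such as transposenm. A hedged sketch of the split; the method name is hypothetical, the calls are the ones visible in the hunk above:

void MLPPTests::two_track_linreg_sketch() {
	MLPPData data;
	MLPPLinAlgOld alg; // old API: std::vector math (printVector, transpose, ...), used by the *Old models
	MLPPLinAlg algn; // new API: Ref<MLPPMatrix> math (transposenm, ...)

	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);

	// New-API model: the data stays Ref<MLPPMatrix>/Ref<MLPPVector> end to end.
	MLPPLinReg adam_model(algn.transposenm(ds->get_input()), ds->get_output());
	PLOG_MSG(adam_model.model_set_test(ds->get_input())->to_string());
	PLOG_MSG("ACCURACY: " + String::num(100 * adam_model.score()) + "%");
}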
@@ -330,7 +337,7 @@ void MLPPTests::test_multivariate_linear_regression_score_sgd_adam(bool ui) {
 		modelf_old.MBGD(0.001, 5, 1, ui);
 		scoreSGD += modelf_old.score();
 
-		MLPPLinReg modelf(alg.transposenm(ds->get_input()), ds->get_output());
+		MLPPLinReg modelf(algn.transposenm(ds->get_input()), ds->get_output());
 		modelf.mbgd(0.001, 5, 1, ui);
 		scoreSGD += modelf.score();
@@ -338,7 +345,7 @@ void MLPPTests::test_multivariate_linear_regression_score_sgd_adam(bool ui) {
 		adamModelf_old.Adam(0.1, 5, 1, 0.9, 0.999, 1e-8, ui); // Change batch size = sgd, bgd
 		scoreADAM += adamModelf_old.score();
 
-		MLPPLinReg adamModelf(alg.transposenm(ds->get_input()), ds->get_output());
+		MLPPLinReg adamModelf(algn.transposenm(ds->get_input()), ds->get_output());
 		adamModelf.adam(0.1, 5, 1, 0.9, 0.999, 1e-8, ui); // Change batch size = sgd, bgd
 		scoreADAM += adamModelf.score();
 	}
@@ -350,7 +357,8 @@ void MLPPTests::test_multivariate_linear_regression_score_sgd_adam(bool ui) {
 void MLPPTests::test_multivariate_linear_regression_epochs_gradient_descent(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -361,14 +369,15 @@ void MLPPTests::test_multivariate_linear_regression_epochs_gradient_descent(bool
 	model3_old.gradientDescent(0.001, 300, ui);
 	alg.printVector(model3_old.modelSetTest(ds->get_input()->to_std_vector()));
 
-	MLPPLinReg model3(alg.transposenm(ds->get_input()), ds->get_output()); // Can use Lasso, Ridge, ElasticNet Reg
+	MLPPLinReg model3(algn.transposenm(ds->get_input()), ds->get_output()); // Can use Lasso, Ridge, ElasticNet Reg
 	model3.gradient_descent(0.001, 300, ui);
 	PLOG_MSG(model3.model_set_test(ds->get_input())->to_string());
 }
 
 void MLPPTests::test_multivariate_linear_regression_newton_raphson(bool ui) {
 	MLPPData data;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	Ref<MLPPDataSimple> ds = data.load_california_housing(_california_housing_data_path);
@@ -380,13 +389,13 @@ void MLPPTests::test_multivariate_linear_regression_newton_raphson(bool ui) {
 	model2_old.NewtonRaphson(1.5, 300, ui);
 	alg.printVector(model2_old.modelSetTest(ds->get_input()->to_std_vector()));
 
-	MLPPLinReg model2(alg.transposenm(ds->get_input()), ds->get_output());
+	MLPPLinReg model2(algn.transposenm(ds->get_input()), ds->get_output());
 	model2.newton_raphson(1.5, 300, ui);
 	PLOG_MSG(model2.model_set_test(ds->get_input())->to_string());
 }
 
 void MLPPTests::test_logistic_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	Ref<MLPPDataSimple> dt = data.load_breast_cancer(_breast_cancer_data_path);
@@ -404,7 +413,7 @@ void MLPPTests::test_logistic_regression(bool ui) {
 	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 }
 
 void MLPPTests::test_probit_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	// PROBIT REGRESSION
@@ -421,7 +430,8 @@ void MLPPTests::test_probit_regression(bool ui) {
 	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }
 
 void MLPPTests::test_c_log_log_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	// CLOGLOG REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 1, 2, 3, 4, 5, 6, 7, 8 }, { 0, 0, 0, 0, 1, 1, 1, 1 } };
@@ -440,13 +450,14 @@ void MLPPTests::test_c_log_log_regression(bool ui) {
 	output_set.instance();
 	output_set->set_from_std_vector(outputSet);
 
-	MLPPCLogLogReg model(alg.transposenm(input_set), output_set);
+	MLPPCLogLogReg model(algn.transposenm(input_set), output_set);
 	model.sgd(0.1, 10000, ui);
-	PLOG_MSG(model.model_set_test(alg.transposenm(input_set))->to_string());
+	PLOG_MSG(model.model_set_test(algn.transposenm(input_set))->to_string());
 	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }
 
 void MLPPTests::test_exp_reg_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	// EXPREG REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 0, 1, 2, 3, 4 } };
@@ -465,13 +476,13 @@ void MLPPTests::test_exp_reg_regression(bool ui) {
 	output_set.instance();
 	output_set->set_from_std_vector(outputSet);
 
-	MLPPExpReg model(alg.transposenm(input_set), output_set);
+	MLPPExpReg model(algn.transposenm(input_set), output_set);
 	model.sgd(0.001, 10000, ui);
-	PLOG_MSG(model.model_set_test(alg.transposenm(input_set))->to_string());
+	PLOG_MSG(model.model_set_test(algn.transposenm(input_set))->to_string());
 	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }
 
 void MLPPTests::test_tanh_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	// TANH REGRESSION
 	std::vector<std::vector<real_t>> inputSet = { { 4, 3, 0, -3, -4 }, { 0, 0, 0, 1, 1 } };
@@ -483,7 +494,7 @@ void MLPPTests::test_tanh_regression(bool ui) {
 	std::cout << "ACCURACY (Old): " << 100 * model_old.score() << "%" << std::endl;
 }
 
 void MLPPTests::test_softmax_regression(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	Ref<MLPPDataComplex> dt = data.load_iris(_iris_data_path);
@@ -502,7 +513,7 @@ void MLPPTests::test_softmax_regression(bool ui) {
 }
 
 void MLPPTests::test_support_vector_classification(bool ui) {
 	//MLPPStat stat;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	//MLPPActivation avn;
 	//MLPPCost cost;
 	MLPPData data;
@@ -523,7 +534,7 @@ void MLPPTests::test_support_vector_classification(bool ui) {
 }
 
 void MLPPTests::test_mlp(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	// MLP
 	std::vector<std::vector<real_t>> inputSet = {
@@ -569,7 +580,7 @@ void MLPPTests::test_mlp(bool ui) {
 	PLOG_MSG(res);
 }
 
 void MLPPTests::test_soft_max_network(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	// SOFTMAX NETWORK
@@ -586,7 +597,8 @@ void MLPPTests::test_soft_max_network(bool ui) {
 	std::cout << "ACCURACY: " << 100 * model.score() << "%" << std::endl;
 }
 
 void MLPPTests::test_autoencoder(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	std::vector<std::vector<real_t>> inputSet = { { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, { 3, 5, 9, 12, 15, 18, 21, 24, 27, 30 } };
@@ -600,13 +612,14 @@ void MLPPTests::test_autoencoder(bool ui) {
 	input_set.instance();
 	input_set->set_from_std_vectors(inputSet);
 
-	MLPPAutoEncoder model(alg.transposenm(input_set), 5);
+	MLPPAutoEncoder model(algn.transposenm(input_set), 5);
 	model.sgd(0.001, 300000, ui);
-	PLOG_MSG(model.model_set_test(alg.transposenm(input_set))->to_string());
+	PLOG_MSG(model.model_set_test(algn.transposenm(input_set))->to_string());
 	PLOG_MSG("ACCURACY: " + String::num(100 * model.score()) + "%");
 }
 
 void MLPPTests::test_dynamically_sized_ann(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	// DYNAMICALLY SIZED ANN
 	// Possible Weight Init Methods: Default, Uniform, HeNormal, HeUniform, XavierNormal, XavierUniform
@@ -636,7 +649,7 @@ void MLPPTests::test_dynamically_sized_ann(bool ui) {
 	output_set.instance();
 	output_set->set_from_std_vector(outputSet);
 
-	MLPPANN ann(alg.transposenm(input_set), output_set);
+	MLPPANN ann(algn.transposenm(input_set), output_set);
 	ann.add_layer(2, MLPPActivation::ACTIVATION_FUNCTION_COSH);
 	ann.add_output_layer(MLPPActivation::ACTIVATION_FUNCTION_SIGMOID, MLPPCost::COST_TYPE_LOGISTIC_LOSS);
@@ -646,12 +659,12 @@ void MLPPTests::test_dynamically_sized_ann(bool ui) {
 	ann.set_learning_rate_scheduler_drop(MLPPANN::SCHEDULER_TYPE_STEP, 0.5, 1000);
 	ann.gradient_descent(0.01, 30000);
-	PLOG_MSG(ann.model_set_test(alg.transposenm(input_set))->to_string());
+	PLOG_MSG(ann.model_set_test(algn.transposenm(input_set))->to_string());
 	PLOG_MSG("ACCURACY: " + String::num(100 * ann.score()) + "%");
 }
 
 void MLPPTests::test_wgan_old(bool ui) {
 	//MLPPStat stat;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	//MLPPActivation avn;
 	//MLPPCost cost;
 	//MLPPData data;
@@ -673,7 +686,7 @@ void MLPPTests::test_wgan_old(bool ui) {
 }
 
 void MLPPTests::test_wgan(bool ui) {
 	//MLPPStat stat;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	//MLPPActivation avn;
 	//MLPPCost cost;
 	//MLPPData data;
@@ -700,7 +713,7 @@ void MLPPTests::test_wgan(bool ui) {
 	PLOG_MSG(str);
 }
 
 void MLPPTests::test_ann(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	std::vector<std::vector<real_t>> inputSet = { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } }; // XOR
 	std::vector<real_t> outputSet = { 0, 1, 1, 0 };
@@ -734,7 +747,7 @@ void MLPPTests::test_ann(bool ui) {
 	PLOG_MSG("ACCURACY: " + String::num(100 * ann.score()) + "%"); // Accuracy.
 }
 
 void MLPPTests::test_dynamically_sized_mann(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	// DYNAMICALLY SIZED MANN (Multidimensional Output ANN)
@@ -762,7 +775,8 @@ void MLPPTests::test_dynamically_sized_mann(bool ui) {
 	PLOG_MSG("ACCURACY: " + String::num(100 * mann.score()) + "%");
 }
 
 void MLPPTests::test_train_test_split_mann(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	MLPPData data;
 	// TRAIN TEST SPLIT CHECK
@@ -780,8 +794,8 @@ void MLPPTests::test_train_test_split_mann(bool ui) {
 	Ref<MLPPDataComplex> d;
 	d.instance();
 
-	d->set_input(alg.transposenm(input_set_1));
-	d->set_output(alg.transposenm(output_set_1));
+	d->set_input(algn.transposenm(input_set_1));
+	d->set_output(algn.transposenm(output_set_1));
 
 	MLPPData::SplitComplexData split_data = data.train_test_split(d, 0.2);
@@ -806,7 +820,8 @@ void MLPPTests::test_train_test_split_mann(bool ui) {
 }
 
 void MLPPTests::test_naive_bayes() {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	// NAIVE BAYES
 	std::vector<std::vector<real_t>> inputSet = { { 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 1 }, { 0, 0, 1, 0, 1 } };
@@ -829,14 +844,14 @@ void MLPPTests::test_naive_bayes() {
 	MLPPBernoulliNBOld BNBOld(alg.transpose(inputSet), outputSet);
 	alg.printVector(BNBOld.modelSetTest(alg.transpose(inputSet)));
 
-	MLPPBernoulliNB BNB(alg.transposenm(input_set), output_set);
-	PLOG_MSG(BNB.model_set_test(alg.transposenm(input_set))->to_string());
+	MLPPBernoulliNB BNB(algn.transposenm(input_set), output_set);
+	PLOG_MSG(BNB.model_set_test(algn.transposenm(input_set))->to_string());
 
 	MLPPGaussianNBOld GNBOld(alg.transpose(inputSet), outputSet, 2);
 	alg.printVector(GNBOld.modelSetTest(alg.transpose(inputSet)));
 
-	MLPPGaussianNB GNB(alg.transposenm(input_set), output_set, 2);
-	PLOG_MSG(GNB.model_set_test(alg.transposenm(input_set))->to_string());
+	MLPPGaussianNB GNB(algn.transposenm(input_set), output_set, 2);
+	PLOG_MSG(GNB.model_set_test(algn.transposenm(input_set))->to_string());
 }
 
 void MLPPTests::test_k_means(bool ui) {
 	// KMeans
@@ -858,7 +873,7 @@ void MLPPTests::test_k_means(bool ui) {
 	PLOG_MSG(kmeans->silhouette_scores()->to_string());
 }
 
 void MLPPTests::test_knn(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	// kNN
 	std::vector<std::vector<real_t>> inputSet = {
@@ -897,9 +912,10 @@ void MLPPTests::test_knn(bool ui) {
 }
 
 void MLPPTests::test_convolution_tensors_etc() {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
+	MLPPLinAlg algn;
 	MLPPData data;
-	MLPPConvolutions conv;
+	MLPPConvolutionsOld conv;
 	// CONVOLUTION, POOLING, ETC..
 	std::vector<std::vector<real_t>> input = {
@@ -924,7 +940,7 @@ void MLPPTests::test_convolution_tensors_etc() {
 		{ 109, 121, 127, 133, 139, 141, 140, 133 },
 	};
 
-	MLPPTransforms trans;
+	MLPPTransformsOld trans;
 
 	alg.printMatrix(trans.discreteCosineTransform(input2));
@@ -940,12 +956,12 @@ void MLPPTests::test_convolution_tensors_etc() {
 	alg.printMatrix(conv.convolve_2d(conv.gaussian_filter_2d(5, 1), laplacian, 1));
 }
 
 void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	// PCA, SVD, eigenvalues & eigenvectors
 	std::vector<std::vector<real_t>> inputSet = { { 1, 1 }, { 1, 1 } };
 
-	MLPPLinAlg::EigenResultOld eigen = alg.eigen_old(inputSet);
+	MLPPLinAlgOld::EigenResultOld eigen = alg.eigen_old(inputSet);
 
 	std::cout << "Eigenvectors:" << std::endl;
 	alg.printMatrix(eigen.eigen_vectors);
@@ -955,7 +971,7 @@ void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
 	std::cout << "SVD OLD START" << std::endl;
 
-	MLPPLinAlg::SVDResultOld svd_old = alg.SVD(inputSet);
+	MLPPLinAlgOld::SVDResultOld svd_old = alg.SVD(inputSet);
 
 	std::cout << "U:" << std::endl;
 	alg.printMatrix(svd_old.U);
@@ -970,9 +986,10 @@ void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
 	input_set.instance();
 	input_set->set_from_std_vectors(inputSet);
 
+	/*
 	String str_svd = "SVD\n";
 
-	MLPPLinAlg::SVDResult svd = alg.svd(input_set);
+	MLPPLinAlgOld::SVDResult svd = alg.svd(input_set);
 
 	str_svd += "U:\n";
 	str_svd += svd.U->to_string();
@@ -983,6 +1000,7 @@ void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
 	str_svd += "\n";
 
 	PLOG_MSG(str_svd);
+	*/
 
 	std::cout << "PCA" << std::endl;
@@ -1003,7 +1021,7 @@ void MLPPTests::test_pca_svd_eigenvalues_eigenvectors(bool ui) {
 }
 
 void MLPPTests::test_nlp_and_data(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	// NLP/DATA
@@ -1053,7 +1071,7 @@ void MLPPTests::test_nlp_and_data(bool ui) {
 	std::cout << std::endl;
 }
 
 void MLPPTests::test_outlier_finder(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	// Outlier Finder
 	//std::vector<real_t> inputSet = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 23554332523523 };
@@ -1109,13 +1127,13 @@ void MLPPTests::test_new_math_functions() {
 	alg.printMatrix(alg.gramSchmidtProcess(P));
 
-	//MLPPLinAlg::QRDResult qrd_result = alg.qrd(P); // It works!
+	//MLPPLinAlgOld::QRDResult qrd_result = alg.qrd(P); // It works!
 	//alg.printMatrix(qrd_result.Q);
 	//alg.printMatrix(qrd_result.R);
 }
 
 void MLPPTests::test_positive_definiteness_checker() {
 	//MLPPStat stat;
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	//MLPPActivation avn;
 	//MLPPCost cost;
 	//MLPPData data;
@@ -1130,7 +1148,7 @@ void MLPPTests::test_positive_definiteness_checker() {
 	};
 	std::cout << std::boolalpha << alg.positiveDefiniteChecker(A) << std::endl;
 
-	MLPPLinAlg::CholeskyResult chres = alg.cholesky(A); // works.
+	MLPPLinAlgOld::CholeskyResult chres = alg.cholesky(A); // works.
 	alg.printMatrix(chres.L);
 	alg.printMatrix(chres.Lt);
 }
@@ -1200,8 +1218,8 @@ real_t f_mv(std::vector<real_t> x) {
 */
 
 void MLPPTests::test_numerical_analysis() {
-	MLPPLinAlg alg;
-	MLPPConvolutions conv;
+	MLPPLinAlgOld alg;
+	MLPPConvolutionsOld conv;
 	// Checks for numerical analysis class.
 	MLPPNumericalAnalysisOld numAn;
@@ -1273,7 +1291,7 @@ void MLPPTests::test_numerical_analysis() {
 	alg.printVector(alg.cross(a, b));
 }
 
 void MLPPTests::test_support_vector_classification_kernel(bool ui) {
-	MLPPLinAlg alg;
+	MLPPLinAlgOld alg;
 	MLPPData data;
 	//SUPPORT VECTOR CLASSIFICATION (kernel method)